[llvm] 5150d65 - [SLP][X86] Add common CHECK prefix to sub-128-bit vector tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri May 13 06:07:51 PDT 2022


Author: Simon Pilgrim
Date: 2022-05-13T14:07:40+01:00
New Revision: 5150d653aa04cd5a3b7d31f6989b1bed0670924f

URL: https://github.com/llvm/llvm-project/commit/5150d653aa04cd5a3b7d31f6989b1bed0670924f
DIFF: https://github.com/llvm/llvm-project/commit/5150d653aa04cd5a3b7d31f6989b1bed0670924f.diff

LOG: [SLP][X86] Add common CHECK prefix to sub-128-bit vector tests
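
For reference: FileCheck enables every prefix passed via --check-prefixes, and
utils/update_test_checks.py emits a single block under the shared CHECK prefix
for any function where all RUN lines produce identical IR, falling back to the
per-target prefixes only where codegen diverges. A minimal sketch of the
pattern (the @foo function and its check lines are illustrative, not taken
from these tests):

    ; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64    | FileCheck %s --check-prefixes=CHECK,SSE
    ; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,AVX

    ; Emitted once under the common prefix when every RUN line agrees:
    ; CHECK-LABEL: @foo(
    ; Split per target only where the generated IR actually differs:
    ; SSE: store <8 x i8>
    ; AVX: store <16 x i8>

In the three tests touched here, the SSE and AVX bodies were identical for
every function, so the regenerated assertions collapse into single CHECK
blocks.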

Added: 
    

Modified: 
    llvm/test/Transforms/SLPVectorizer/X86/arith-add-load.ll
    llvm/test/Transforms/SLPVectorizer/X86/arith-and-const-load.ll
    llvm/test/Transforms/SLPVectorizer/X86/arith-mul-load.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-add-load.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-add-load.ll
index e62d6da8e34dc..1b24ce42c2541 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-add-load.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-add-load.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64    -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefix=SSE
-; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v2 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefix=SSE
-; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v3 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefix=AVX
-; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v4 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefix=AVX
+; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64    -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v2 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v3 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v4 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX
 
 ; // PR47491
 ; void pr(char* r, char* a){
@@ -12,57 +12,31 @@
 ; }
 
 define void @add4(ptr noalias nocapture noundef %r, ptr noalias nocapture noundef readonly %a) {
-; SSE-LABEL: @add4(
-; SSE-NEXT:  entry:
-; SSE-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A:%.*]], align 1
-; SSE-NEXT:    [[TMP1:%.*]] = load i8, ptr [[R:%.*]], align 1
-; SSE-NEXT:    [[ADD:%.*]] = add i8 [[TMP1]], [[TMP0]]
-; SSE-NEXT:    store i8 [[ADD]], ptr [[R]], align 1
-; SSE-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 1
-; SSE-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
-; SSE-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 1
-; SSE-NEXT:    [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX2_1]], align 1
-; SSE-NEXT:    [[ADD_1:%.*]] = add i8 [[TMP3]], [[TMP2]]
-; SSE-NEXT:    store i8 [[ADD_1]], ptr [[ARRAYIDX2_1]], align 1
-; SSE-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 2
-; SSE-NEXT:    [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
-; SSE-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 2
-; SSE-NEXT:    [[TMP5:%.*]] = load i8, ptr [[ARRAYIDX2_2]], align 1
-; SSE-NEXT:    [[ADD_2:%.*]] = add i8 [[TMP5]], [[TMP4]]
-; SSE-NEXT:    store i8 [[ADD_2]], ptr [[ARRAYIDX2_2]], align 1
-; SSE-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 3
-; SSE-NEXT:    [[TMP6:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
-; SSE-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 3
-; SSE-NEXT:    [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX2_3]], align 1
-; SSE-NEXT:    [[ADD_3:%.*]] = add i8 [[TMP7]], [[TMP6]]
-; SSE-NEXT:    store i8 [[ADD_3]], ptr [[ARRAYIDX2_3]], align 1
-; SSE-NEXT:    ret void
-;
-; AVX-LABEL: @add4(
-; AVX-NEXT:  entry:
-; AVX-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A:%.*]], align 1
-; AVX-NEXT:    [[TMP1:%.*]] = load i8, ptr [[R:%.*]], align 1
-; AVX-NEXT:    [[ADD:%.*]] = add i8 [[TMP1]], [[TMP0]]
-; AVX-NEXT:    store i8 [[ADD]], ptr [[R]], align 1
-; AVX-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 1
-; AVX-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
-; AVX-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 1
-; AVX-NEXT:    [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX2_1]], align 1
-; AVX-NEXT:    [[ADD_1:%.*]] = add i8 [[TMP3]], [[TMP2]]
-; AVX-NEXT:    store i8 [[ADD_1]], ptr [[ARRAYIDX2_1]], align 1
-; AVX-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 2
-; AVX-NEXT:    [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
-; AVX-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 2
-; AVX-NEXT:    [[TMP5:%.*]] = load i8, ptr [[ARRAYIDX2_2]], align 1
-; AVX-NEXT:    [[ADD_2:%.*]] = add i8 [[TMP5]], [[TMP4]]
-; AVX-NEXT:    store i8 [[ADD_2]], ptr [[ARRAYIDX2_2]], align 1
-; AVX-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 3
-; AVX-NEXT:    [[TMP6:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
-; AVX-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 3
-; AVX-NEXT:    [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX2_3]], align 1
-; AVX-NEXT:    [[ADD_3:%.*]] = add i8 [[TMP7]], [[TMP6]]
-; AVX-NEXT:    store i8 [[ADD_3]], ptr [[ARRAYIDX2_3]], align 1
-; AVX-NEXT:    ret void
+; CHECK-LABEL: @add4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[R:%.*]], align 1
+; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    store i8 [[ADD]], ptr [[R]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
+; CHECK-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX2_1]], align 1
+; CHECK-NEXT:    [[ADD_1:%.*]] = add i8 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    store i8 [[ADD_1]], ptr [[ARRAYIDX2_1]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 2
+; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
+; CHECK-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 2
+; CHECK-NEXT:    [[TMP5:%.*]] = load i8, ptr [[ARRAYIDX2_2]], align 1
+; CHECK-NEXT:    [[ADD_2:%.*]] = add i8 [[TMP5]], [[TMP4]]
+; CHECK-NEXT:    store i8 [[ADD_2]], ptr [[ARRAYIDX2_2]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 3
+; CHECK-NEXT:    [[TMP6:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
+; CHECK-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 3
+; CHECK-NEXT:    [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX2_3]], align 1
+; CHECK-NEXT:    [[ADD_3:%.*]] = add i8 [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    store i8 [[ADD_3]], ptr [[ARRAYIDX2_3]], align 1
+; CHECK-NEXT:    ret void
 ;
 entry:
   %0 = load i8, ptr %a, align 1
@@ -91,21 +65,13 @@ entry:
 }
 
 define void @add8(ptr noalias nocapture noundef %r, ptr noalias nocapture noundef readonly %a) {
-; SSE-LABEL: @add8(
-; SSE-NEXT:  entry:
-; SSE-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A:%.*]], align 1
-; SSE-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[R:%.*]], align 1
-; SSE-NEXT:    [[TMP2:%.*]] = add <8 x i8> [[TMP1]], [[TMP0]]
-; SSE-NEXT:    store <8 x i8> [[TMP2]], ptr [[R]], align 1
-; SSE-NEXT:    ret void
-;
-; AVX-LABEL: @add8(
-; AVX-NEXT:  entry:
-; AVX-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A:%.*]], align 1
-; AVX-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[R:%.*]], align 1
-; AVX-NEXT:    [[TMP2:%.*]] = add <8 x i8> [[TMP1]], [[TMP0]]
-; AVX-NEXT:    store <8 x i8> [[TMP2]], ptr [[R]], align 1
-; AVX-NEXT:    ret void
+; CHECK-LABEL: @add8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[R:%.*]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = add <8 x i8> [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    store <8 x i8> [[TMP2]], ptr [[R]], align 1
+; CHECK-NEXT:    ret void
 ;
 entry:
   %0 = load i8, ptr %a, align 1
@@ -158,21 +124,13 @@ entry:
 }
 
 define void @add16(ptr noalias nocapture noundef %r, ptr noalias nocapture noundef readonly %a) {
-; SSE-LABEL: @add16(
-; SSE-NEXT:  entry:
-; SSE-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[A:%.*]], align 1
-; SSE-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr [[R:%.*]], align 1
-; SSE-NEXT:    [[TMP2:%.*]] = add <16 x i8> [[TMP1]], [[TMP0]]
-; SSE-NEXT:    store <16 x i8> [[TMP2]], ptr [[R]], align 1
-; SSE-NEXT:    ret void
-;
-; AVX-LABEL: @add16(
-; AVX-NEXT:  entry:
-; AVX-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[A:%.*]], align 1
-; AVX-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr [[R:%.*]], align 1
-; AVX-NEXT:    [[TMP2:%.*]] = add <16 x i8> [[TMP1]], [[TMP0]]
-; AVX-NEXT:    store <16 x i8> [[TMP2]], ptr [[R]], align 1
-; AVX-NEXT:    ret void
+; CHECK-LABEL: @add16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[A:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr [[R:%.*]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = add <16 x i8> [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    store <16 x i8> [[TMP2]], ptr [[R]], align 1
+; CHECK-NEXT:    ret void
 ;
 entry:
   %0 = load i8, ptr %a, align 1

diff  --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-and-const-load.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-and-const-load.ll
index 73fde350248bc..dd59a7428f26b 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-and-const-load.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-and-const-load.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64    -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefix=SSE
-; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v2 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefix=SSE
-; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v3 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefix=AVX
-; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v4 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefix=AVX
+; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64    -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v2 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v3 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v4 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX
 
 ; // PR49934
 ; void baz(unsigned char *dst, unsigned char *src) {
@@ -12,49 +12,27 @@
 ; }
 
 define void @and4(ptr noalias nocapture noundef writeonly %dst, ptr noalias nocapture noundef readonly %src) {
-; SSE-LABEL: @and4(
-; SSE-NEXT:  entry:
-; SSE-NEXT:    [[TMP0:%.*]] = load i8, ptr [[SRC:%.*]], align 1
-; SSE-NEXT:    [[TMP1:%.*]] = and i8 [[TMP0]], -64
-; SSE-NEXT:    store i8 [[TMP1]], ptr [[DST:%.*]], align 1
-; SSE-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 1
-; SSE-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
-; SSE-NEXT:    [[TMP3:%.*]] = and i8 [[TMP2]], -64
-; SSE-NEXT:    [[ARRAYIDX3_1:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 1
-; SSE-NEXT:    store i8 [[TMP3]], ptr [[ARRAYIDX3_1]], align 1
-; SSE-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
-; SSE-NEXT:    [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
-; SSE-NEXT:    [[TMP5:%.*]] = and i8 [[TMP4]], -64
-; SSE-NEXT:    [[ARRAYIDX3_2:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 2
-; SSE-NEXT:    store i8 [[TMP5]], ptr [[ARRAYIDX3_2]], align 1
-; SSE-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 3
-; SSE-NEXT:    [[TMP6:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
-; SSE-NEXT:    [[TMP7:%.*]] = and i8 [[TMP6]], -64
-; SSE-NEXT:    [[ARRAYIDX3_3:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 3
-; SSE-NEXT:    store i8 [[TMP7]], ptr [[ARRAYIDX3_3]], align 1
-; SSE-NEXT:    ret void
-;
-; AVX-LABEL: @and4(
-; AVX-NEXT:  entry:
-; AVX-NEXT:    [[TMP0:%.*]] = load i8, ptr [[SRC:%.*]], align 1
-; AVX-NEXT:    [[TMP1:%.*]] = and i8 [[TMP0]], -64
-; AVX-NEXT:    store i8 [[TMP1]], ptr [[DST:%.*]], align 1
-; AVX-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 1
-; AVX-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
-; AVX-NEXT:    [[TMP3:%.*]] = and i8 [[TMP2]], -64
-; AVX-NEXT:    [[ARRAYIDX3_1:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 1
-; AVX-NEXT:    store i8 [[TMP3]], ptr [[ARRAYIDX3_1]], align 1
-; AVX-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
-; AVX-NEXT:    [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
-; AVX-NEXT:    [[TMP5:%.*]] = and i8 [[TMP4]], -64
-; AVX-NEXT:    [[ARRAYIDX3_2:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 2
-; AVX-NEXT:    store i8 [[TMP5]], ptr [[ARRAYIDX3_2]], align 1
-; AVX-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 3
-; AVX-NEXT:    [[TMP6:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
-; AVX-NEXT:    [[TMP7:%.*]] = and i8 [[TMP6]], -64
-; AVX-NEXT:    [[ARRAYIDX3_3:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 3
-; AVX-NEXT:    store i8 [[TMP7]], ptr [[ARRAYIDX3_3]], align 1
-; AVX-NEXT:    ret void
+; CHECK-LABEL: @and4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[SRC:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[TMP0]], -64
+; CHECK-NEXT:    store i8 [[TMP1]], ptr [[DST:%.*]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = and i8 [[TMP2]], -64
+; CHECK-NEXT:    [[ARRAYIDX3_1:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 1
+; CHECK-NEXT:    store i8 [[TMP3]], ptr [[ARRAYIDX3_1]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
+; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
+; CHECK-NEXT:    [[TMP5:%.*]] = and i8 [[TMP4]], -64
+; CHECK-NEXT:    [[ARRAYIDX3_2:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 2
+; CHECK-NEXT:    store i8 [[TMP5]], ptr [[ARRAYIDX3_2]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 3
+; CHECK-NEXT:    [[TMP6:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = and i8 [[TMP6]], -64
+; CHECK-NEXT:    [[ARRAYIDX3_3:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 3
+; CHECK-NEXT:    store i8 [[TMP7]], ptr [[ARRAYIDX3_3]], align 1
+; CHECK-NEXT:    ret void
 ;
 entry:
   %0 = load i8, ptr %src, align 1
@@ -79,19 +57,12 @@ entry:
 }
 
 define void @and8(ptr noalias nocapture noundef writeonly %dst, ptr noalias nocapture noundef readonly %src) {
-; SSE-LABEL: @and8(
-; SSE-NEXT:  entry:
-; SSE-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[SRC:%.*]], align 1
-; SSE-NEXT:    [[TMP1:%.*]] = and <8 x i8> [[TMP0]], <i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64>
-; SSE-NEXT:    store <8 x i8> [[TMP1]], ptr [[DST:%.*]], align 1
-; SSE-NEXT:    ret void
-;
-; AVX-LABEL: @and8(
-; AVX-NEXT:  entry:
-; AVX-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[SRC:%.*]], align 1
-; AVX-NEXT:    [[TMP1:%.*]] = and <8 x i8> [[TMP0]], <i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64>
-; AVX-NEXT:    store <8 x i8> [[TMP1]], ptr [[DST:%.*]], align 1
-; AVX-NEXT:    ret void
+; CHECK-LABEL: @and8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[SRC:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = and <8 x i8> [[TMP0]], <i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64>
+; CHECK-NEXT:    store <8 x i8> [[TMP1]], ptr [[DST:%.*]], align 1
+; CHECK-NEXT:    ret void
 ;
 entry:
   %0 = load i8, ptr %src, align 1
@@ -136,19 +107,12 @@ entry:
 }
 
 define void @and16(ptr noalias nocapture noundef writeonly %dst, ptr noalias nocapture noundef readonly %src) {
-; SSE-LABEL: @and16(
-; SSE-NEXT:  entry:
-; SSE-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[SRC:%.*]], align 1
-; SSE-NEXT:    [[TMP1:%.*]] = and <16 x i8> [[TMP0]], <i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64>
-; SSE-NEXT:    store <16 x i8> [[TMP1]], ptr [[DST:%.*]], align 1
-; SSE-NEXT:    ret void
-;
-; AVX-LABEL: @and16(
-; AVX-NEXT:  entry:
-; AVX-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[SRC:%.*]], align 1
-; AVX-NEXT:    [[TMP1:%.*]] = and <16 x i8> [[TMP0]], <i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64>
-; AVX-NEXT:    store <16 x i8> [[TMP1]], ptr [[DST:%.*]], align 1
-; AVX-NEXT:    ret void
+; CHECK-LABEL: @and16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[SRC:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = and <16 x i8> [[TMP0]], <i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64, i8 -64>
+; CHECK-NEXT:    store <16 x i8> [[TMP1]], ptr [[DST:%.*]], align 1
+; CHECK-NEXT:    ret void
 ;
 entry:
   %0 = load i8, ptr %src, align 1

diff  --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-mul-load.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-mul-load.ll
index 46dc722660c03..4048bf58d44c5 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/arith-mul-load.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-mul-load.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64    -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefix=SSE
-; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v2 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefix=SSE
-; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v3 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefix=AVX
-; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v4 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefix=AVX
+; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64    -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v2 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v3 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-unknown -mcpu=x86-64-v4 -basic-aa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX
 
 ; // PR47491
 ; void pr(char* r, char* a){
@@ -12,57 +12,31 @@
 ; }
 
 define void @add4(ptr noalias nocapture noundef %r, ptr noalias nocapture noundef readonly %a) {
-; SSE-LABEL: @add4(
-; SSE-NEXT:  entry:
-; SSE-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A:%.*]], align 1
-; SSE-NEXT:    [[TMP1:%.*]] = load i8, ptr [[R:%.*]], align 1
-; SSE-NEXT:    [[MUL:%.*]] = mul i8 [[TMP1]], [[TMP0]]
-; SSE-NEXT:    store i8 [[MUL]], ptr [[R]], align 1
-; SSE-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 1
-; SSE-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
-; SSE-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 1
-; SSE-NEXT:    [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX2_1]], align 1
-; SSE-NEXT:    [[MUL_1:%.*]] = mul i8 [[TMP3]], [[TMP2]]
-; SSE-NEXT:    store i8 [[MUL_1]], ptr [[ARRAYIDX2_1]], align 1
-; SSE-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 2
-; SSE-NEXT:    [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
-; SSE-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 2
-; SSE-NEXT:    [[TMP5:%.*]] = load i8, ptr [[ARRAYIDX2_2]], align 1
-; SSE-NEXT:    [[MUL_2:%.*]] = mul i8 [[TMP5]], [[TMP4]]
-; SSE-NEXT:    store i8 [[MUL_2]], ptr [[ARRAYIDX2_2]], align 1
-; SSE-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 3
-; SSE-NEXT:    [[TMP6:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
-; SSE-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 3
-; SSE-NEXT:    [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX2_3]], align 1
-; SSE-NEXT:    [[MUL_3:%.*]] = mul i8 [[TMP7]], [[TMP6]]
-; SSE-NEXT:    store i8 [[MUL_3]], ptr [[ARRAYIDX2_3]], align 1
-; SSE-NEXT:    ret void
-;
-; AVX-LABEL: @add4(
-; AVX-NEXT:  entry:
-; AVX-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A:%.*]], align 1
-; AVX-NEXT:    [[TMP1:%.*]] = load i8, ptr [[R:%.*]], align 1
-; AVX-NEXT:    [[MUL:%.*]] = mul i8 [[TMP1]], [[TMP0]]
-; AVX-NEXT:    store i8 [[MUL]], ptr [[R]], align 1
-; AVX-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 1
-; AVX-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
-; AVX-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 1
-; AVX-NEXT:    [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX2_1]], align 1
-; AVX-NEXT:    [[MUL_1:%.*]] = mul i8 [[TMP3]], [[TMP2]]
-; AVX-NEXT:    store i8 [[MUL_1]], ptr [[ARRAYIDX2_1]], align 1
-; AVX-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 2
-; AVX-NEXT:    [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
-; AVX-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 2
-; AVX-NEXT:    [[TMP5:%.*]] = load i8, ptr [[ARRAYIDX2_2]], align 1
-; AVX-NEXT:    [[MUL_2:%.*]] = mul i8 [[TMP5]], [[TMP4]]
-; AVX-NEXT:    store i8 [[MUL_2]], ptr [[ARRAYIDX2_2]], align 1
-; AVX-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 3
-; AVX-NEXT:    [[TMP6:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
-; AVX-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 3
-; AVX-NEXT:    [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX2_3]], align 1
-; AVX-NEXT:    [[MUL_3:%.*]] = mul i8 [[TMP7]], [[TMP6]]
-; AVX-NEXT:    store i8 [[MUL_3]], ptr [[ARRAYIDX2_3]], align 1
-; AVX-NEXT:    ret void
+; CHECK-LABEL: @add4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[R:%.*]], align 1
+; CHECK-NEXT:    [[MUL:%.*]] = mul i8 [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    store i8 [[MUL]], ptr [[R]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
+; CHECK-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX2_1]], align 1
+; CHECK-NEXT:    [[MUL_1:%.*]] = mul i8 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    store i8 [[MUL_1]], ptr [[ARRAYIDX2_1]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 2
+; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
+; CHECK-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 2
+; CHECK-NEXT:    [[TMP5:%.*]] = load i8, ptr [[ARRAYIDX2_2]], align 1
+; CHECK-NEXT:    [[MUL_2:%.*]] = mul i8 [[TMP5]], [[TMP4]]
+; CHECK-NEXT:    store i8 [[MUL_2]], ptr [[ARRAYIDX2_2]], align 1
+; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 3
+; CHECK-NEXT:    [[TMP6:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
+; CHECK-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i8, ptr [[R]], i64 3
+; CHECK-NEXT:    [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX2_3]], align 1
+; CHECK-NEXT:    [[MUL_3:%.*]] = mul i8 [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    store i8 [[MUL_3]], ptr [[ARRAYIDX2_3]], align 1
+; CHECK-NEXT:    ret void
 ;
 entry:
   %0 = load i8, ptr %a, align 1
@@ -91,21 +65,13 @@ entry:
 }
 
 define void @add8(ptr noalias nocapture noundef %r, ptr noalias nocapture noundef readonly %a) {
-; SSE-LABEL: @add8(
-; SSE-NEXT:  entry:
-; SSE-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A:%.*]], align 1
-; SSE-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[R:%.*]], align 1
-; SSE-NEXT:    [[TMP2:%.*]] = mul <8 x i8> [[TMP1]], [[TMP0]]
-; SSE-NEXT:    store <8 x i8> [[TMP2]], ptr [[R]], align 1
-; SSE-NEXT:    ret void
-;
-; AVX-LABEL: @add8(
-; AVX-NEXT:  entry:
-; AVX-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A:%.*]], align 1
-; AVX-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[R:%.*]], align 1
-; AVX-NEXT:    [[TMP2:%.*]] = mul <8 x i8> [[TMP1]], [[TMP0]]
-; AVX-NEXT:    store <8 x i8> [[TMP2]], ptr [[R]], align 1
-; AVX-NEXT:    ret void
+; CHECK-LABEL: @add8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[R:%.*]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = mul <8 x i8> [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    store <8 x i8> [[TMP2]], ptr [[R]], align 1
+; CHECK-NEXT:    ret void
 ;
 entry:
   %0 = load i8, ptr %a, align 1
@@ -158,21 +124,13 @@ entry:
 }
 
 define void @add16(ptr noalias nocapture noundef %r, ptr noalias nocapture noundef readonly %a) {
-; SSE-LABEL: @add16(
-; SSE-NEXT:  entry:
-; SSE-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[A:%.*]], align 1
-; SSE-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr [[R:%.*]], align 1
-; SSE-NEXT:    [[TMP2:%.*]] = mul <16 x i8> [[TMP1]], [[TMP0]]
-; SSE-NEXT:    store <16 x i8> [[TMP2]], ptr [[R]], align 1
-; SSE-NEXT:    ret void
-;
-; AVX-LABEL: @add16(
-; AVX-NEXT:  entry:
-; AVX-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[A:%.*]], align 1
-; AVX-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr [[R:%.*]], align 1
-; AVX-NEXT:    [[TMP2:%.*]] = mul <16 x i8> [[TMP1]], [[TMP0]]
-; AVX-NEXT:    store <16 x i8> [[TMP2]], ptr [[R]], align 1
-; AVX-NEXT:    ret void
+; CHECK-LABEL: @add16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[A:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr [[R:%.*]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = mul <16 x i8> [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    store <16 x i8> [[TMP2]], ptr [[R]], align 1
+; CHECK-NEXT:    ret void
 ;
 entry:
   %0 = load i8, ptr %a, align 1
