[llvm] b8c7d52 - [NFC][X86][LV] Add costmodel test coverage for interleaved i64/f64 load/store stride=4

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 4 07:33:07 PDT 2021


Author: Roman Lebedev
Date: 2021-10-04T17:31:57+03:00
New Revision: b8c7d5229c156e81e63e419e6bdff7598ece1d36

URL: https://github.com/llvm/llvm-project/commit/b8c7d5229c156e81e63e419e6bdff7598ece1d36
DIFF: https://github.com/llvm/llvm-project/commit/b8c7d5229c156e81e63e419e6bdff7598ece1d36.diff

LOG: [NFC][X86][LV] Add costmodel test coverage for interleaved i64/f64 load/store stride=4

Added: 
    llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-4.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-4.ll
    llvm/test/Analysis/CostModel/X86/interleaved-store-f64-stride-4.ll
    llvm/test/Analysis/CostModel/X86/interleaved-store-i64-stride-4.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-4.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-4.ll
new file mode 100644
index 000000000000..b790b31bcd07
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-4.ll
@@ -0,0 +1,75 @@
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+sse2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx  --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX1
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx512bw,+avx512vl --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX512
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = global [1024 x double] zeroinitializer, align 128
+@B = global [1024 x i8] zeroinitializer, align 128
+
+; CHECK: LV: Checking a loop in "test"
+;
+; SSE2: LV: Found an estimated cost of 1 for VF 1 For instruction:   %v0 = load double, double* %in0, align 8
+; SSE2: LV: Found an estimated cost of 12 for VF 2 For instruction:   %v0 = load double, double* %in0, align 8
+; SSE2: LV: Found an estimated cost of 24 for VF 4 For instruction:   %v0 = load double, double* %in0, align 8
+;
+; AVX1: LV: Found an estimated cost of 1 for VF 1 For instruction:   %v0 = load double, double* %in0, align 8
+; AVX1: LV: Found an estimated cost of 14 for VF 2 For instruction:   %v0 = load double, double* %in0, align 8
+; AVX1: LV: Found an estimated cost of 32 for VF 4 For instruction:   %v0 = load double, double* %in0, align 8
+; AVX1: LV: Found an estimated cost of 64 for VF 8 For instruction:   %v0 = load double, double* %in0, align 8
+;
+; AVX2: LV: Found an estimated cost of 1 for VF 1 For instruction:   %v0 = load double, double* %in0, align 8
+; AVX2: LV: Found an estimated cost of 14 for VF 2 For instruction:   %v0 = load double, double* %in0, align 8
+; AVX2: LV: Found an estimated cost of 32 for VF 4 For instruction:   %v0 = load double, double* %in0, align 8
+; AVX2: LV: Found an estimated cost of 64 for VF 8 For instruction:   %v0 = load double, double* %in0, align 8
+;
+; AVX512: LV: Found an estimated cost of 1 for VF 1 For instruction:   %v0 = load double, double* %in0, align 8
+; AVX512: LV: Found an estimated cost of 5 for VF 2 For instruction:   %v0 = load double, double* %in0, align 8
+; AVX512: LV: Found an estimated cost of 8 for VF 4 For instruction:   %v0 = load double, double* %in0, align 8
+; AVX512: LV: Found an estimated cost of 22 for VF 8 For instruction:   %v0 = load double, double* %in0, align 8
+; AVX512: LV: Found an estimated cost of 80 for VF 16 For instruction:   %v0 = load double, double* %in0, align 8
+; AVX512: LV: Found an estimated cost of 160 for VF 32 For instruction:   %v0 = load double, double* %in0, align 8
+;
+; CHECK-NOT: LV: Found an estimated cost of {{[0-9]+}} for VF {{[0-9]+}} For instruction:   %v0 = load double, double* %in0, align 8
+
+define void @test() {
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+
+  %iv.0 = add nuw nsw i64 %iv, 0
+  %iv.1 = add nuw nsw i64 %iv, 1
+  %iv.2 = add nuw nsw i64 %iv, 2
+  %iv.3 = add nuw nsw i64 %iv, 3
+
+  %in0 = getelementptr inbounds [1024 x double], [1024 x double]* @A, i64 0, i64 %iv.0
+  %in1 = getelementptr inbounds [1024 x double], [1024 x double]* @A, i64 0, i64 %iv.1
+  %in2 = getelementptr inbounds [1024 x double], [1024 x double]* @A, i64 0, i64 %iv.2
+  %in3 = getelementptr inbounds [1024 x double], [1024 x double]* @A, i64 0, i64 %iv.3
+
+  %v0 = load double, double* %in0
+  %v1 = load double, double* %in1
+  %v2 = load double, double* %in2
+  %v3 = load double, double* %in3
+
+  %reduce.add.0 = fadd double %v0, %v1
+  %reduce.add.1 = fadd double %reduce.add.0, %v2
+  %reduce.add.2 = fadd double %reduce.add.1, %v3
+
+  %reduce.add.2.narrow = fptoui double %reduce.add.2 to i8
+
+  %out = getelementptr inbounds [1024 x i8], [1024 x i8]* @B, i64 0, i64 %iv.0
+  store i8 %reduce.add.2.narrow, i8* %out
+
+  %iv.next = add nuw nsw i64 %iv.0, 4
+  %cmp = icmp ult i64 %iv.next, 1024
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+  ret void
+}

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-4.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-4.ll
new file mode 100644
index 000000000000..c806af5fd2aa
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-4.ll
@@ -0,0 +1,75 @@
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+sse2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx  --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX1
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx512bw,+avx512vl --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX512
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = global [1024 x i64] zeroinitializer, align 128
+@B = global [1024 x i8] zeroinitializer, align 128
+
+; CHECK: LV: Checking a loop in "test"
+;
+; SSE2: LV: Found an estimated cost of 1 for VF 1 For instruction:   %v0 = load i64, i64* %in0, align 8
+; SSE2: LV: Found an estimated cost of 28 for VF 2 For instruction:   %v0 = load i64, i64* %in0, align 8
+; SSE2: LV: Found an estimated cost of 56 for VF 4 For instruction:   %v0 = load i64, i64* %in0, align 8
+;
+; AVX1: LV: Found an estimated cost of 1 for VF 1 For instruction:   %v0 = load i64, i64* %in0, align 8
+; AVX1: LV: Found an estimated cost of 22 for VF 2 For instruction:   %v0 = load i64, i64* %in0, align 8
+; AVX1: LV: Found an estimated cost of 52 for VF 4 For instruction:   %v0 = load i64, i64* %in0, align 8
+; AVX1: LV: Found an estimated cost of 104 for VF 8 For instruction:   %v0 = load i64, i64* %in0, align 8
+;
+; AVX2: LV: Found an estimated cost of 1 for VF 1 For instruction:   %v0 = load i64, i64* %in0, align 8
+; AVX2: LV: Found an estimated cost of 22 for VF 2 For instruction:   %v0 = load i64, i64* %in0, align 8
+; AVX2: LV: Found an estimated cost of 52 for VF 4 For instruction:   %v0 = load i64, i64* %in0, align 8
+; AVX2: LV: Found an estimated cost of 104 for VF 8 For instruction:   %v0 = load i64, i64* %in0, align 8
+;
+; AVX512: LV: Found an estimated cost of 1 for VF 1 For instruction:   %v0 = load i64, i64* %in0, align 8
+; AVX512: LV: Found an estimated cost of 5 for VF 2 For instruction:   %v0 = load i64, i64* %in0, align 8
+; AVX512: LV: Found an estimated cost of 8 for VF 4 For instruction:   %v0 = load i64, i64* %in0, align 8
+; AVX512: LV: Found an estimated cost of 22 for VF 8 For instruction:   %v0 = load i64, i64* %in0, align 8
+; AVX512: LV: Found an estimated cost of 80 for VF 16 For instruction:   %v0 = load i64, i64* %in0, align 8
+; AVX512: LV: Found an estimated cost of 160 for VF 32 For instruction:   %v0 = load i64, i64* %in0, align 8
+;
+; CHECK-NOT: LV: Found an estimated cost of {{[0-9]+}} for VF {{[0-9]+}} For instruction:   %v0 = load i64, i64* %in0, align 8
+
+define void @test() {
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+
+  %iv.0 = add nuw nsw i64 %iv, 0
+  %iv.1 = add nuw nsw i64 %iv, 1
+  %iv.2 = add nuw nsw i64 %iv, 2
+  %iv.3 = add nuw nsw i64 %iv, 3
+
+  %in0 = getelementptr inbounds [1024 x i64], [1024 x i64]* @A, i64 0, i64 %iv.0
+  %in1 = getelementptr inbounds [1024 x i64], [1024 x i64]* @A, i64 0, i64 %iv.1
+  %in2 = getelementptr inbounds [1024 x i64], [1024 x i64]* @A, i64 0, i64 %iv.2
+  %in3 = getelementptr inbounds [1024 x i64], [1024 x i64]* @A, i64 0, i64 %iv.3
+
+  %v0 = load i64, i64* %in0
+  %v1 = load i64, i64* %in1
+  %v2 = load i64, i64* %in2
+  %v3 = load i64, i64* %in3
+
+  %reduce.add.0 = add i64 %v0, %v1
+  %reduce.add.1 = add i64 %reduce.add.0, %v2
+  %reduce.add.2 = add i64 %reduce.add.1, %v3
+
+  %reduce.add.2.narrow = trunc i64 %reduce.add.2 to i8
+
+  %out = getelementptr inbounds [1024 x i8], [1024 x i8]* @B, i64 0, i64 %iv.0
+  store i8 %reduce.add.2.narrow, i8* %out
+
+  %iv.next = add nuw nsw i64 %iv.0, 4
+  %cmp = icmp ult i64 %iv.next, 1024
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+  ret void
+}

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-store-f64-stride-4.ll b/llvm/test/Analysis/CostModel/X86/interleaved-store-f64-stride-4.ll
new file mode 100644
index 000000000000..0493d865a704
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-store-f64-stride-4.ll
@@ -0,0 +1,76 @@
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+sse2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx  --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX1
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx512bw,+avx512vl --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX512
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = global [1024 x i8] zeroinitializer, align 128
+@B = global [1024 x double] zeroinitializer, align 128
+
+; CHECK: LV: Checking a loop in "test"
+;
+; SSE2: LV: Found an estimated cost of 1 for VF 1 For instruction:   store double %v3, double* %out3, align 8
+; SSE2: LV: Found an estimated cost of 12 for VF 2 For instruction:   store double %v3, double* %out3, align 8
+; SSE2: LV: Found an estimated cost of 24 for VF 4 For instruction:   store double %v3, double* %out3, align 8
+;
+; AVX1: LV: Found an estimated cost of 1 for VF 1 For instruction:   store double %v3, double* %out3, align 8
+; AVX1: LV: Found an estimated cost of 12 for VF 2 For instruction:   store double %v3, double* %out3, align 8
+; AVX1: LV: Found an estimated cost of 32 for VF 4 For instruction:   store double %v3, double* %out3, align 8
+; AVX1: LV: Found an estimated cost of 64 for VF 8 For instruction:   store double %v3, double* %out3, align 8
+;
+; AVX2: LV: Found an estimated cost of 1 for VF 1 For instruction:   store double %v3, double* %out3, align 8
+; AVX2: LV: Found an estimated cost of 12 for VF 2 For instruction:   store double %v3, double* %out3, align 8
+; AVX2: LV: Found an estimated cost of 32 for VF 4 For instruction:   store double %v3, double* %out3, align 8
+; AVX2: LV: Found an estimated cost of 64 for VF 8 For instruction:   store double %v3, double* %out3, align 8
+;
+; AVX512: LV: Found an estimated cost of 1 for VF 1 For instruction:   store double %v3, double* %out3, align 8
+; AVX512: LV: Found an estimated cost of 5 for VF 2 For instruction:   store double %v3, double* %out3, align 8
+; AVX512: LV: Found an estimated cost of 11 for VF 4 For instruction:   store double %v3, double* %out3, align 8
+; AVX512: LV: Found an estimated cost of 22 for VF 8 For instruction:   store double %v3, double* %out3, align 8
+; AVX512: LV: Found an estimated cost of 44 for VF 16 For instruction:   store double %v3, double* %out3, align 8
+; AVX512: LV: Found an estimated cost of 88 for VF 32 For instruction:   store double %v3, double* %out3, align 8
+;
+; CHECK-NOT: LV: Found an estimated cost of {{[0-9]+}} for VF {{[0-9]+}} For instruction:   store double %v3, double* %out3, align 8
+
+define void @test() {
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+
+  %iv.0 = add nuw nsw i64 %iv, 0
+  %iv.1 = add nuw nsw i64 %iv, 1
+  %iv.2 = add nuw nsw i64 %iv, 2
+  %iv.3 = add nuw nsw i64 %iv, 3
+
+  %in = getelementptr inbounds [1024 x i8], [1024 x i8]* @A, i64 0, i64 %iv.0
+  %v.narrow = load i8, i8* %in
+
+  %v = uitofp i8 %v.narrow to double
+
+  %v0 = fadd double %v, 0.0
+  %v1 = fadd double %v, 1.0
+  %v2 = fadd double %v, 2.0
+  %v3 = fadd double %v, 3.0
+
+  %out0 = getelementptr inbounds [1024 x double], [1024 x double]* @B, i64 0, i64 %iv.0
+  %out1 = getelementptr inbounds [1024 x double], [1024 x double]* @B, i64 0, i64 %iv.1
+  %out2 = getelementptr inbounds [1024 x double], [1024 x double]* @B, i64 0, i64 %iv.2
+  %out3 = getelementptr inbounds [1024 x double], [1024 x double]* @B, i64 0, i64 %iv.3
+
+  store double %v0, double* %out0
+  store double %v1, double* %out1
+  store double %v2, double* %out2
+  store double %v3, double* %out3
+
+  %iv.next = add nuw nsw i64 %iv.0, 4
+  %cmp = icmp ult i64 %iv.next, 1024
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+  ret void
+}

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-store-i64-stride-4.ll b/llvm/test/Analysis/CostModel/X86/interleaved-store-i64-stride-4.ll
new file mode 100644
index 000000000000..7d8cb843fcc4
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-store-i64-stride-4.ll
@@ -0,0 +1,76 @@
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+sse2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx  --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX1
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx512bw,+avx512vl --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX512
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = global [1024 x i8] zeroinitializer, align 128
+@B = global [1024 x i64] zeroinitializer, align 128
+
+; CHECK: LV: Checking a loop in "test"
+;
+; SSE2: LV: Found an estimated cost of 1 for VF 1 For instruction:   store i64 %v3, i64* %out3, align 8
+; SSE2: LV: Found an estimated cost of 28 for VF 2 For instruction:   store i64 %v3, i64* %out3, align 8
+; SSE2: LV: Found an estimated cost of 56 for VF 4 For instruction:   store i64 %v3, i64* %out3, align 8
+;
+; AVX1: LV: Found an estimated cost of 1 for VF 1 For instruction:   store i64 %v3, i64* %out3, align 8
+; AVX1: LV: Found an estimated cost of 22 for VF 2 For instruction:   store i64 %v3, i64* %out3, align 8
+; AVX1: LV: Found an estimated cost of 52 for VF 4 For instruction:   store i64 %v3, i64* %out3, align 8
+; AVX1: LV: Found an estimated cost of 104 for VF 8 For instruction:   store i64 %v3, i64* %out3, align 8
+;
+; AVX2: LV: Found an estimated cost of 1 for VF 1 For instruction:   store i64 %v3, i64* %out3, align 8
+; AVX2: LV: Found an estimated cost of 22 for VF 2 For instruction:   store i64 %v3, i64* %out3, align 8
+; AVX2: LV: Found an estimated cost of 52 for VF 4 For instruction:   store i64 %v3, i64* %out3, align 8
+; AVX2: LV: Found an estimated cost of 104 for VF 8 For instruction:   store i64 %v3, i64* %out3, align 8
+;
+; AVX512: LV: Found an estimated cost of 1 for VF 1 For instruction:   store i64 %v3, i64* %out3, align 8
+; AVX512: LV: Found an estimated cost of 5 for VF 2 For instruction:   store i64 %v3, i64* %out3, align 8
+; AVX512: LV: Found an estimated cost of 11 for VF 4 For instruction:   store i64 %v3, i64* %out3, align 8
+; AVX512: LV: Found an estimated cost of 22 for VF 8 For instruction:   store i64 %v3, i64* %out3, align 8
+; AVX512: LV: Found an estimated cost of 44 for VF 16 For instruction:   store i64 %v3, i64* %out3, align 8
+; AVX512: LV: Found an estimated cost of 88 for VF 32 For instruction:   store i64 %v3, i64* %out3, align 8
+;
+; CHECK-NOT: LV: Found an estimated cost of {{[0-9]+}} for VF {{[0-9]+}} For instruction:   store i64 %v3, i64* %out3, align 8
+
+define void @test() {
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+
+  %iv.0 = add nuw nsw i64 %iv, 0
+  %iv.1 = add nuw nsw i64 %iv, 1
+  %iv.2 = add nuw nsw i64 %iv, 2
+  %iv.3 = add nuw nsw i64 %iv, 3
+
+  %in = getelementptr inbounds [1024 x i8], [1024 x i8]* @A, i64 0, i64 %iv.0
+  %v.narrow = load i8, i8* %in
+
+  %v = zext i8 %v.narrow to i64
+
+  %v0 = add i64 %v, 0
+  %v1 = add i64 %v, 1
+  %v2 = add i64 %v, 2
+  %v3 = add i64 %v, 3
+
+  %out0 = getelementptr inbounds [1024 x i64], [1024 x i64]* @B, i64 0, i64 %iv.0
+  %out1 = getelementptr inbounds [1024 x i64], [1024 x i64]* @B, i64 0, i64 %iv.1
+  %out2 = getelementptr inbounds [1024 x i64], [1024 x i64]* @B, i64 0, i64 %iv.2
+  %out3 = getelementptr inbounds [1024 x i64], [1024 x i64]* @B, i64 0, i64 %iv.3
+
+  store i64 %v0, i64* %out0
+  store i64 %v1, i64* %out1
+  store i64 %v2, i64* %out2
+  store i64 %v3, i64* %out3
+
+  %iv.next = add nuw nsw i64 %iv.0, 4
+  %cmp = icmp ult i64 %iv.next, 1024
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+  ret void
+}


        


More information about the llvm-commits mailing list