[llvm] 456951d - [SLP][NFC]Add a test for possible reordering gap in SLP, NFC.

Alexey Bataev via llvm-commits llvm-commits@lists.llvm.org
Wed Oct 19 08:22:49 PDT 2022


Author: Alexey Bataev
Date: 2022-10-19T08:22:07-07:00
New Revision: 456951dcd3d5ab6cb41aa71831f52cdb2bdcecc4

URL: https://github.com/llvm/llvm-project/commit/456951dcd3d5ab6cb41aa71831f52cdb2bdcecc4
DIFF: https://github.com/llvm/llvm-project/commit/456951dcd3d5ab6cb41aa71831f52cdb2bdcecc4.diff

LOG: [SLP][NFC]Add a test for possible reordering gap in SLP, NFC.

Added: 
    llvm/test/Transforms/SLPVectorizer/X86/sin-sqrt.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/sin-sqrt.ll b/llvm/test/Transforms/SLPVectorizer/X86/sin-sqrt.ll
new file mode 100644
index 0000000000000..8137668e978bf
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/sin-sqrt.ll
@@ -0,0 +1,63 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=skylake-avx512 -passes=slp-vectorizer -S | FileCheck %s
+
+@src = common global [8 x double] zeroinitializer, align 64
+@dst = common global [8 x double] zeroinitializer, align 64
+
+declare double @llvm.sqrt.f64(double)
+declare double @llvm.sin.f64(double)
+
+define void @test() {
+; CHECK-LABEL: @test(
+; CHECK-NEXT:    [[A0:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 0), align 8
+; CHECK-NEXT:    [[A1:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 1), align 8
+; CHECK-NEXT:    [[A2:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 2), align 8
+; CHECK-NEXT:    [[A3:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 3), align 8
+; CHECK-NEXT:    [[A4:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 4), align 8
+; CHECK-NEXT:    [[A5:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 5), align 8
+; CHECK-NEXT:    [[A6:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 6), align 8
+; CHECK-NEXT:    [[A7:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 7), align 8
+; CHECK-NEXT:    [[SIN0:%.*]] = call fast double @llvm.sin.f64(double [[A2]])
+; CHECK-NEXT:    [[SIN1:%.*]] = call fast double @llvm.sin.f64(double [[A3]])
+; CHECK-NEXT:    [[SQRT0:%.*]] = call fast double @llvm.sqrt.f64(double [[A0]])
+; CHECK-NEXT:    [[SQRT1:%.*]] = call fast double @llvm.sqrt.f64(double [[A1]])
+; CHECK-NEXT:    [[SIN2:%.*]] = call fast double @llvm.sin.f64(double [[A6]])
+; CHECK-NEXT:    [[SIN3:%.*]] = call fast double @llvm.sin.f64(double [[A7]])
+; CHECK-NEXT:    [[SQRT2:%.*]] = call fast double @llvm.sqrt.f64(double [[A4]])
+; CHECK-NEXT:    [[SQRT3:%.*]] = call fast double @llvm.sqrt.f64(double [[A5]])
+; CHECK-NEXT:    [[RES1:%.*]] = fadd fast double [[SQRT0]], [[SIN1]]
+; CHECK-NEXT:    [[RES2:%.*]] = fadd fast double [[SIN0]], [[SQRT1]]
+; CHECK-NEXT:    [[RES00:%.*]] = fadd fast double [[RES1]], [[RES2]]
+; CHECK-NEXT:    [[RES3:%.*]] = fadd fast double [[SQRT2]], [[SIN3]]
+; CHECK-NEXT:    [[RES4:%.*]] = fadd fast double [[SIN2]], [[SQRT3]]
+; CHECK-NEXT:    [[RES01:%.*]] = fadd fast double [[RES3]], [[RES4]]
+; CHECK-NEXT:    store double [[RES00]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst, i32 0, i64 0), align 8
+; CHECK-NEXT:    store double [[RES01]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst, i32 0, i64 1), align 8
+; CHECK-NEXT:    ret void
+;
+  %a0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 0), align 8
+  %a1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 1), align 8
+  %a2 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 2), align 8
+  %a3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 3), align 8
+  %a4 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 4), align 8
+  %a5 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 5), align 8
+  %a6 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 6), align 8
+  %a7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 7), align 8
+  %sin0 = call fast double @llvm.sin.f64(double %a2)
+  %sin1 = call fast double @llvm.sin.f64(double %a3)
+  %sqrt0 = call fast double @llvm.sqrt.f64(double %a0)
+  %sqrt1 = call fast double @llvm.sqrt.f64(double %a1)
+  %sin2 = call fast double @llvm.sin.f64(double %a6)
+  %sin3 = call fast double @llvm.sin.f64(double %a7)
+  %sqrt2 = call fast double @llvm.sqrt.f64(double %a4)
+  %sqrt3 = call fast double @llvm.sqrt.f64(double %a5)
+  %res1 = fadd fast double %sqrt0, %sin1
+  %res2 = fadd fast double %sin0, %sqrt1
+  %res00 = fadd fast double %res1, %res2
+  %res3 = fadd fast double %sqrt2, %sin3
+  %res4 = fadd fast double %sin2, %sqrt3
+  %res01 = fadd fast double %res3, %res4
+  store double %res00, double* getelementptr inbounds ([8 x double], [8 x double]* @dst, i32 0, i64 0), align 8
+  store double %res01, double* getelementptr inbounds ([8 x double], [8 x double]* @dst, i32 0, i64 1), align 8
+  ret void
+}


        


More information about the llvm-commits mailing list