[llvm] 3c205ef - [SLP][AArch64] Add fmuladd test coverage

Dinar Temirbulatov via llvm-commits llvm-commits@lists.llvm.org
Tue Jan 3 03:30:17 PST 2023


Author: Dinar Temirbulatov
Date: 2023-01-03T11:28:18Z
New Revision: 3c205efe8bc08d01d9fa91ab4d9cffec828dd8ea

URL: https://github.com/llvm/llvm-project/commit/3c205efe8bc08d01d9fa91ab4d9cffec828dd8ea
DIFF: https://github.com/llvm/llvm-project/commit/3c205efe8bc08d01d9fa91ab4d9cffec828dd8ea.diff

LOG: [SLP][AArch64] Add fmuladd test coverage

Added: 
    llvm/test/Transforms/SLPVectorizer/AArch64/fmulladd.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/SLPVectorizer/AArch64/fmulladd.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/fmulladd.ll
new file mode 100644
index 0000000000000..16df20505323e
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/fmulladd.ll
@@ -0,0 +1,78 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=slp-vectorizer -mtriple=aarch64-unknown-unknown -mcpu=cortex-a53 -S | FileCheck %s
+
+@b = common global i32 0, align 4
+@a = common global ptr null, align 8
+
+define void @foo(ptr %d, ptr %e) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x double], ptr [[D:%.*]], i64 2
+; CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [4 x double], ptr [[D]], i64 0, i64 3
+; CHECK-NEXT:    [[TMP1:%.*]] = load double, ptr [[ARRAYIDX3]], align 8
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x double], ptr [[E:%.*]], i64 3
+; CHECK-NEXT:    [[TMP2:%.*]] = load double, ptr [[ARRAYIDX4]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP1]], double [[TMP2]], double [[TMP0]])
+; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [4 x double], ptr [[D]], i64 2, i64 1
+; CHECK-NEXT:    [[TMP4:%.*]] = load double, ptr [[ARRAYIDX8]], align 8
+; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds [4 x double], ptr [[E]], i64 3, i64 1
+; CHECK-NEXT:    [[TMP5:%.*]] = load double, ptr [[ARRAYIDX12]], align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP1]], double [[TMP5]], double [[TMP4]])
+; CHECK-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [4 x double], ptr [[D]], i64 1, i64 3
+; CHECK-NEXT:    [[TMP7:%.*]] = load double, ptr [[ARRAYIDX15]], align 8
+; CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [4 x double], ptr [[D]], i64 3, i64 2
+; CHECK-NEXT:    [[TMP8:%.*]] = load double, ptr [[ARRAYIDX17]], align 8
+; CHECK-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds [4 x double], ptr [[E]], i64 2, i64 3
+; CHECK-NEXT:    [[TMP9:%.*]] = load double, ptr [[ARRAYIDX19]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP8]], double [[TMP9]], double [[TMP7]])
+; CHECK-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds [4 x double], ptr [[D]], i64 3, i64 3
+; CHECK-NEXT:    [[TMP11:%.*]] = load double, ptr [[ARRAYIDX21]], align 8
+; CHECK-NEXT:    [[ARRAYIDX23:%.*]] = getelementptr inbounds [4 x double], ptr [[E]], i64 3, i64 3
+; CHECK-NEXT:    [[TMP12:%.*]] = load double, ptr [[ARRAYIDX23]], align 8
+; CHECK-NEXT:    [[TMP13:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP11]], double [[TMP12]], double [[TMP10]])
+; CHECK-NEXT:    [[TMP14:%.*]] = load ptr, ptr @a, align 8
+; CHECK-NEXT:    store double [[TMP3]], ptr [[TMP14]], align 8
+; CHECK-NEXT:    [[F_SROA_4_0__SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i64 8
+; CHECK-NEXT:    store double [[TMP6]], ptr [[F_SROA_4_0__SROA_IDX]], align 8
+; CHECK-NEXT:    [[F_SROA_539_0__SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i64 40
+; CHECK-NEXT:    store double [[TMP13]], ptr [[F_SROA_539_0__SROA_IDX]], align 8
+; CHECK-NEXT:    store i32 6, ptr @b, align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arrayidx = getelementptr inbounds [4 x double], ptr %d, i64 2
+  %0 = load double, ptr %arrayidx, align 8
+  %arrayidx3 = getelementptr inbounds [4 x double], ptr %d, i64 0, i64 3
+  %1 = load double, ptr %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds [4 x double], ptr %e, i64 3
+  %2 = load double, ptr %arrayidx4, align 8
+  %3 = tail call double @llvm.fmuladd.f64(double %1, double %2, double %0)
+  %arrayidx8 = getelementptr inbounds [4 x double], ptr %d, i64 2, i64 1
+  %4 = load double, ptr %arrayidx8, align 8
+  %arrayidx12 = getelementptr inbounds [4 x double], ptr %e, i64 3, i64 1
+  %5 = load double, ptr %arrayidx12, align 8
+  %6 = tail call double @llvm.fmuladd.f64(double %1, double %5, double %4)
+  %arrayidx15 = getelementptr inbounds [4 x double], ptr %d, i64 1, i64 3
+  %7 = load double, ptr %arrayidx15, align 8
+  %arrayidx17 = getelementptr inbounds [4 x double], ptr %d, i64 3, i64 2
+  %8 = load double, ptr %arrayidx17, align 8
+  %arrayidx19 = getelementptr inbounds [4 x double], ptr %e, i64 2, i64 3
+  %9 = load double, ptr %arrayidx19, align 8
+  %10 = tail call double @llvm.fmuladd.f64(double %8, double %9, double %7)
+  %arrayidx21 = getelementptr inbounds [4 x double], ptr %d, i64 3, i64 3
+  %11 = load double, ptr %arrayidx21, align 8
+  %arrayidx23 = getelementptr inbounds [4 x double], ptr %e, i64 3, i64 3
+  %12 = load double, ptr %arrayidx23, align 8
+  %13 = tail call double @llvm.fmuladd.f64(double %11, double %12, double %10)
+  %14 = load ptr, ptr @a, align 8
+  store double %3, ptr %14, align 8
+  %f.sroa.4.0..sroa_idx = getelementptr inbounds i8, ptr %14, i64 8
+  store double %6, ptr %f.sroa.4.0..sroa_idx, align 8
+  %f.sroa.539.0..sroa_idx = getelementptr inbounds i8, ptr %14, i64 40
+  store double %13, ptr %f.sroa.539.0..sroa_idx, align 8
+  store i32 6, ptr @b, align 4
+  ret void
+}
+
+declare double @llvm.fmuladd.f64(double, double, double)


        


More information about the llvm-commits mailing list