[llvm] f9ceb71 - [SLP][NFC] Add a coverage test for horizontal reductions.

Valery N Dmitriev via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 25 16:04:24 PDT 2022


Author: Valery N Dmitriev
Date: 2022-08-25T16:02:22-07:00
New Revision: f9ceb71542e0b15dd57a60c2bf379caeb4a34bec

URL: https://github.com/llvm/llvm-project/commit/f9ceb71542e0b15dd57a60c2bf379caeb4a34bec
DIFF: https://github.com/llvm/llvm-project/commit/f9ceb71542e0b15dd57a60c2bf379caeb4a34bec.diff

LOG: [SLP][NFC] Add a coverage test for horizontal reductions.

The reduction feeds a single insertelement instruction.
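
For reference, a rough C-level analogue of the scalar pattern the new test
exercises (the function and variable names below are illustrative only, not
taken from the test): an 8-element sum of products computed as a scalar
fmul/fadd chain, whose result is written into a single lane of a two-element
store.

    /* Illustrative sketch only: approximates the IR pattern, not the test source. */
    void rdx_feeds_single_insert(double v0, const double *arg1, double *arg2) {
      static const double k[8] = {10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0};
      double sum = 0.0;
      for (int i = 0; i < 8; ++i)   /* scalar fmul/fadd reduction chain in the test */
        sum += arg1[i] * k[i];
      arg2[0]  = v0;                /* lane 0 of the <2 x double> kept from %v */
      arg2[16] = sum;               /* lane 1: the reduction result feeds one insertelement */
    }

The FileCheck lines below verify that SLP turns the load/fmul chain into a
single <8 x double> operation fed into llvm.vector.reduce.fadd, while the lone
insertelement remains scalar.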

Added: 
    llvm/test/Transforms/SLPVectorizer/X86/redux-feed-insertelement.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-insertelement.ll b/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-insertelement.ll
new file mode 100644
index 0000000000000..ae588a1657a40
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-insertelement.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64 -slp-vectorizer -S -mcpu=skylake-avx512 | FileCheck %s
+
+declare void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double>, <2 x double*>, i32 immarg, <2 x i1>)
+
+define void @rdx_feeds_single_insert(<2 x double> %v, double* nocapture readonly %arg, double* nocapture readonly %arg1, double* nocapture %arg2) {
+; CHECK-LABEL: @rdx_feeds_single_insert(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARG1:%.*]] to <8 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = fmul fast <8 x double> [[TMP1]], <double 1.000000e+01, double 1.100000e+01, double 1.200000e+01, double 1.300000e+01, double 1.400000e+01, double 1.500000e+01, double 1.600000e+01, double 1.700000e+01>
+; CHECK-NEXT:    [[TMP3:%.*]] = call fast double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> [[TMP2]])
+; CHECK-NEXT:    [[I:%.*]] = insertelement <2 x double> [[V:%.*]], double [[TMP3]], i64 1
+; CHECK-NEXT:    [[P:%.*]] = getelementptr inbounds double, double* [[ARG2:%.*]], <2 x i64> <i64 0, i64 16>
+; CHECK-NEXT:    call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> [[I]], <2 x double*> [[P]], i32 8, <2 x i1> <i1 true, i1 true>)
+; CHECK-NEXT:    ret void
+;
+entry:
+  %ld0.0 = load double, double* %arg1, align 8
+  %mul1.0 = fmul fast double %ld0.0, 10.0
+  %gep0.1 = getelementptr inbounds double, double* %arg1, i64 1
+  %ld0.1 = load double, double* %gep0.1, align 8
+  %mul1.1 = fmul fast double %ld0.1, 11.0
+  %rdx1.0 = fadd fast double %mul1.0, %mul1.1
+  %gep0.2 = getelementptr inbounds double, double* %arg1, i64 2
+  %ld0.2 = load double, double* %gep0.2, align 8
+  %mul1.2 = fmul fast double %ld0.2, 12.0
+  %rdx1.1 = fadd fast double %rdx1.0, %mul1.2
+  %gep0.3 = getelementptr inbounds double, double* %arg1, i64 3
+  %ld0.3 = load double, double* %gep0.3, align 8
+  %mul1.3 = fmul fast double %ld0.3, 13.0
+  %rdx1.2 = fadd fast double %rdx1.1, %mul1.3
+  %gep0.4 = getelementptr inbounds double, double* %arg1, i64 4
+  %ld0.4 = load double, double* %gep0.4, align 8
+  %mul1.4 = fmul fast double %ld0.4, 14.0
+  %rdx1.3 = fadd fast double %rdx1.2, %mul1.4
+  %gep0.5 = getelementptr inbounds double, double* %arg1, i64 5
+  %ld0.5 = load double, double* %gep0.5, align 8
+  %mul1.5 = fmul fast double %ld0.5, 15.0
+  %rdx1.4 = fadd fast double %rdx1.3, %mul1.5
+  %gep0.6 = getelementptr inbounds double, double* %arg1, i64 6
+  %ld0.6 = load double, double* %gep0.6, align 8
+  %mul1.6 = fmul fast double %ld0.6, 16.0
+  %rdx1.5 = fadd fast double %rdx1.4, %mul1.6
+  %gep0.7 = getelementptr inbounds double, double* %arg1, i64 7
+  %ld0.7 = load double, double* %gep0.7, align 8
+  %mul1.7 = fmul fast double %ld0.7, 17.0
+  %rdx1 = fadd fast double %rdx1.5, %mul1.7
+  %i = insertelement <2 x double> %v, double %rdx1, i64 1
+  %p = getelementptr inbounds double, double* %arg2, <2 x i64> <i64 0, i64 16>
+  call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> %i, <2 x double*> %p, i32 8, <2 x i1> <i1 true, i1 true>)
+  ret void
+}
+

More information about the llvm-commits mailing list