[llvm-branch-commits] [llvm] b548f7a - [LV] Add test where rt checks make vectorization unprofitable.

Florian Hahn via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Sat Sep 25 12:29:37 PDT 2021


Author: Florian Hahn
Date: 2021-09-25T15:15:51+01:00
New Revision: b548f7ab7af04e3f8cb0afee9f92c9253526aa64

URL: https://github.com/llvm/llvm-project/commit/b548f7ab7af04e3f8cb0afee9f92c9253526aa64
DIFF: https://github.com/llvm/llvm-project/commit/b548f7ab7af04e3f8cb0afee9f92c9253526aa64.diff

LOG: [LV] Add test where rt checks make vectorization unprofitable.
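
The new test uses five distinct pointer arguments and a trip count of only
16, so the pairwise runtime overlap checks the vectorizer has to emit (plus
the vector body itself) can cost more than simply running the scalar loop.
The RUN line sets -runtime-memory-check-threshold=9, the cap on how many
runtime memory checks the vectorizer may emit. As the TODO in the test notes,
the loop is currently still vectorized (the CHECK lines expect both
vector.memcheck and vector.body), even though that is unprofitable here.

Roughly, the IR corresponds to a C loop like the sketch below (a hypothetical
source-level reconstruction for illustration only, not part of the commit):

#include <math.h>

/* Hypothetical source-level sketch of the loop the IR test encodes.
 * Five distinct pointer arguments require a set of pairwise runtime
 * overlap checks in front of a loop that only runs 16 iterations. */
void test(double *A, double *B, double *C, double *D, double *E) {
  for (long i = 0; i < 16; i++) {
    double a = A[i];
    A[i] = 0.0;
    double p1 = pow(a, 2.0);

    double b = B[i];
    double p2 = pow(b, p1);   /* computed but never stored, as in the IR */
    (void)p2;
    B[i] = 0.0;

    double p3 = pow(p1, C[i]);
    double p4 = pow(p3, D[i]);
    double p5 = pow(p4, p3);

    double mul = 2.0 * p5;
    E[i] = mul * (mul * 2.0);
  }
}

The IR additionally tags the %C load and %E store with noalias/alias.scope
metadata, which the sketch above does not model.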

Added: 
    llvm/test/Transforms/LoopVectorize/X86/pointer-runtime-checks-unprofitable.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/LoopVectorize/X86/pointer-runtime-checks-unprofitable.ll b/llvm/test/Transforms/LoopVectorize/X86/pointer-runtime-checks-unprofitable.ll
new file mode 100644
index 0000000000000..628457a1c1e43
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/X86/pointer-runtime-checks-unprofitable.ll
@@ -0,0 +1,61 @@
+; RUN: opt -runtime-memory-check-threshold=9 -passes='loop-vectorize' -mtriple=x86_64-unknown-linux -S %s | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+target triple = "x86_64-unknown-linux"
+
+declare double @llvm.pow.f64(double, double)
+
+; Test case where the memory runtime checks and vector body are more expensive
+; than running the scalar loop.
+; TODO: should not be vectorized.
+define void @test(double* nocapture %A, double* nocapture %B, double* nocapture %C, double* nocapture %D, double* nocapture %E) {
+; CHECK-LABEL: @test(
+; CHECK: vector.memcheck
+; CHECK: vector.body
+;
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %gep.A = getelementptr inbounds double, double* %A, i64 %iv
+  %l.A = load double, double* %gep.A, align 4
+  store double 0.0, double* %gep.A, align 4
+  %p.1 = call double @llvm.pow.f64(double %l.A, double 2.0)
+
+  %gep.B = getelementptr inbounds double, double* %B, i64 %iv
+  %l.B = load double, double* %gep.B, align 4
+  %p.2 = call double @llvm.pow.f64(double %l.B, double %p.1)
+  store double 0.0, double* %gep.B, align 4
+
+  %gep.C = getelementptr inbounds double, double* %C, i64 %iv
+  %l.C = load double, double* %gep.C, align 4, !noalias !5
+  %p.3 = call double @llvm.pow.f64(double %p.1, double %l.C)
+
+  %gep.D = getelementptr inbounds double, double* %D, i64 %iv
+  %l.D = load double, double* %gep.D
+  %p.4 = call double @llvm.pow.f64(double %p.3, double %l.D)
+  %p.5 = call double @llvm.pow.f64(double %p.4, double %p.3)
+  %mul = fmul double 2.0, %p.5
+  %mul.2 = fmul double %mul, 2.0
+  %mul.3 = fmul double %mul, %mul.2
+  %gep.E = getelementptr inbounds double, double* %E, i64 %iv
+  store double %mul.3, double* %gep.E, align 4, !alias.scope !5
+  %iv.next = add i64 %iv, 1
+  %exitcond = icmp eq i64 %iv.next, 16
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}
+
+!0 = !{!0}
+!1 = !{!1}
+
+; Some scopes in these domains:
+!2 = !{!2, !0}
+!4 = !{!4, !1}
+
+!5 = !{!4} ; A list containing only scope !4
+

More information about the llvm-branch-commits mailing list