[llvm] r242437 - [NVPTX] enable SpeculativeExecution in NVPTX

Jingyue Wu jingyue at google.com
Thu Jul 16 13:13:48 PDT 2015


Author: jingyue
Date: Thu Jul 16 15:13:48 2015
New Revision: 242437

URL: http://llvm.org/viewvc/llvm-project?rev=242437&view=rev
Log:
[NVPTX] enable SpeculativeExecution in NVPTX

Summary:
SpeculativeExecution enables a series of straight-line optimizations (such
as SLSR and NaryReassociate) to work on conditional code. For example,

  if (...)
    ... b * s ...
  if (...)
    ... (b + 1) * s ...

speculative execution can hoist b * s and (b + 1) * s out of their
then-blocks, so that we have

  ... b * s ...
  if (...)
    ...
  ... (b + 1) * s ...
  if (...)
    ...

Then, SLSR can rewrite (b + 1) * s to (b * s + s), because after
speculative execution b * s dominates (b + 1) * s.
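
Concretely, after SLSR the hoisted code looks roughly like the following
sketch (t0 and t1 are illustrative names, not values produced by the
passes):

  t0 = b * s;
  ... t0 ...
  if (...)
    ...
  t1 = t0 + s;   // SLSR: (b + 1) * s == b * s + s
  ... t1 ...
  if (...)
    ...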

The performance impact of this change is significant. It speeds up the
benchmarks running EigenFloatContractionKernelInternal16x16
(https://bitbucket.org/eigen/eigen/src/ba68f42fa69e4f43417fe1e52669d4dd5d2b3bee/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h?at=default#cl-526)
by roughly 2%. Some internal benchmarks that have the above code pattern
are improved by up to 40%. No significant slowdowns are observed on
Eigen CUDA microbenchmarks.

Reviewers: jholewinski, broune, eliben

Subscribers: llvm-commits, jholewinski

Differential Revision: http://reviews.llvm.org/D11201

Added:
    llvm/trunk/test/Transforms/StraightLineStrengthReduce/NVPTX/speculative-slsr.ll
Modified:
    llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp

Modified: llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp?rev=242437&r1=242436&r2=242437&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp Thu Jul 16 15:13:48 2015
@@ -181,6 +181,7 @@ void NVPTXPassConfig::addIRPasses() {
   // requires manual work and might be error-prone.
   addPass(createDeadCodeEliminationPass());
   addPass(createSeparateConstOffsetFromGEPPass());
+  addPass(createSpeculativeExecutionPass());
   // ReassociateGEPs exposes more opportunites for SLSR. See
   // the example in reassociate-geps-and-slsr.ll.
   addPass(createStraightLineStrengthReducePass());

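For reference, the pass ordering above (SpeculativeExecution immediately
before SLSR) can also be exercised on a standalone reduction of the pattern
with opt, assuming the legacy pass-manager flags -speculative-execution and
-slsr. The IR below is an illustrative sketch of the same pattern as the new
test, with made-up function and block names:

  ; Sketch only: opt -S -speculative-execution -slsr reduced.ll
  declare zeroext i1 @cond(i32)
  declare void @use(i32)

  define void @example(i32 %b, i32 %s) {
  entry:
    %c0 = call zeroext i1 @cond(i32 0)
    br i1 %c0, label %then0, label %merge0

  then0:                                ; use((b + 0) * s)
    %mul0 = mul nsw i32 %b, %s
    call void @use(i32 %mul0)
    br label %merge0

  merge0:
    %c1 = call zeroext i1 @cond(i32 1)
    br i1 %c1, label %then1, label %merge1

  then1:                                ; use((b + 1) * s)
    %add1 = add nsw i32 %b, 1
    %mul1 = mul nsw i32 %add1, %s       ; expected to become %mul0 + %s
    call void @use(i32 %mul1)
    br label %merge1

  merge1:
    ret void
  }
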
Added: llvm/trunk/test/Transforms/StraightLineStrengthReduce/NVPTX/speculative-slsr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/StraightLineStrengthReduce/NVPTX/speculative-slsr.ll?rev=242437&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/StraightLineStrengthReduce/NVPTX/speculative-slsr.ll (added)
+++ llvm/trunk/test/Transforms/StraightLineStrengthReduce/NVPTX/speculative-slsr.ll Thu Jul 16 15:13:48 2015
@@ -0,0 +1,71 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck %s
+
+target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx64-nvidia-cuda"
+
+; CUDA code
+; __global__ void foo(int b, int s) {
+;   #pragma unroll
+;   for (int i = 0; i < 4; ++i) {
+;     if (cond(i))
+;       use((b + i) * s);
+;   }
+; }
+define void @foo(i32 %b, i32 %s) {
+; CHECK-LABEL: .visible .entry foo(
+entry:
+; CHECK: ld.param.u32 [[s:%r[0-9]+]], [foo_param_1];
+; CHECK: ld.param.u32 [[b:%r[0-9]+]], [foo_param_0];
+  %call = tail call zeroext i1 @cond(i32 0)
+  br i1 %call, label %if.then, label %for.inc
+
+if.then:                                          ; preds = %entry
+  %mul = mul nsw i32 %b, %s
+; CHECK: mul.lo.s32 [[a0:%r[0-9]+]], [[b]], [[s]]
+  tail call void @use(i32 %mul)
+  br label %for.inc
+
+for.inc:                                          ; preds = %entry, %if.then
+  %call.1 = tail call zeroext i1 @cond(i32 1)
+  br i1 %call.1, label %if.then.1, label %for.inc.1
+
+if.then.1:                                        ; preds = %for.inc
+  %add.1 = add nsw i32 %b, 1
+  %mul.1 = mul nsw i32 %add.1, %s
+; CHECK: add.s32 [[a1:%r[0-9]+]], [[a0]], [[s]]
+  tail call void @use(i32 %mul.1)
+  br label %for.inc.1
+
+for.inc.1:                                        ; preds = %if.then.1, %for.inc
+  %call.2 = tail call zeroext i1 @cond(i32 2)
+  br i1 %call.2, label %if.then.2, label %for.inc.2
+
+if.then.2:                                        ; preds = %for.inc.1
+  %add.2 = add nsw i32 %b, 2
+  %mul.2 = mul nsw i32 %add.2, %s
+; CHECK: add.s32 [[a2:%r[0-9]+]], [[a1]], [[s]]
+  tail call void @use(i32 %mul.2)
+  br label %for.inc.2
+
+for.inc.2:                                        ; preds = %if.then.2, %for.inc.1
+  %call.3 = tail call zeroext i1 @cond(i32 3)
+  br i1 %call.3, label %if.then.3, label %for.inc.3
+
+if.then.3:                                        ; preds = %for.inc.2
+  %add.3 = add nsw i32 %b, 3
+  %mul.3 = mul nsw i32 %add.3, %s
+; CHECK: add.s32 [[a3:%r[0-9]+]], [[a2]], [[s]]
+  tail call void @use(i32 %mul.3)
+  br label %for.inc.3
+
+for.inc.3:                                        ; preds = %if.then.3, %for.inc.2
+  ret void
+}
+
+declare zeroext i1 @cond(i32)
+
+declare void @use(i32)
+
+!nvvm.annotations = !{!0}
+
+!0 = !{void (i32, i32)* @foo, !"kernel", i32 1}
