[Lldb-commits] [lldb] r142708 - in /lldb/trunk/test: bench.py benchmarks/disassembly/TestDoAttachThenDisassembly.py benchmarks/expression/TestExpressionCmd.py benchmarks/startup/TestStartupDelays.py dotest.py
Johnny Chen
johnny.chen at apple.com
Fri Oct 21 17:57:05 PDT 2011
Author: johnny
Date: Fri Oct 21 19:57:05 2011
New Revision: 142708
URL: http://llvm.org/viewvc/llvm-project?rev=142708&view=rev
Log:
Add bench.py as a driver script to run some benchmarks on lldb.
Add benchmarks for expression evaluations (TestExpressionCmd.py) and disassembly (TestDoAttachThenDisassembly.py).
An example:
[17:45:55] johnny:/Volumes/data/lldb/svn/trunk/test $ ./bench.py 2>&1 | grep -P '^lldb.*benchmark:'
lldb startup delay (create fresh target) benchmark: Avg: 0.104274 (Laps: 30, Total Elapsed Time: 3.128214)
lldb startup delay (set first breakpoint) benchmark: Avg: 0.102216 (Laps: 30, Total Elapsed Time: 3.066470)
lldb frame variable benchmark: Avg: 1.649162 (Laps: 20, Total Elapsed Time: 32.983245)
lldb stepping benchmark: Avg: 0.104409 (Laps: 50, Total Elapsed Time: 5.220461)
lldb expr cmd benchmark: Avg: 0.206774 (Laps: 25, Total Elapsed Time: 5.169350)
lldb disassembly benchmark: Avg: 0.089086 (Laps: 10, Total Elapsed Time: 0.890859)
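
For anyone post-processing those summary lines, here is a minimal sketch of a parser for the "Avg/Laps/Total Elapsed Time" format shown above (a hypothetical helper, not part of this commit; it just reads the bench output from stdin):

    import re, sys

    # Matches e.g. "lldb stepping benchmark: Avg: 0.104409 (Laps: 50, Total Elapsed Time: 5.220461)"
    pattern = re.compile(r'^(lldb.*benchmark): Avg: ([0-9.]+) \(Laps: (\d+), Total Elapsed Time: ([0-9.]+)\)')

    for line in sys.stdin:
        match = pattern.match(line)
        if match:
            name, avg, laps, total = match.groups()
            print "%-55s avg=%s laps=%s total=%s" % (name, avg, laps, total)

It can be fed with the same pipeline, e.g. ./bench.py 2>&1 | python parse_bench.py.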
Added:
lldb/trunk/test/bench.py (with props)
lldb/trunk/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py
lldb/trunk/test/benchmarks/expression/TestExpressionCmd.py
Modified:
lldb/trunk/test/benchmarks/startup/TestStartupDelays.py
lldb/trunk/test/dotest.py
Added: lldb/trunk/test/bench.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/test/bench.py?rev=142708&view=auto
==============================================================================
--- lldb/trunk/test/bench.py (added)
+++ lldb/trunk/test/bench.py Fri Oct 21 19:57:05 2011
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+"""
+A simple bench runner which delegates to the ./dotest.py test driver to run the
+benchmarks defined in the list named 'benches'.
+
+You need to hand-edit 'benches' to modify the command lines passed to the
+test driver.
+
+Use the following to get only the benchmark results in your terminal output:
+
+    ./bench.py 2>&1 | grep -P '^lldb.*benchmark:'
+"""
+
+import os, sys
+import re
+
+# dotest.py invocation with no '-e exe-path' uses lldb as the inferior program,
+# unless a custom executable program is specified.
+benches = [
+    # Measure startup delays creating a target and setting a breakpoint at main.
+    './dotest.py -v +b -n -p TestStartupDelays.py',
+
+    # Measure 'frame variable' response after stopping at Driver::MainLoop().
+    './dotest.py -v +b -x "-F Driver::MainLoop()" -n -p TestFrameVariableResponse.py',
+
+    # Measure stepping speed after stopping at Driver::MainLoop().
+    './dotest.py -v +b -x "-F Driver::MainLoop()" -n -p TestSteppingSpeed.py',
+
+    # Measure expression cmd response with a simple custom executable program.
+    './dotest.py +b -n -p TestExpressionCmd.py',
+
+    # Attach to a spawned lldb process then run disassembly benchmarks.
+    './dotest.py -v +b -n -p TestDoAttachThenDisassembly.py'
+]
+
+def main():
+    """Read the items from 'benches' and run the command lines one by one."""
+    print "Starting bench runner...."
+
+    for command in benches:
+        print "Running %s" % (command)
+        os.system(command)
+
+    print "Bench runner done."
+
+if __name__ == '__main__':
+    main()
Propchange: lldb/trunk/test/bench.py
------------------------------------------------------------------------------
svn:executable = *
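
The runner above shells out with os.system and ignores exit statuses. As a minimal sketch of a variant (illustrative only, not what the commit does), the same loop could use subprocess.call so a failing benchmark invocation is reported; it reuses the 'benches' list defined in bench.py above:

    import subprocess

    def main():
        """Variant of the loop above that reports each benchmark's exit status."""
        print "Starting bench runner...."
        for command in benches:
            print "Running %s" % (command)
            # shell=True keeps the quoting inside the command strings intact.
            status = subprocess.call(command, shell=True)
            if status != 0:
                print "Command exited with status %d: %s" % (status, command)
        print "Bench runner done."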
Added: lldb/trunk/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py?rev=142708&view=auto
==============================================================================
--- lldb/trunk/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py (added)
+++ lldb/trunk/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py Fri Oct 21 19:57:05 2011
@@ -0,0 +1,66 @@
+"""Test lldb's disassemblt speed."""
+
+import os, sys
+import unittest2
+import lldb
+import pexpect
+from lldbbench import *
+
+class AttachThenDisassemblyBench(BenchBase):
+
+    mydir = os.path.join("benchmarks", "disassembly")
+
+    def setUp(self):
+        BenchBase.setUp(self)
+
+    @benchmarks_test
+    def test_attach_then_disassembly(self):
+        """Attach to a spawned lldb process then run disassembly benchmarks."""
+        print
+        self.run_lldb_attach_then_disassembly(10)
+        print "lldb disassembly benchmark:", self.stopwatch
+
+    def run_lldb_attach_then_disassembly(self, count):
+        target = self.dbg.CreateTarget(self.lldbHere)
+
+        # Spawn a new process and don't display the stdout if not in TraceOn() mode.
+        import subprocess
+        popen = subprocess.Popen([self.lldbHere, self.lldbOption],
+                                 stdout = open(os.devnull, 'w') if not self.TraceOn() else None)
+        if self.TraceOn():
+            print "pid of spawned process: %d" % popen.pid
+
+        # Attach to the launched lldb process.
+        listener = lldb.SBListener("my.attach.listener")
+        error = lldb.SBError()
+        process = target.AttachToProcessWithID(listener, popen.pid, error)
+
+        # Set thread0 as the selected thread, followed by the 'MainLoop' frame
+        # as the selected frame. Then do disassembly on the function.
+        thread0 = process.GetThreadAtIndex(0)
+        process.SetSelectedThread(thread0)
+        i = 0
+        found = False
+        for f in thread0:
+            #print "frame#%d %s" % (i, f.GetFunctionName())
+            if "MainLoop" in f.GetFunctionName():
+                found = True
+                thread0.SetSelectedFrame(i)
+                if self.TraceOn():
+                    print "Found frame#%d for function 'MainLoop'" % i
+                break
+            i += 1
+
+        # Reset the stopwatch now.
+        self.stopwatch.reset()
+        for i in range(count):
+            with self.stopwatch:
+                # Disassemble the function.
+                self.runCmd("disassemble -f")
+
+
+if __name__ == '__main__':
+    import atexit
+    lldb.SBDebugger.Initialize()
+    atexit.register(lambda: lldb.SBDebugger.Terminate())
+    unittest2.main()
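
The frame search in run_lldb_attach_then_disassembly keeps a manual index counter (and a 'found' flag that is never read afterwards). For illustration only, an equivalent sketch of the same selection logic with the same SB API calls, using enumerate:

    for i, f in enumerate(thread0):
        if "MainLoop" in f.GetFunctionName():
            thread0.SetSelectedFrame(i)
            if self.TraceOn():
                print "Found frame#%d for function 'MainLoop'" % i
            break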
Added: lldb/trunk/test/benchmarks/expression/TestExpressionCmd.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/test/benchmarks/expression/TestExpressionCmd.py?rev=142708&view=auto
==============================================================================
--- lldb/trunk/test/benchmarks/expression/TestExpressionCmd.py (added)
+++ lldb/trunk/test/benchmarks/expression/TestExpressionCmd.py Fri Oct 21 19:57:05 2011
@@ -0,0 +1,76 @@
+"""Test lldb's expression evaluations and collect statistics."""
+
+import os, sys
+import unittest2
+import lldb
+import pexpect
+from lldbbench import *
+
+class ExpressionEvaluationCase(BenchBase):
+
+    mydir = os.path.join("benchmarks", "expression")
+
+    def setUp(self):
+        BenchBase.setUp(self)
+        self.source = 'main.cpp'
+        self.line_to_break = line_number(self.source, '// Set breakpoint here.')
+        self.count = lldb.bmIterationCount
+        if self.count <= 0:
+            self.count = 25
+
+    @benchmarks_test
+    def test_expr_cmd(self):
+        """Test lldb's expression commands and collect statistics."""
+        self.buildDefault()
+        self.exe_name = 'a.out'
+
+        print
+        self.run_lldb_repeated_exprs(self.exe_name, self.count)
+        print "lldb expr cmd benchmark:", self.stopwatch
+
+    def run_lldb_repeated_exprs(self, exe_name, count):
+        exe = os.path.join(os.getcwd(), exe_name)
+
+        # Set self.child_prompt, which is "(lldb) ".
+        self.child_prompt = '(lldb) '
+        prompt = self.child_prompt
+
+        # Reset the stopwatch now.
+        self.stopwatch.reset()
+        for i in range(count):
+            # So that the child gets torn down after the test.
+            self.child = pexpect.spawn('%s %s %s' % (self.lldbExec, self.lldbOption, exe))
+            child = self.child
+
+            # Turn on logging for what the child sends back.
+            if self.TraceOn():
+                child.logfile_read = sys.stdout
+
+            child.expect_exact(prompt)
+            child.sendline('breakpoint set -f %s -l %d' % (self.source, self.line_to_break))
+            child.expect_exact(prompt)
+            child.sendline('run')
+            child.expect_exact(prompt)
+            expr_cmd1 = 'expr ptr[j]->point.x'
+            expr_cmd2 = 'expr ptr[j]->point.y'
+
+            with self.stopwatch:
+                child.sendline(expr_cmd1)
+                child.expect_exact(prompt)
+                child.sendline(expr_cmd2)
+                child.expect_exact(prompt)
+
+            child.sendline('quit')
+            try:
+                self.child.expect(pexpect.EOF)
+            except:
+                pass
+
+            self.child = None
+
+
+if __name__ == '__main__':
+    import atexit
+    lldb.SBDebugger.Initialize()
+    atexit.register(lambda: lldb.SBDebugger.Terminate())
+    unittest2.main()
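
Both benchmarks accumulate laps through "with self.stopwatch:" and then print the stopwatch object. The real stopwatch comes from lldbbench; the following is only a rough sketch of a context-manager timer that would print in the same "Avg/Laps/Total Elapsed Time" format shown in the log above (an assumption-laden illustration, not the actual implementation):

    import time

    class SimpleStopwatch(object):
        """Illustrative timer: accumulates elapsed time across 'with' blocks."""
        def __init__(self):
            self.reset()
        def reset(self):
            self.laps = 0
            self.total = 0.0
        def __enter__(self):
            self.start = time.time()
            return self
        def __exit__(self, exc_type, exc_value, tb):
            self.total += time.time() - self.start
            self.laps += 1
        def __str__(self):
            avg = self.total / self.laps if self.laps else 0.0
            return "Avg: %f (Laps: %d, Total Elapsed Time: %f)" % (avg, self.laps, self.total)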
Modified: lldb/trunk/test/benchmarks/startup/TestStartupDelays.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/test/benchmarks/startup/TestStartupDelays.py?rev=142708&r1=142707&r2=142708&view=diff
==============================================================================
--- lldb/trunk/test/benchmarks/startup/TestStartupDelays.py (original)
+++ lldb/trunk/test/benchmarks/startup/TestStartupDelays.py Fri Oct 21 19:57:05 2011
@@ -26,7 +26,7 @@
         self.count = lldb.bmIterationCount
         if self.count <= 0:
-            self.count = 15
+            self.count = 30
     @benchmarks_test
     def test_startup_delay(self):
Modified: lldb/trunk/test/dotest.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/test/dotest.py?rev=142708&r1=142707&r2=142708&view=diff
==============================================================================
--- lldb/trunk/test/dotest.py (original)
+++ lldb/trunk/test/dotest.py Fri Oct 21 19:57:05 2011
@@ -1062,10 +1062,11 @@
         #print "sys.stdout name is", sys.stdout.name
         # First, write out the number of collected test cases.
-        sys.stderr.write(separator + "\n")
-        sys.stderr.write("Collected %d test%s\n\n"
-                         % (suite.countTestCases(),
-                            suite.countTestCases() != 1 and "s" or ""))
+        if not noHeaders:
+            sys.stderr.write(separator + "\n")
+            sys.stderr.write("Collected %d test%s\n\n"
+                             % (suite.countTestCases(),
+                                suite.countTestCases() != 1 and "s" or ""))
         class LLDBTestResult(unittest2.TextTestResult):
             """