[Lldb-commits] [lldb] [lldb][test] Remove benchmark API tests (PR #108629)
Michael Buch via lldb-commits
lldb-commits at lists.llvm.org
Fri Sep 13 12:12:30 PDT 2024
https://github.com/Michael137 created https://github.com/llvm/llvm-project/pull/108629
These benchmarks don't run as part of the regular API test suite, and I'm not aware of any CI that runs them. I also haven't managed to actually run them locally using the `bench.py` script. They appear to be obsolete, so I'm proposing to remove them and the surrounding infrastructure entirely.
If anyone does know of a use for these, do let me know.
From c58fd8c9a3e7f568cc9fb451297a840b52ccb2b4 Mon Sep 17 00:00:00 2001
From: Michael Buch <michaelbuch12 at gmail.com>
Date: Fri, 13 Sep 2024 20:07:16 +0100
Subject: [PATCH] [lldb][test] Remove benchmark API tests
These benchmarks don't run as part of the regular
API test suite, and I'm not aware of any CI that runs them.
I also haven't managed to actually run them locally
using the `bench.py` script. They appear to be obsolete,
so I'm proposing to remove them and the surrounding
infrastructure entirely.
If anyone does know of a use for these, do let me know.
---
lldb/packages/Python/lldbsuite/test/bench.py | 77 ----------
.../Python/lldbsuite/test/decorators.py | 12 --
lldb/test/API/benchmarks/continue/Makefile | 3 -
.../continue/TestBenchmarkContinue.py | 65 ---------
lldb/test/API/benchmarks/continue/main.cpp | 36 -----
lldb/test/API/benchmarks/expression/Makefile | 3 -
.../expression/TestExpressionCmd.py | 74 ----------
.../expression/TestRepeatedExprs.py | 131 ------------------
lldb/test/API/benchmarks/expression/main.cpp | 43 ------
.../TestFrameVariableResponse.py | 68 ---------
lldb/test/API/benchmarks/libcxxlist/Makefile | 3 -
.../libcxxlist/TestBenchmarkLibcxxList.py | 58 --------
lldb/test/API/benchmarks/libcxxlist/main.cpp | 11 --
lldb/test/API/benchmarks/libcxxmap/Makefile | 3 -
.../libcxxmap/TestBenchmarkLibcxxMap.py | 58 --------
lldb/test/API/benchmarks/libcxxmap/main.cpp | 11 --
.../benchmarks/startup/TestStartupDelays.py | 78 -----------
.../benchmarks/stepping/TestSteppingSpeed.py | 69 ---------
.../TestCompileRunToBreakpointTurnaround.py | 122 ----------------
19 files changed, 925 deletions(-)
delete mode 100644 lldb/packages/Python/lldbsuite/test/bench.py
delete mode 100644 lldb/test/API/benchmarks/continue/Makefile
delete mode 100644 lldb/test/API/benchmarks/continue/TestBenchmarkContinue.py
delete mode 100644 lldb/test/API/benchmarks/continue/main.cpp
delete mode 100644 lldb/test/API/benchmarks/expression/Makefile
delete mode 100644 lldb/test/API/benchmarks/expression/TestExpressionCmd.py
delete mode 100644 lldb/test/API/benchmarks/expression/TestRepeatedExprs.py
delete mode 100644 lldb/test/API/benchmarks/expression/main.cpp
delete mode 100644 lldb/test/API/benchmarks/frame_variable/TestFrameVariableResponse.py
delete mode 100644 lldb/test/API/benchmarks/libcxxlist/Makefile
delete mode 100644 lldb/test/API/benchmarks/libcxxlist/TestBenchmarkLibcxxList.py
delete mode 100644 lldb/test/API/benchmarks/libcxxlist/main.cpp
delete mode 100644 lldb/test/API/benchmarks/libcxxmap/Makefile
delete mode 100644 lldb/test/API/benchmarks/libcxxmap/TestBenchmarkLibcxxMap.py
delete mode 100644 lldb/test/API/benchmarks/libcxxmap/main.cpp
delete mode 100644 lldb/test/API/benchmarks/startup/TestStartupDelays.py
delete mode 100644 lldb/test/API/benchmarks/stepping/TestSteppingSpeed.py
delete mode 100644 lldb/test/API/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py
diff --git a/lldb/packages/Python/lldbsuite/test/bench.py b/lldb/packages/Python/lldbsuite/test/bench.py
deleted file mode 100644
index 1a11b3ef4f0e64..00000000000000
--- a/lldb/packages/Python/lldbsuite/test/bench.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python
-
-"""
-A simple bench runner which delegates to the ./dotest.py test driver to run the
-benchmarks defined in the list named 'benches'.
-
-You need to hand edit 'benches' to modify/change the command lines passed to the
-test driver.
-
-Use the following to get only the benchmark results in your terminal output:
-
- ./bench.py -e /Volumes/data/lldb/svn/regression/build/Debug/lldb -x '-F Driver::MainLoop()' 2>&1 | grep -P '^lldb.*benchmark:'
-"""
-
-import os
-from optparse import OptionParser
-
-# dotest.py invocation with no '-e exe-path' uses lldb as the inferior program,
-# unless there is a mentioning of custom executable program.
-benches = [
- # Measure startup delays creating a target, setting a breakpoint, and run
- # to breakpoint stop.
- "./dotest.py -v +b %E %X -n -p TestStartupDelays.py",
- # Measure 'frame variable' response after stopping at a breakpoint.
- "./dotest.py -v +b %E %X -n -p TestFrameVariableResponse.py",
- # Measure stepping speed after stopping at a breakpoint.
- "./dotest.py -v +b %E %X -n -p TestSteppingSpeed.py",
- # Measure expression cmd response with a simple custom executable program.
- "./dotest.py +b -n -p TestExpressionCmd.py",
- # Attach to a spawned process then run disassembly benchmarks.
- "./dotest.py -v +b -n %E -p TestDoAttachThenDisassembly.py",
-]
-
-
-def main():
- """Read the items from 'benches' and run the command line one by one."""
- parser = OptionParser(
- usage="""\
-%prog [options]
-Run the standard benchmarks defined in the list named 'benches'.\
-"""
- )
- parser.add_option(
- "-e",
- "--executable",
- type="string",
- action="store",
- dest="exe",
- help="The target program launched by lldb.",
- )
- parser.add_option(
- "-x",
- "--breakpoint-spec",
- type="string",
- action="store",
- dest="break_spec",
- help="The lldb breakpoint spec for the target program.",
- )
-
- # Parses the options, if any.
- opts, args = parser.parse_args()
-
- print("Starting bench runner....")
-
- for item in benches:
- command = item.replace("%E", '-e "%s"' % opts.exe if opts.exe else "")
- command = command.replace(
- "%X", '-x "%s"' % opts.break_spec if opts.break_spec else ""
- )
- print("Running %s" % (command))
- os.system(command)
-
- print("Bench runner done.")
-
-
-if __name__ == "__main__":
- main()
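
For reference, `%E` and `%X` above expand from the `-e` and `-x` options, so with both supplied the first entry in `benches` runs as roughly the following (the lldb path is a placeholder):

    ./dotest.py -v +b -e "/path/to/lldb" -x "-F Driver::MainLoop()" -n -p TestStartupDelays.py
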
diff --git a/lldb/packages/Python/lldbsuite/test/decorators.py b/lldb/packages/Python/lldbsuite/test/decorators.py
index 834f01aaa61e6b..34319e203a3177 100644
--- a/lldb/packages/Python/lldbsuite/test/decorators.py
+++ b/lldb/packages/Python/lldbsuite/test/decorators.py
@@ -426,18 +426,6 @@ def impl(func):
return impl
-def benchmarks_test(func):
- """Decorate the item as a benchmarks test."""
-
- def should_skip_benchmarks_test():
- return "benchmarks test"
-
- # Mark this function as such to separate them from the regular tests.
- result = skipTestIfFn(should_skip_benchmarks_test)(func)
- result.__benchmarks_test__ = True
- return result
-
-
def no_debug_info_test(func):
"""Decorate the item as a test what don't use any debug info. If this annotation is specified
then the test runner won't generate a separate test for each debug info format."""
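
Aside: the removed decorator also shows why these tests never ran in the regular suite. `should_skip_benchmarks_test` unconditionally returns a reason string, and `skipTestIfFn` treats a non-None return value as a skip reason, so every `@benchmarks_test` test was skipped. A minimal sketch of that skip pattern, using plain `unittest` in place of lldb's actual `skipTestIfFn`:

    import functools
    import unittest

    def skip_test_if_fn(should_skip_fn):
        """Skip the decorated test when should_skip_fn() returns a reason string."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                reason = should_skip_fn()
                if reason is not None:
                    raise unittest.SkipTest(reason)
                return func(*args, **kwargs)
            return wrapper
        return decorator

    def benchmarks_test(func):
        # Always produces a reason, so the test is unconditionally skipped.
        return skip_test_if_fn(lambda: "benchmarks test")(func)

In the form removed here there was no opt-in path left, which is consistent with the observation above that the benchmarks could no longer actually be run.
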
diff --git a/lldb/test/API/benchmarks/continue/Makefile b/lldb/test/API/benchmarks/continue/Makefile
deleted file mode 100644
index 99998b20bcb050..00000000000000
--- a/lldb/test/API/benchmarks/continue/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-CXX_SOURCES := main.cpp
-
-include Makefile.rules
diff --git a/lldb/test/API/benchmarks/continue/TestBenchmarkContinue.py b/lldb/test/API/benchmarks/continue/TestBenchmarkContinue.py
deleted file mode 100644
index f2f15b3347eaa0..00000000000000
--- a/lldb/test/API/benchmarks/continue/TestBenchmarkContinue.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-Test lldb data formatter subsystem.
-"""
-
-import lldb
-from lldbsuite.test.decorators import *
-from lldbsuite.test.lldbbench import *
-from lldbsuite.test.lldbtest import *
-from lldbsuite.test import lldbutil
-
-
-class TestBenchmarkContinue(BenchBase):
- @benchmarks_test
- def test_run_command(self):
- """Benchmark different ways to continue a process"""
- self.build()
- self.data_formatter_commands()
-
- def setUp(self):
- # Call super's setUp().
- BenchBase.setUp(self)
-
- def data_formatter_commands(self):
- """Benchmark different ways to continue a process"""
- self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
-
- bkpt = self.target().FindBreakpointByID(
- lldbutil.run_break_set_by_source_regexp(self, "// break here")
- )
-
- self.runCmd("run", RUN_SUCCEEDED)
-
- # The stop reason of the thread should be breakpoint.
- self.expect(
- "thread list",
- STOPPED_DUE_TO_BREAKPOINT,
- substrs=["stopped", "stop reason = breakpoint"],
- )
-
- # This is the function to remove the custom formats in order to have a
- # clean slate for the next test case.
- def cleanup():
- self.runCmd("type format clear", check=False)
- self.runCmd("type summary clear", check=False)
- self.runCmd("type filter clear", check=False)
- self.runCmd("type synth clear", check=False)
- self.runCmd("settings set target.max-children-count 256", check=False)
-
- # Execute the cleanup function during test case tear down.
- self.addTearDownHook(cleanup)
-
- runCmd_sw = Stopwatch()
- lldbutil_sw = Stopwatch()
-
- for i in range(0, 15):
- runCmd_sw.start()
- self.runCmd("continue")
- runCmd_sw.stop()
-
- for i in range(0, 15):
- lldbutil_sw.start()
- lldbutil.continue_to_breakpoint(self.process(), bkpt)
- lldbutil_sw.stop()
-
- print("runCmd: %s\nlldbutil: %s" % (runCmd_sw, lldbutil_sw))
diff --git a/lldb/test/API/benchmarks/continue/main.cpp b/lldb/test/API/benchmarks/continue/main.cpp
deleted file mode 100644
index d715a1150d0607..00000000000000
--- a/lldb/test/API/benchmarks/continue/main.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-#include <map>
-
-#define intint_map std::map<int, int>
-
-int g_the_foo = 0;
-
-int thefoo_rw(int arg = 1)
-{
- if (arg < 0)
- arg = 0;
- if (!arg)
- arg = 1;
- g_the_foo += arg;
- return g_the_foo;
-}
-
-int main()
-{
- intint_map ii;
-
- for (int i = 0; i < 15; i++)
- {
- ii[i] = i + 1;
- thefoo_rw(i); // break here
- }
-
- ii.clear();
-
- for (int j = 0; j < 15; j++)
- {
- ii[j] = j + 1;
- thefoo_rw(j); // break here
- }
-
- return 0;
-}
diff --git a/lldb/test/API/benchmarks/expression/Makefile b/lldb/test/API/benchmarks/expression/Makefile
deleted file mode 100644
index 99998b20bcb050..00000000000000
--- a/lldb/test/API/benchmarks/expression/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-CXX_SOURCES := main.cpp
-
-include Makefile.rules
diff --git a/lldb/test/API/benchmarks/expression/TestExpressionCmd.py b/lldb/test/API/benchmarks/expression/TestExpressionCmd.py
deleted file mode 100644
index 8261b1b25da9f2..00000000000000
--- a/lldb/test/API/benchmarks/expression/TestExpressionCmd.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""Test lldb's expression evaluations and collect statistics."""
-
-import sys
-import lldb
-from lldbsuite.test.decorators import *
-from lldbsuite.test.lldbbench import *
-from lldbsuite.test.lldbtest import *
-from lldbsuite.test import configuration
-from lldbsuite.test import lldbutil
-
-
-class ExpressionEvaluationCase(BenchBase):
- def setUp(self):
- BenchBase.setUp(self)
- self.source = "main.cpp"
- self.line_to_break = line_number(self.source, "// Set breakpoint here.")
- self.count = 25
-
- @benchmarks_test
- @add_test_categories(["pexpect"])
- def test_expr_cmd(self):
- """Test lldb's expression commands and collect statistics."""
- self.build()
- self.exe_name = "a.out"
-
- print()
- self.run_lldb_repeated_exprs(self.exe_name, self.count)
- print("lldb expr cmd benchmark:", self.stopwatch)
-
- def run_lldb_repeated_exprs(self, exe_name, count):
- import pexpect
-
- exe = self.getBuildArtifact(exe_name)
-
- # Set self.child_prompt, which is "(lldb) ".
- self.child_prompt = "(lldb) "
- prompt = self.child_prompt
-
- # Reset the stopwatch now.
- self.stopwatch.reset()
- for i in range(count):
- # So that the child gets torn down after the test.
- self.child = pexpect.spawn(
- "%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe)
- )
- child = self.child
-
- # Turn on logging for what the child sends back.
- if self.TraceOn():
- child.logfile_read = sys.stdout
-
- child.expect_exact(prompt)
- child.sendline(
- "breakpoint set -f %s -l %d" % (self.source, self.line_to_break)
- )
- child.expect_exact(prompt)
- child.sendline("run")
- child.expect_exact(prompt)
- expr_cmd1 = "expr ptr[j]->point.x"
- expr_cmd2 = "expr ptr[j]->point.y"
-
- with self.stopwatch:
- child.sendline(expr_cmd1)
- child.expect_exact(prompt)
- child.sendline(expr_cmd2)
- child.expect_exact(prompt)
-
- child.sendline("quit")
- try:
- self.child.expect(pexpect.EOF)
- except:
- pass
-
- self.child = None
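
The pexpect-driven benchmarks above and below all share the same driving pattern: spawn lldb, synchronize on the `(lldb) ` prompt, and time selected command round trips. Reduced to a standalone sketch, with the lldb path, target, and breakpoint as illustrative placeholders:

    import sys
    import time
    import pexpect

    PROMPT = "(lldb) "

    def time_one_expr(lldb="/usr/bin/lldb", exe="./a.out"):
        # Spawn lldb on the target and wait for the first prompt.
        child = pexpect.spawn("%s %s" % (lldb, exe))
        child.logfile_read = sys.stdout.buffer  # echo what lldb sends back
        child.expect_exact(PROMPT)
        child.sendline("breakpoint set -n main")
        child.expect_exact(PROMPT)
        child.sendline("run")
        child.expect_exact(PROMPT)
        # Time a single expression-evaluation round trip.
        start = time.perf_counter()
        child.sendline("expr 1 + 1")
        child.expect_exact(PROMPT)
        elapsed = time.perf_counter() - start
        child.sendline("quit")
        try:
            child.expect(pexpect.EOF)
        except pexpect.ExceptionPexpect:
            pass
        return elapsed
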
diff --git a/lldb/test/API/benchmarks/expression/TestRepeatedExprs.py b/lldb/test/API/benchmarks/expression/TestRepeatedExprs.py
deleted file mode 100644
index acc6b74c17b7e6..00000000000000
--- a/lldb/test/API/benchmarks/expression/TestRepeatedExprs.py
+++ /dev/null
@@ -1,131 +0,0 @@
-"""Test evaluating expressions repeatedly comparing lldb against gdb."""
-
-import sys
-import lldb
-from lldbsuite.test.lldbbench import BenchBase
-from lldbsuite.test.decorators import *
-from lldbsuite.test.lldbtest import *
-from lldbsuite.test import configuration
-from lldbsuite.test import lldbutil
-
-
-class RepeatedExprsCase(BenchBase):
- def setUp(self):
- BenchBase.setUp(self)
- self.source = "main.cpp"
- self.line_to_break = line_number(self.source, "// Set breakpoint here.")
- self.lldb_avg = None
- self.gdb_avg = None
- self.count = 100
-
- @benchmarks_test
- @add_test_categories(["pexpect"])
- def test_compare_lldb_to_gdb(self):
- """Test repeated expressions with lldb vs. gdb."""
- self.build()
- self.exe_name = "a.out"
-
- print()
- self.run_lldb_repeated_exprs(self.exe_name, self.count)
- print("lldb benchmark:", self.stopwatch)
- self.run_gdb_repeated_exprs(self.exe_name, self.count)
- print("gdb benchmark:", self.stopwatch)
- print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg))
-
- def run_lldb_repeated_exprs(self, exe_name, count):
- import pexpect
-
- exe = self.getBuildArtifact(exe_name)
-
- # Set self.child_prompt, which is "(lldb) ".
- self.child_prompt = "(lldb) "
- prompt = self.child_prompt
-
- # So that the child gets torn down after the test.
- self.child = pexpect.spawn(
- "%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe)
- )
- child = self.child
-
- # Turn on logging for what the child sends back.
- if self.TraceOn():
- child.logfile_read = sys.stdout
-
- child.expect_exact(prompt)
- child.sendline("breakpoint set -f %s -l %d" % (self.source, self.line_to_break))
- child.expect_exact(prompt)
- child.sendline("run")
- child.expect_exact(prompt)
- expr_cmd1 = "expr ptr[j]->point.x"
- expr_cmd2 = "expr ptr[j]->point.y"
-
- # Reset the stopwatch now.
- self.stopwatch.reset()
- for i in range(count):
- with self.stopwatch:
- child.sendline(expr_cmd1)
- child.expect_exact(prompt)
- child.sendline(expr_cmd2)
- child.expect_exact(prompt)
- child.sendline("process continue")
- child.expect_exact(prompt)
-
- child.sendline("quit")
- try:
- self.child.expect(pexpect.EOF)
- except:
- pass
-
- self.lldb_avg = self.stopwatch.avg()
- if self.TraceOn():
- print("lldb expression benchmark:", str(self.stopwatch))
- self.child = None
-
- def run_gdb_repeated_exprs(self, exe_name, count):
- import pexpect
-
- exe = self.getBuildArtifact(exe_name)
-
- # Set self.child_prompt, which is "(gdb) ".
- self.child_prompt = "(gdb) "
- prompt = self.child_prompt
-
- # So that the child gets torn down after the test.
- self.child = pexpect.spawn("gdb --nx %s" % exe)
- child = self.child
-
- # Turn on logging for what the child sends back.
- if self.TraceOn():
- child.logfile_read = sys.stdout
-
- child.expect_exact(prompt)
- child.sendline("break %s:%d" % (self.source, self.line_to_break))
- child.expect_exact(prompt)
- child.sendline("run")
- child.expect_exact(prompt)
- expr_cmd1 = "print ptr[j]->point.x"
- expr_cmd2 = "print ptr[j]->point.y"
-
- # Reset the stopwatch now.
- self.stopwatch.reset()
- for i in range(count):
- with self.stopwatch:
- child.sendline(expr_cmd1)
- child.expect_exact(prompt)
- child.sendline(expr_cmd2)
- child.expect_exact(prompt)
- child.sendline("continue")
- child.expect_exact(prompt)
-
- child.sendline("quit")
- child.expect_exact("The program is running. Exit anyway?")
- child.sendline("y")
- try:
- self.child.expect(pexpect.EOF)
- except:
- pass
-
- self.gdb_avg = self.stopwatch.avg()
- if self.TraceOn():
- print("gdb expression benchmark:", str(self.stopwatch))
- self.child = None
diff --git a/lldb/test/API/benchmarks/expression/main.cpp b/lldb/test/API/benchmarks/expression/main.cpp
deleted file mode 100644
index 1a095d350227b8..00000000000000
--- a/lldb/test/API/benchmarks/expression/main.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-#include <stdio.h>
-
-class Point {
-public:
- int x;
- int y;
- Point(int a, int b):
- x(a),
- y(b)
- {}
-};
-
-class Data {
-public:
- int id;
- Point point;
- Data(int i):
- id(i),
- point(0, 0)
- {}
-};
-
-int main(int argc, char const *argv[]) {
- Data *data[1000];
- Data **ptr = data;
- for (int i = 0; i < 1000; ++i) {
- ptr[i] = new Data(i);
- ptr[i]->point.x = i;
- ptr[i]->point.y = i+1;
- }
-
- printf("Finished populating data.\n");
- for (int j = 0; j < 1000; ++j) {
- bool dump = argc > 1; // Set breakpoint here.
- // Evaluate a couple of expressions (2*1000 = 2000 exprs):
- // expr ptr[j]->point.x
- // expr ptr[j]->point.y
- if (dump) {
- printf("data[%d] = %d (%d, %d)\n", j, ptr[j]->id, ptr[j]->point.x, ptr[j]->point.y);
- }
- }
- return 0;
-}
diff --git a/lldb/test/API/benchmarks/frame_variable/TestFrameVariableResponse.py b/lldb/test/API/benchmarks/frame_variable/TestFrameVariableResponse.py
deleted file mode 100644
index e364fb8ce77821..00000000000000
--- a/lldb/test/API/benchmarks/frame_variable/TestFrameVariableResponse.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""Test lldb's response time for 'frame variable' command."""
-
-import sys
-import lldb
-from lldbsuite.test import configuration
-from lldbsuite.test import lldbtest_config
-from lldbsuite.test.decorators import *
-from lldbsuite.test.lldbbench import *
-
-
-class FrameVariableResponseBench(BenchBase):
- def setUp(self):
- BenchBase.setUp(self)
- self.exe = lldbtest_config.lldbExec
- self.break_spec = "-n main"
- self.count = 20
-
- @benchmarks_test
- @no_debug_info_test
- @add_test_categories(["pexpect"])
- def test_startup_delay(self):
- """Test response time for the 'frame variable' command."""
- print()
- self.run_frame_variable_bench(self.exe, self.break_spec, self.count)
- print("lldb frame variable benchmark:", self.stopwatch)
-
- def run_frame_variable_bench(self, exe, break_spec, count):
- import pexpect
-
- # Set self.child_prompt, which is "(lldb) ".
- self.child_prompt = "(lldb) "
- prompt = self.child_prompt
-
- # Reset the stopwatchs now.
- self.stopwatch.reset()
- for i in range(count):
- # So that the child gets torn down after the test.
- self.child = pexpect.spawn(
- "%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe)
- )
- child = self.child
-
- # Turn on logging for what the child sends back.
- if self.TraceOn():
- child.logfile_read = sys.stdout
-
- # Set our breakpoint.
- child.sendline("breakpoint set %s" % break_spec)
- child.expect_exact(prompt)
-
- # Run the target and expect it to be stopped due to breakpoint.
- child.sendline("run") # Aka 'process launch'.
- child.expect_exact(prompt)
-
- with self.stopwatch:
- # Measure the 'frame variable' response time.
- child.sendline("frame variable")
- child.expect_exact(prompt)
-
- child.sendline("quit")
- try:
- self.child.expect(pexpect.EOF)
- except:
- pass
-
- # The test is about to end and if we come to here, the child process has
- # been terminated. Mark it so.
- self.child = None
diff --git a/lldb/test/API/benchmarks/libcxxlist/Makefile b/lldb/test/API/benchmarks/libcxxlist/Makefile
deleted file mode 100644
index 99998b20bcb050..00000000000000
--- a/lldb/test/API/benchmarks/libcxxlist/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-CXX_SOURCES := main.cpp
-
-include Makefile.rules
diff --git a/lldb/test/API/benchmarks/libcxxlist/TestBenchmarkLibcxxList.py b/lldb/test/API/benchmarks/libcxxlist/TestBenchmarkLibcxxList.py
deleted file mode 100644
index 01de980f3bc15c..00000000000000
--- a/lldb/test/API/benchmarks/libcxxlist/TestBenchmarkLibcxxList.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-Test lldb data formatter subsystem.
-"""
-
-import lldb
-from lldbsuite.test.decorators import *
-from lldbsuite.test.lldbbench import *
-from lldbsuite.test.lldbtest import *
-from lldbsuite.test import lldbutil
-
-
-class TestBenchmarkLibcxxList(BenchBase):
- @benchmarks_test
- def test_run_command(self):
- """Benchmark the std::list data formatter (libc++)"""
- self.build()
- self.data_formatter_commands()
-
- def setUp(self):
- # Call super's setUp().
- BenchBase.setUp(self)
-
- def data_formatter_commands(self):
- """Benchmark the std::list data formatter (libc++)"""
- self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
-
- bkpt = self.target().FindBreakpointByID(
- lldbutil.run_break_set_by_source_regexp(self, "break here")
- )
-
- self.runCmd("run", RUN_SUCCEEDED)
-
- # The stop reason of the thread should be breakpoint.
- self.expect(
- "thread list",
- STOPPED_DUE_TO_BREAKPOINT,
- substrs=["stopped", "stop reason = breakpoint"],
- )
-
- # This is the function to remove the custom formats in order to have a
- # clean slate for the next test case.
- def cleanup():
- self.runCmd("type format clear", check=False)
- self.runCmd("type summary clear", check=False)
- self.runCmd("type filter clear", check=False)
- self.runCmd("type synth clear", check=False)
- self.runCmd("settings set target.max-children-count 256", check=False)
-
- # Execute the cleanup function during test case tear down.
- self.addTearDownHook(cleanup)
-
- sw = Stopwatch()
-
- sw.start()
- self.expect("frame variable -A list", substrs=["[300]", "300"])
- sw.stop()
-
- print("time to print: %s" % (sw))
diff --git a/lldb/test/API/benchmarks/libcxxlist/main.cpp b/lldb/test/API/benchmarks/libcxxlist/main.cpp
deleted file mode 100644
index 9c4113ad0514eb..00000000000000
--- a/lldb/test/API/benchmarks/libcxxlist/main.cpp
+++ /dev/null
@@ -1,11 +0,0 @@
-#include <list>
-
-int main()
-{
- std::list<int> list;
- for (int i = 0;
- i < 1500;
- i++)
- list.push_back(i);
- return list.size(); // break here
-}
diff --git a/lldb/test/API/benchmarks/libcxxmap/Makefile b/lldb/test/API/benchmarks/libcxxmap/Makefile
deleted file mode 100644
index 99998b20bcb050..00000000000000
--- a/lldb/test/API/benchmarks/libcxxmap/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-CXX_SOURCES := main.cpp
-
-include Makefile.rules
diff --git a/lldb/test/API/benchmarks/libcxxmap/TestBenchmarkLibcxxMap.py b/lldb/test/API/benchmarks/libcxxmap/TestBenchmarkLibcxxMap.py
deleted file mode 100644
index 10056ab3fdf746..00000000000000
--- a/lldb/test/API/benchmarks/libcxxmap/TestBenchmarkLibcxxMap.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-Test lldb data formatter subsystem.
-"""
-
-import lldb
-from lldbsuite.test.lldbbench import *
-from lldbsuite.test.decorators import *
-from lldbsuite.test.lldbtest import *
-from lldbsuite.test import lldbutil
-
-
-class TestBenchmarkLibcxxMap(BenchBase):
- @benchmarks_test
- def test_run_command(self):
- """Benchmark the std::map data formatter (libc++)"""
- self.build()
- self.data_formatter_commands()
-
- def setUp(self):
- # Call super's setUp().
- BenchBase.setUp(self)
-
- def data_formatter_commands(self):
- """Benchmark the std::map data formatter (libc++)"""
- self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
-
- bkpt = self.target().FindBreakpointByID(
- lldbutil.run_break_set_by_source_regexp(self, "break here")
- )
-
- self.runCmd("run", RUN_SUCCEEDED)
-
- # The stop reason of the thread should be breakpoint.
- self.expect(
- "thread list",
- STOPPED_DUE_TO_BREAKPOINT,
- substrs=["stopped", "stop reason = breakpoint"],
- )
-
- # This is the function to remove the custom formats in order to have a
- # clean slate for the next test case.
- def cleanup():
- self.runCmd("type format clear", check=False)
- self.runCmd("type summary clear", check=False)
- self.runCmd("type filter clear", check=False)
- self.runCmd("type synth clear", check=False)
- self.runCmd("settings set target.max-children-count 256", check=False)
-
- # Execute the cleanup function during test case tear down.
- self.addTearDownHook(cleanup)
-
- sw = Stopwatch()
-
- sw.start()
- self.expect("frame variable -A map", substrs=["[300]", "300"])
- sw.stop()
-
- print("time to print: %s" % (sw))
diff --git a/lldb/test/API/benchmarks/libcxxmap/main.cpp b/lldb/test/API/benchmarks/libcxxmap/main.cpp
deleted file mode 100644
index 45efb26b6b0405..00000000000000
--- a/lldb/test/API/benchmarks/libcxxmap/main.cpp
+++ /dev/null
@@ -1,11 +0,0 @@
-#include <map>
-
-int main()
-{
- std::map<int, int> map;
- for (int i = 0;
- i < 1500;
- i++)
- map[i] = i;
- return map.size(); // break here
-}
diff --git a/lldb/test/API/benchmarks/startup/TestStartupDelays.py b/lldb/test/API/benchmarks/startup/TestStartupDelays.py
deleted file mode 100644
index faec21e95e5d20..00000000000000
--- a/lldb/test/API/benchmarks/startup/TestStartupDelays.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""Test lldb's startup delays creating a target, setting a breakpoint, and run to breakpoint stop."""
-
-import sys
-import lldb
-from lldbsuite.test import configuration
-from lldbsuite.test import lldbtest_config
-from lldbsuite.test.decorators import *
-from lldbsuite.test.lldbbench import *
-
-
-class StartupDelaysBench(BenchBase):
- def setUp(self):
- BenchBase.setUp(self)
- # Create self.stopwatch2 for measuring "set first breakpoint".
- # The default self.stopwatch is for "create fresh target".
- self.stopwatch2 = Stopwatch()
- # Create self.stopwatch3 for measuring "run to breakpoint".
- self.stopwatch3 = Stopwatch()
- self.exe = lldbtest_config.lldbExec
- self.break_spec = "-n main"
- self.count = 30
-
- @benchmarks_test
- @no_debug_info_test
- @add_test_categories(["pexpect"])
- def test_startup_delay(self):
- """Test start up delays creating a target, setting a breakpoint, and run to breakpoint stop."""
- print()
- self.run_startup_delays_bench(self.exe, self.break_spec, self.count)
- print("lldb startup delay (create fresh target) benchmark:", self.stopwatch)
- print("lldb startup delay (set first breakpoint) benchmark:", self.stopwatch2)
- print("lldb startup delay (run to breakpoint) benchmark:", self.stopwatch3)
-
- def run_startup_delays_bench(self, exe, break_spec, count):
- import pexpect
-
- # Set self.child_prompt, which is "(lldb) ".
- self.child_prompt = "(lldb) "
- prompt = self.child_prompt
-
- # Reset the stopwatchs now.
- self.stopwatch.reset()
- self.stopwatch2.reset()
- for i in range(count):
- # So that the child gets torn down after the test.
- self.child = pexpect.spawn(
- "%s %s" % (lldbtest_config.lldbExec, self.lldbOption)
- )
- child = self.child
-
- # Turn on logging for what the child sends back.
- if self.TraceOn():
- child.logfile_read = sys.stdout
-
- with self.stopwatch:
- # Create a fresh target.
- child.sendline("file %s" % exe) # Aka 'target create'.
- child.expect_exact(prompt)
-
- with self.stopwatch2:
- # Read debug info and set the first breakpoint.
- child.sendline("breakpoint set %s" % break_spec)
- child.expect_exact(prompt)
-
- with self.stopwatch3:
- # Run to the breakpoint just set.
- child.sendline("run")
- child.expect_exact(prompt)
-
- child.sendline("quit")
- try:
- self.child.expect(pexpect.EOF)
- except:
- pass
-
- # The test is about to end and if we come to here, the child process has
- # been terminated. Mark it so.
- self.child = None
diff --git a/lldb/test/API/benchmarks/stepping/TestSteppingSpeed.py b/lldb/test/API/benchmarks/stepping/TestSteppingSpeed.py
deleted file mode 100644
index d0f9b0d61d17ef..00000000000000
--- a/lldb/test/API/benchmarks/stepping/TestSteppingSpeed.py
+++ /dev/null
@@ -1,69 +0,0 @@
-"""Test lldb's stepping speed."""
-
-import sys
-import lldb
-from lldbsuite.test import configuration
-from lldbsuite.test import lldbtest_config
-from lldbsuite.test.lldbbench import *
-from lldbsuite.test.decorators import *
-from lldbsuite.test.lldbtest import *
-from lldbsuite.test import lldbutil
-
-
-class SteppingSpeedBench(BenchBase):
- def setUp(self):
- BenchBase.setUp(self)
- self.exe = lldbtest_config.lldbExec
- self.break_spec = "-n main"
- self.count = 50
-
- self.trace("self.exe=%s" % self.exe)
- self.trace("self.break_spec=%s" % self.break_spec)
-
- @benchmarks_test
- @no_debug_info_test
- @add_test_categories(["pexpect"])
- def test_run_lldb_steppings(self):
- """Test lldb steppings on a large executable."""
- print()
- self.run_lldb_steppings(self.exe, self.break_spec, self.count)
- print("lldb stepping benchmark:", self.stopwatch)
-
- def run_lldb_steppings(self, exe, break_spec, count):
- import pexpect
-
- # Set self.child_prompt, which is "(lldb) ".
- self.child_prompt = "(lldb) "
- prompt = self.child_prompt
-
- # So that the child gets torn down after the test.
- self.child = pexpect.spawn(
- "%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe)
- )
- child = self.child
-
- # Turn on logging for what the child sends back.
- if self.TraceOn():
- child.logfile_read = sys.stdout
-
- child.expect_exact(prompt)
- child.sendline("breakpoint set %s" % break_spec)
- child.expect_exact(prompt)
- child.sendline("run")
- child.expect_exact(prompt)
-
- # Reset the stopwatch now.
- self.stopwatch.reset()
- for i in range(count):
- with self.stopwatch:
- # Disassemble the function.
- child.sendline("next") # Aka 'thread step-over'.
- child.expect_exact(prompt)
-
- child.sendline("quit")
- try:
- self.child.expect(pexpect.EOF)
- except:
- pass
-
- self.child = None
diff --git a/lldb/test/API/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py b/lldb/test/API/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py
deleted file mode 100644
index 91527cd114534c..00000000000000
--- a/lldb/test/API/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py
+++ /dev/null
@@ -1,122 +0,0 @@
-"""Benchmark the turnaround time starting a debugger and run to the breakpoint with lldb vs. gdb."""
-
-import sys
-import lldb
-from lldbsuite.test.lldbbench import *
-from lldbsuite.test.decorators import *
-from lldbsuite.test.lldbtest import *
-from lldbsuite.test import configuration
-from lldbsuite.test import lldbutil
-
-
-class CompileRunToBreakpointBench(BenchBase):
- def setUp(self):
- BenchBase.setUp(self)
- self.exe = lldbtest_config.lldbExec
- self.function = "Driver::MainLoop()"
- self.count = 3
-
- self.lldb_avg = None
- self.gdb_avg = None
-
- @benchmarks_test
- @no_debug_info_test
- @add_test_categories(["pexpect"])
- def test_run_lldb_then_gdb(self):
- """Benchmark turnaround time with lldb vs. gdb."""
- print()
- self.run_lldb_turnaround(self.exe, self.function, self.count)
- print("lldb turnaround benchmark:", self.stopwatch)
- self.run_gdb_turnaround(self.exe, self.function, self.count)
- print("gdb turnaround benchmark:", self.stopwatch)
- print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg))
-
- def run_lldb_turnaround(self, exe, function, count):
- import pexpect
-
- def run_one_round():
- prompt = self.child_prompt
-
- # So that the child gets torn down after the test.
- self.child = pexpect.spawn(
- "%s %s %s" % (lldbtest_config.lldbExec, self.lldbOption, exe)
- )
- child = self.child
-
- # Turn on logging for what the child sends back.
- if self.TraceOn():
- child.logfile_read = sys.stdout
-
- child.expect_exact(prompt)
- child.sendline("breakpoint set -F %s" % function)
- child.expect_exact(prompt)
- child.sendline("run")
- child.expect_exact(prompt)
-
- # Set self.child_prompt, which is "(lldb) ".
- self.child_prompt = "(lldb) "
- # Reset the stopwatch now.
- self.stopwatch.reset()
-
- for i in range(count + 1):
- # Ignore the first invoke lldb and run to the breakpoint turnaround
- # time.
- if i == 0:
- run_one_round()
- else:
- with self.stopwatch:
- run_one_round()
-
- self.child.sendline("quit")
- try:
- self.child.expect(pexpect.EOF)
- except:
- pass
-
- self.lldb_avg = self.stopwatch.avg()
- self.child = None
-
- def run_gdb_turnaround(self, exe, function, count):
- import pexpect
-
- def run_one_round():
- prompt = self.child_prompt
-
- # So that the child gets torn down after the test.
- self.child = pexpect.spawn("gdb --nx %s" % exe)
- child = self.child
-
- # Turn on logging for what the child sends back.
- if self.TraceOn():
- child.logfile_read = sys.stdout
-
- child.expect_exact(prompt)
- child.sendline("break %s" % function)
- child.expect_exact(prompt)
- child.sendline("run")
- child.expect_exact(prompt)
-
- # Set self.child_prompt, which is "(gdb) ".
- self.child_prompt = "(gdb) "
- # Reset the stopwatch now.
- self.stopwatch.reset()
-
- for i in range(count + 1):
- # Ignore the first invoke lldb and run to the breakpoint turnaround
- # time.
- if i == 0:
- run_one_round()
- else:
- with self.stopwatch:
- run_one_round()
-
- self.child.sendline("quit")
- self.child.expect_exact("The program is running. Exit anyway?")
- self.child.sendline("y")
- try:
- self.child.expect(pexpect.EOF)
- except:
- pass
-
- self.gdb_avg = self.stopwatch.avg()
- self.child = None