[LNT] r307711 - Clean up logging code
Matthias Braun via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 11 13:58:38 PDT 2017
Author: matze
Date: Tue Jul 11 13:58:38 2017
New Revision: 307711
URL: http://llvm.org/viewvc/llvm-project?rev=307711&view=rev
Log:
Clean up logging code
- Fix code that set up loggers for "lnt" even though all our logs
go to "lnt.server.ui.app".
- Consistently use the standard Python logging module instead of the
custom note/warning/error functions.
- Set up a shared logger in lnt.util.
- Use a common function to set up the loggers in lnttool to avoid code
duplication (a before/after usage sketch follows below).
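In practice, call sites switch from the ad-hoc helper functions to the
shared logger object. A minimal before/after sketch within an LNT
checkout (the messages are taken from the patch below):

    # Before: ad-hoc helpers from lnt.testing.util.commands, with each
    # tool wiring up its own handler and formatter by hand.
    from lnt.testing.util.commands import note, warning
    note("building report data...")
    warning("unable to detect that server started")

    # After: one shared logger object, created once in lnt/util/__init__.py.
    from lnt.util import logger
    logger.info("building report data...")
    logger.warning("unable to detect that server started")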
Added:
lnt/trunk/lnt/lnttool/common.py
Modified:
lnt/trunk/lnt/lnttool/create.py
lnt/trunk/lnt/lnttool/main.py
lnt/trunk/lnt/lnttool/updatedb.py
lnt/trunk/lnt/lnttool/viewcomparison.py
lnt/trunk/lnt/server/db/fieldchange.py
lnt/trunk/lnt/server/db/migrate.py
lnt/trunk/lnt/server/db/migrations/upgrade_1_to_2.py
lnt/trunk/lnt/server/db/rules/rule_blacklist_benchmarks_by_name.py
lnt/trunk/lnt/server/db/rules/rule_update_fixed_regressions.py
lnt/trunk/lnt/server/db/rules_manager.py
lnt/trunk/lnt/server/db/testsuite.py
lnt/trunk/lnt/server/db/v4db.py
lnt/trunk/lnt/server/instance.py
lnt/trunk/lnt/server/reporting/analysis.py
lnt/trunk/lnt/server/ui/app.py
lnt/trunk/lnt/server/ui/regression_views.py
lnt/trunk/lnt/testing/__init__.py
lnt/trunk/lnt/testing/profile/perf.py
lnt/trunk/lnt/testing/util/commands.py
lnt/trunk/lnt/testing/util/compilers.py
lnt/trunk/lnt/testing/util/rcs.py
lnt/trunk/lnt/testing/util/valgrind.py
lnt/trunk/lnt/tests/compile.py
lnt/trunk/lnt/tests/nt.py
lnt/trunk/lnt/tests/test_suite.py
lnt/trunk/lnt/util/ImportData.py
lnt/trunk/lnt/util/__init__.py
lnt/trunk/lnt/util/async_ops.py
Added: lnt/trunk/lnt/lnttool/common.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/lnttool/common.py?rev=307711&view=auto
==============================================================================
--- lnt/trunk/lnt/lnttool/common.py (added)
+++ lnt/trunk/lnt/lnttool/common.py Tue Jul 11 13:58:38 2017
@@ -0,0 +1,17 @@
+from lnt.util import logger
+import logging
+
+
+def init_logger(loglevel, show_sql=False, stream=None):
+ handler = logging.StreamHandler(stream)
+ handler.setLevel(loglevel)
+ handler.setFormatter(logging.Formatter(
+ '%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S'))
+ logger.addHandler(handler)
+ logger.setLevel(loglevel)
+
+ # Enable full SQL logging, if requested.
+ if show_sql:
+ sa_logger = logging.getLogger("sqlalchemy")
+ sa_logger.setLevel(loglevel)
+ sa_logger.addHandler(handler)
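As a usage sketch: with this helper, the handler/formatter boilerplate
that each tool below used to duplicate collapses to a single call (the
level and flag values here are illustrative):

    import logging
    import sys

    from lnt.lnttool.common import init_logger

    # Log INFO and above to stderr; show_sql=True additionally raises the
    # "sqlalchemy" logger to the same level and attaches the same handler,
    # so engine SQL is echoed alongside the tool's own messages.
    init_logger(logging.INFO, show_sql=True, stream=sys.stderr)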
Modified: lnt/trunk/lnt/lnttool/create.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/lnttool/create.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/lnttool/create.py (original)
+++ lnt/trunk/lnt/lnttool/create.py Tue Jul 11 13:58:38 2017
@@ -9,6 +9,7 @@ import click
import lnt.testing
import lnt.server.db.migrate
+from .common import init_logger
kConfigVersion = (0, 1, 0)
kConfigTemplate = """\
@@ -119,19 +120,8 @@ def action_create(instance_path, name, c
LNT configuration.
"""
- # Setup the base LNT logger.
- logger = logging.getLogger("lnt")
- logger.setLevel(logging.WARNING)
- handler = logging.StreamHandler(sys.stderr)
- handler.setFormatter(logging.Formatter(
- '%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S'))
- logger.addHandler(handler)
-
- # Enable full SQL logging, if requested.
- if show_sql:
- sa_logger = logging.getLogger("sqlalchemy")
- sa_logger.setLevel(logging.INFO)
- sa_logger.addHandler(handler)
+ init_logger(logging.INFO if show_sql else logging.WARNING,
+ show_sql=show_sql)
default_db_version = "0.4"
Modified: lnt/trunk/lnt/lnttool/main.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/lnttool/main.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/lnttool/main.py (original)
+++ lnt/trunk/lnt/lnttool/main.py Tue Jul 11 13:58:38 2017
@@ -8,12 +8,13 @@ import code
import click
import lnt
-from lnt.testing.util.commands import note, warning, error, LOGGER_NAME
+from lnt.util import logger
import lnt.testing.profile.profile as profile
from lnt.tests.nt import NTTest
from lnt.tests.compile import CompileTest
from lnt.tests.test_suite import TestSuiteTest
+from .common import init_logger
from .create import action_create
from .convert import action_convert
from .import_data import action_import
@@ -57,24 +58,7 @@ view the results.
"""
import lnt.server.ui.app
- # Setup the base LNT logger.
- # Root logger in debug.
- logger = logging.getLogger(LOGGER_NAME)
- if debugger:
- logger.setLevel(logging.DEBUG)
- handler = logging.StreamHandler()
- handler.setLevel(logging.DEBUG)
- handler.setFormatter(logging.Formatter(
- '%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S'))
- logger.addHandler(handler)
-
- # Enable full SQL logging, if requested.
- if show_sql:
- sa_logger = logging.getLogger("sqlalchemy")
- if debugger:
- sa_logger.setLevel(logging.DEBUG)
- sa_logger.setLevel(logging.INFO)
- sa_logger.addHandler(handler)
+ init_logger(logging.DEBUG, show_sql=show_sql)
app = lnt.server.ui.app.App.create_standalone(instance_path,)
if debugger:
@@ -137,14 +121,7 @@ def _print_result_url(results, verbose):
ignore_unknown_options=True, allow_extra_args=True,))
def action_runtest():
"""run a builtin test application"""
- logger = logging.getLogger(LOGGER_NAME)
- logger.setLevel(logging.INFO)
- handler = logging.StreamHandler()
- handler.setLevel(logging.INFO)
- handler.setFormatter(logging.Formatter(
- '%(asctime)s %(levelname)s: %(message)s',
- datefmt='%Y-%m-%d %H:%M:%S'))
- logger.addHandler(handler)
+ init_logger(logging.INFO)
action_runtest.add_command(NTTest.cli_wrapper)
@@ -177,8 +154,8 @@ def action_submit(url, files, commit, ve
"""submit a test report to the server"""
if not commit:
- warning("submit called with --commit=0, your results will not be saved"
- " at the server.")
+ logger.warning("submit called with --commit=0, " +
+ "your results will not be saved at the server.")
from lnt.util import ServerUtil
import lnt.util.ImportData
@@ -196,20 +173,7 @@ def action_submit(url, files, commit, ve
def action_update(db_path, show_sql):
"""create and or auto-update the given database"""
- # Setup the base LNT logger.
- logger = logging.getLogger("lnt")
- logger.setLevel(logging.INFO)
- handler = logging.StreamHandler(sys.stderr)
- handler.setFormatter(logging.Formatter(
- '%(asctime)s %(levelname)s: %(message)s',
- datefmt='%Y-%m-%d %H:%M:%S'))
- logger.addHandler(handler)
-
- # Enable full SQL logging, if requested.
- if show_sql:
- sa_logger = logging.getLogger("sqlalchemy")
- sa_logger.setLevel(logging.INFO)
- sa_logger.addHandler(handler)
+ init_logger(logging.INFO, show_sql=show_sql, stream=sys.stderr)
# Update the database.
lnt.server.db.migrate.update_path(db_path)
@@ -271,7 +235,7 @@ def action_send_daily_report(instance_pa
date = datetime.datetime.utcnow()
# Generate the daily report.
- note("building report data...")
+ logger.info("building report data...")
report = lnt.server.reporting.dailyreport.DailyReport(
ts, year=date.year, month=date.month, day=date.day,
day_start_offset_hours=date.hour, for_mail=True,
@@ -279,7 +243,7 @@ def action_send_daily_report(instance_pa
filter_machine_regex=filter_machine_regex)
report.build()
- note("generating HTML report...")
+ logger.info("generating HTML report...")
ts_url = "%s/db_%s/v4/%s" \
% (config.zorgURL, database, testsuite)
subject = "Daily Report: %04d-%02d-%02d" % (
@@ -336,13 +300,7 @@ def action_send_run_comparison(instance_
import smtplib
import lnt.server.reporting.dailyreport
- # Setup the base LNT logger.
- logger = logging.getLogger("lnt")
- logger.setLevel(logging.ERROR)
- handler = logging.StreamHandler(sys.stderr)
- handler.setFormatter(logging.Formatter(
- '%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S'))
- logger.addHandler(handler)
+ init_logger(logging.ERROR)
# Load the LNT instance.
instance = lnt.server.instance.Instance.frompath(instance_path)
@@ -362,9 +320,9 @@ def action_send_run_comparison(instance_
run_b = ts.query(ts.Run).\
filter_by(id=run_b_id).first()
if run_a is None:
- error("invalid run ID %r (not in database)" % (run_a_id,))
+ logger.error("invalid run ID %r (not in database)" % (run_a_id,))
if run_b is None:
- error("invalid run ID %r (not in database)" % (run_b_id,))
+ logger.error("invalid run ID %r (not in database)" % (run_b_id,))
# Generate the report.
data = lnt.server.reporting.runs.generate_run_data(
Modified: lnt/trunk/lnt/lnttool/updatedb.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/lnttool/updatedb.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/lnttool/updatedb.py (original)
+++ lnt/trunk/lnt/lnttool/updatedb.py Tue Jul 11 13:58:38 2017
@@ -2,7 +2,7 @@ import contextlib
import click
import lnt.server.instance
-from lnt.testing.util.commands import warning
+from lnt.util import logger
@click.command("updatedb")
@@ -84,7 +84,7 @@ def action_updatedb(instance_path, datab
num_deletes = ts.query(ts.Machine).filter_by(name=name).delete()
if num_deletes == 0:
- warning("unable to find machine named: %r" % name)
+ logger.warning("unable to find machine named: %r" % name)
if order:
ts.delete(order)
Modified: lnt/trunk/lnt/lnttool/viewcomparison.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/lnttool/viewcomparison.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/lnttool/viewcomparison.py (original)
+++ lnt/trunk/lnt/lnttool/viewcomparison.py Tue Jul 11 13:58:38 2017
@@ -11,8 +11,9 @@ import contextlib
import click
+from lnt.util import logger
from lnt.util.ImportData import import_and_report
-from lnt.testing.util.commands import note, warning
+from .common import init_logger
def start_browser(url, debug=False):
@@ -26,7 +27,7 @@ def start_browser(url, debug=False):
# Wait for server to start...
if debug:
- note('waiting for server to start...')
+ logger.info('waiting for server to start...')
for i in range(10000):
if url_is_up(url):
break
@@ -35,10 +36,10 @@ def start_browser(url, debug=False):
sys.stderr.flush()
time.sleep(.01)
else:
- warning('unable to detect that server started')
+ logger.warning('unable to detect that server started')
if debug:
- note('opening webbrowser...')
+ logger.info('opening webbrowser...')
webbrowser.open(url)
@@ -60,14 +61,7 @@ def action_view_comparison(report_a, rep
import lnt.server.ui.app
import lnt.server.db.migrate
- # Set up the default logger.
- logger = logging.getLogger("lnt")
- logger.setLevel(logging.ERROR)
- handler = logging.StreamHandler(sys.stderr)
- handler.setFormatter(logging.Formatter(
- '%(asctime)s %(levelname)s: %(message)s',
- datefmt='%Y-%m-%d %H:%M:%S'))
- logger.addHandler(handler)
+ init_logger(logging.ERROR)
# Create a temporary directory to hold the instance.
tmpdir = tempfile.mkdtemp(suffix='lnt')
@@ -99,7 +93,7 @@ def action_view_comparison(report_a, rep
# Dispatch another thread to start the webbrowser.
comparison_url = '%s/v4/nts/2?compare_to=1' % (url,)
- note("opening comparison view: %s" % (comparison_url,))
+ logger.info("opening comparison view: %s" % (comparison_url,))
if not dry_run:
thread.start_new_thread(start_browser, (comparison_url, True))
Modified: lnt/trunk/lnt/server/db/fieldchange.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/fieldchange.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/fieldchange.py (original)
+++ lnt/trunk/lnt/server/db/fieldchange.py Tue Jul 11 13:58:38 2017
@@ -2,8 +2,8 @@ import difflib
import sqlalchemy.sql
from sqlalchemy.orm.exc import ObjectDeletedError
import lnt.server.reporting.analysis
-from lnt.testing.util.commands import warning
-from lnt.testing.util.commands import note, timed
+from lnt.testing.util.commands import timed
+from lnt.util import logger
from lnt.server.db.regression import new_regression, RegressionState
from lnt.server.db.regression import get_ris
from lnt.server.db.regression import rebuild_title
@@ -46,7 +46,8 @@ def delete_fieldchange(ts, change):
all()
if len(remaining) == 0:
r = ts.query(ts.Regression).get(r)
- note("Deleting regression because it has not changes:" + repr(r))
+ logger.info("Deleting regression because it has not changes:" +
+ repr(r))
ts.delete(r)
deleted_ids.append(r)
ts.commit()
@@ -87,8 +88,8 @@ def regenerate_fieldchanges_for_run(ts,
# be used in so many runs.
run_size = len(runs_to_load)
if run_size > 50:
- warning("Generating field changes for {} runs."
- "That will be very slow.".format(run_size))
+ logger.warning("Generating field changes for {} runs."
+ "That will be very slow.".format(run_size))
runinfo = lnt.server.reporting.analysis.RunInfo(ts, runs_to_load)
# Only store fieldchanges for "metric" samples like execution time;
@@ -113,7 +114,7 @@ def regenerate_fieldchanges_for_run(ts,
if not result.is_result_performance_change() and f:
# With more data, its not a regression. Kill it!
- note("Removing field change: {}".format(f.id))
+ logger.info("Removing field change: {}".format(f.id))
deleted = delete_fieldchange(ts, f)
continue
@@ -135,7 +136,8 @@ def regenerate_fieldchanges_for_run(ts,
found, new_reg = identify_related_changes(ts, f)
if found:
- note("Found field change: {}".format(run.machine))
+ logger.info("Found field change: {}".format(
+ run.machine))
# Always update FCs with new values.
if f:
@@ -208,14 +210,14 @@ def identify_related_changes(ts, fc):
# Matching
MSG = "Found a match: {} with score {}."
regression = ts.query(ts.Regression).get(regression_id)
- note(MSG.format(str(regression),
- confidence))
+ logger.info(MSG.format(str(regression),
+ confidence))
ri = ts.RegressionIndicator(regression, fc)
ts.add(ri)
# Update the default title if needed.
rebuild_title(ts, regression)
ts.commit()
return True, regression
- note("Could not find a partner, creating new Regression for change")
+ logger.info("Could not find a partner, creating new Regression for change")
new_reg = new_regression(ts, [fc.id])
return False, new_reg
Modified: lnt/trunk/lnt/server/db/migrate.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/migrate.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/migrate.py (original)
+++ lnt/trunk/lnt/server/db/migrate.py Tue Jul 11 13:58:38 2017
@@ -7,7 +7,6 @@ Define facilities for automatically upgr
# version'. This was done in case we need to add some kind of migration
# functionality for the individual test suites, which is not unreasonable.
-import logging
import os
import re
@@ -16,6 +15,7 @@ import sqlalchemy.ext.declarative
import sqlalchemy.orm
from sqlalchemy import Column, String, Integer
+from lnt.util import logger
import lnt.server.db.util
###
@@ -112,8 +112,6 @@ def _load_migrations():
###
# Auto-upgrading support.
-logger = logging.getLogger(__name__)
-
def _set_schema_version(engine, schema_name, new_version):
# Keep the updating to a single transaction that is immediately committed.
session = sqlalchemy.orm.sessionmaker(engine)()
Modified: lnt/trunk/lnt/server/db/migrations/upgrade_1_to_2.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/migrations/upgrade_1_to_2.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/migrations/upgrade_1_to_2.py (original)
+++ lnt/trunk/lnt/server/db/migrations/upgrade_1_to_2.py Tue Jul 11 13:58:38 2017
@@ -9,15 +9,13 @@
# extraction, and we recompute the run order for them.
import json
-import logging
import pprint
import re
+from lnt.util import logger
import sqlalchemy
from sqlalchemy import Table, MetaData, Column
-logger = logging.getLogger('lnt')
-
def update_testsuite(engine, session, db_key_name):
class Run(object):
pass
Modified: lnt/trunk/lnt/server/db/rules/rule_blacklist_benchmarks_by_name.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/rules/rule_blacklist_benchmarks_by_name.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/rules/rule_blacklist_benchmarks_by_name.py (original)
+++ lnt/trunk/lnt/server/db/rules/rule_blacklist_benchmarks_by_name.py Tue Jul 11 13:58:38 2017
@@ -6,7 +6,7 @@ This can be used to implement server sid
import re
import os
import sys
-from lnt.testing.util.commands import note, warning
+from lnt.util import logger
from flask import current_app
ignored = None
@@ -22,12 +22,12 @@ def _populate_blacklist():
path = os.path.join(os.path.dirname(sys.argv[0]), "blacklist")
if path and os.path.isfile(path):
- note("Loading blacklist file: {}".format(path))
+ logger.info("Loading blacklist file: {}".format(path))
with open(path, 'r') as f:
for l in f.readlines():
ignored.append(re.compile(l.strip()))
else:
- warning("Ignoring blacklist file: {}".format(path))
+ logger.warning("Ignoring blacklist file: {}".format(path))
def filter_by_benchmark_name(ts, field_change):
@@ -41,11 +41,11 @@ def filter_by_benchmark_name(ts, field_c
field_change.machine.name,
benchmark_name,
field_change.field.name])
- note(full_name)
+ logger.info(full_name)
for regex in ignored:
if regex.match(full_name):
- note("Dropping field change {} because it matches {}".format(full_name,
- regex.pattern))
+ logger.info("Dropping field change {} because it matches {}"
+ .format(full_name, regex.pattern))
return False
return True
Modified: lnt/trunk/lnt/server/db/rules/rule_update_fixed_regressions.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/rules/rule_update_fixed_regressions.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/rules/rule_update_fixed_regressions.py (original)
+++ lnt/trunk/lnt/server/db/rules/rule_update_fixed_regressions.py Tue Jul 11 13:58:38 2017
@@ -4,7 +4,8 @@ Staged or Active + fixed -> Verify
"""
from lnt.server.db.regression import RegressionState
from lnt.server.db.regression import get_cr_for_field_change, get_ris
-from lnt.testing.util.commands import note, timed
+from lnt.util import logger
+from lnt.testing.util.commands import timed
def _fixed_rind(ts, rind):
"""Is this regression indicator fixed?"""
@@ -34,7 +35,7 @@ def regression_evolution(ts, run_id):
Look at regressions in detect, do they match our policy? If no, move to NTBF.
"""
- note("Running regression evolution")
+ logger.info("Running regression evolution")
changed = 0
evolve_states = [RegressionState.DETECTED, RegressionState.STAGED, RegressionState.ACTIVE]
regressions = ts.query(ts.Regression).filter(ts.Regression.state.in_(evolve_states)).all()
@@ -45,25 +46,25 @@ def regression_evolution(ts, run_id):
for regression in detects:
if is_fixed(ts, regression):
- note("Detected fixed regression" + str(regression))
+ logger.info("Detected fixed regression" + str(regression))
regression.state = RegressionState.IGNORED
regression.title = regression.title + " [Detected Fixed]"
changed += 1
for regression in staged:
if is_fixed(ts, regression):
- note("Staged fixed regression" + str(regression))
+ logger.info("Staged fixed regression" + str(regression))
regression.state = RegressionState.DETECTED_FIXED
regression.title = regression.title + " [Detected Fixed]"
changed += 1
for regression in active:
if is_fixed(ts, regression):
- note("Active fixed regression" + str(regression))
+ logger.info("Active fixed regression" + str(regression))
regression.state = RegressionState.DETECTED_FIXED
regression.title = regression.title + " [Detected Fixed]"
changed += 1
ts.commit()
- note("Changed the state of {} regressions".format(changed))
+ logger.info("Changed the state of {} regressions".format(changed))
post_submission_hook = regression_evolution
Modified: lnt/trunk/lnt/server/db/rules_manager.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/rules_manager.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/rules_manager.py (original)
+++ lnt/trunk/lnt/server/db/rules_manager.py Tue Jul 11 13:58:38 2017
@@ -4,7 +4,8 @@ Define facilities for automatically appl
import os
import re
-from lnt.testing.util.commands import note, warning, timed, error
+from lnt.util import logger
+from lnt.testing.util.commands import timed
def load_rules():
"""
@@ -31,7 +32,8 @@ def load_rules():
# Ignore non-matching files.
m = rule_script_rex.match(item)
if m is None:
- warning("ignoring item {} in rule directory: {}".format(item, rules_path))
+ logger.warning("ignoring item {} in rule directory: {}"
+ .format(item, rules_path))
continue
name = m.groups()[0]
Modified: lnt/trunk/lnt/server/db/testsuite.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/testsuite.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/testsuite.py (original)
+++ lnt/trunk/lnt/server/db/testsuite.py Tue Jul 11 13:58:38 2017
@@ -4,7 +4,6 @@ Database models for the TestSuites abstr
import json
import lnt
-import logging
import sys
import testsuitedb
import util
@@ -15,6 +14,7 @@ import sqlalchemy.orm
from sqlalchemy import *
from sqlalchemy.schema import Index
from sqlalchemy.orm import relation
+from lnt.util import logger
Base = sqlalchemy.ext.declarative.declarative_base()
@@ -256,9 +256,9 @@ class TestSuite(Base):
.first()
if prev_schema is not None:
if prev_schema.jsonschema != schema.jsonschema:
- logging.info("Previous Schema:")
- logging.info(json.dumps(json.loads(prev_schema.jsonschema),
- indent=2))
+ logger.info("Previous Schema:")
+ logger.info(json.dumps(json.loads(prev_schema.jsonschema),
+ indent=2))
# New schema? Save it in the database and we are good.
engine = v4db.engine
prev_schema.upgrade_to(engine, schema, dry_run=True)
Modified: lnt/trunk/lnt/server/db/v4db.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/v4db.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/v4db.py (original)
+++ lnt/trunk/lnt/server/db/v4db.py Tue Jul 11 13:58:38 2017
@@ -1,7 +1,6 @@
from lnt.testing.util.commands import fatal
import glob
import yaml
-import logging
try:
import threading
@@ -15,6 +14,7 @@ import lnt.testing
import lnt.server.db.testsuitedb
import lnt.server.db.migrate
+from lnt.util import logger
from lnt.server.db import testsuite
import lnt.server.db.util
@@ -90,8 +90,8 @@ class V4DB(object):
data = yaml.load(schema_fd)
suite = testsuite.TestSuite.from_json(data)
self.testsuite.add_suite(suite)
- logging.info("External TestSuite '%s' loaded from '%s'" %
- (suite.name, schema_file))
+ logger.info("External TestSuite '%s' loaded from '%s'" %
+ (suite.name, schema_file))
def _load_shemas(self):
schemasDir = self.config.schemasDir
@@ -99,8 +99,8 @@ class V4DB(object):
try:
self._load_schema_file(schema_file)
except:
- logging.error("Could not load schema '%s'" % schema_file,
- exc_info=True)
+ logger.error("Could not load schema '%s'" % schema_file,
+ exc_info=True)
def __init__(self, path, config, baseline_revision=0, echo=False):
# If the path includes no database type, assume sqlite.
Modified: lnt/trunk/lnt/server/instance.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/instance.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/server/instance.py (original)
+++ lnt/trunk/lnt/server/instance.py Tue Jul 11 13:58:38 2017
@@ -5,7 +5,8 @@ import tempfile
import lnt.server.config
-from lnt.testing.util.commands import note, warning, error, fatal
+from lnt.util import logger
+from lnt.testing.util.commands import fatal
class Instance(object):
"""
@@ -33,7 +34,7 @@ class Instance(object):
# into a temporary directory.
tmpdir = tempfile.mkdtemp(suffix='lnt')
- note("extracting input tarfile %r to %r" % (path, tmpdir))
+ logger.info("extracting input tarfile %r to %r" % (path, tmpdir))
tf = tarfile.open(path)
tf.extractall(tmpdir)
Modified: lnt/trunk/lnt/server/reporting/analysis.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/reporting/analysis.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/server/reporting/analysis.py (original)
+++ lnt/trunk/lnt/server/reporting/analysis.py Tue Jul 11 13:58:38 2017
@@ -1,16 +1,11 @@
"""
Utilities for helping with the analysis of data, for reporting purposes.
"""
-
-import logging
-
+from lnt.util import logger
from lnt.util import stats
from lnt.server.ui import util
from lnt.testing import FAIL
-LOGGER_NAME = "lnt.server.ui.app"
-logger = logging.getLogger(LOGGER_NAME)
-
REGRESSED = 'REGRESSED'
IMPROVED = 'IMPROVED'
UNCHANGED_PASS = 'UNCHANGED_PASS'
Modified: lnt/trunk/lnt/server/ui/app.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/app.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/app.py (original)
+++ lnt/trunk/lnt/server/ui/app.py Tue Jul 11 13:58:38 2017
@@ -29,7 +29,7 @@ import lnt.server.ui.profile_views
import lnt.server.ui.regression_views
import lnt.server.ui.views
from lnt.server.ui.api import load_api_resources
-from lnt.testing.util.commands import warning, error
+from lnt.util import logger
class RootSlashPatchMiddleware(object):
@@ -127,7 +127,7 @@ class Request(flask.Request):
def close(self):
t = self.elapsed_time()
if t > 10:
- warning("Request {} took {}s".format(self.url, t))
+ logger.warning("Request {} took {}s".format(self.url, t))
db = getattr(self, 'db', None)
if db is not None:
db.close()
@@ -138,7 +138,7 @@ class LNTExceptionLoggerFlask(flask.Flas
def log_exception(self, exc_info):
# We need to stringify the traceback, since logs are sent via
# pickle.
- error("Exception: " + traceback.format_exc())
+ logger.error("Exception: " + traceback.format_exc())
class App(LNTExceptionLoggerFlask):
Modified: lnt/trunk/lnt/server/ui/regression_views.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/regression_views.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/regression_views.py (original)
+++ lnt/trunk/lnt/server/ui/regression_views.py Tue Jul 11 13:58:38 2017
@@ -18,9 +18,9 @@ from lnt.server.ui.decorators import v4_
import lnt.server.reporting.analysis
from lnt.server.ui.globals import v4_url_for
+from lnt.util import logger
from lnt.server.ui.util import FLASH_DANGER, FLASH_SUCCESS
from lnt.server.reporting.analysis import REGRESSED
-from lnt.testing.util.commands import note
import lnt.server.db.fieldchange
from lnt.server.db.regression import RegressionState, new_regression
from lnt.server.db.regression import get_first_runs_of_fieldchange
@@ -476,7 +476,7 @@ def v4_make_regression(machine_id, test_
regression.state = RegressionState.ACTIVE
ts.commit()
- note("Manually created new regressions: {}".format(regression.id))
+ logger.info("Manually created new regressions: {}".format(regression.id))
flash("Created " + regression.title, FLASH_SUCCESS)
return redirect(v4_url_for("v4_regression_detail", id=regression.id))
Modified: lnt/trunk/lnt/testing/__init__.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/testing/__init__.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/testing/__init__.py (original)
+++ lnt/trunk/lnt/testing/__init__.py Tue Jul 11 13:58:38 2017
@@ -8,7 +8,7 @@ data suitable for submitting to the serv
import datetime
import re
-import logging
+from lnt.util import logger
try:
import json
@@ -328,7 +328,7 @@ def upgrade_1_to_2(data, ts_name):
upgrade = _upgrades.get(tag)
if upgrade is None:
- logging.warn("No upgrade schema known for '%s'\n" % tag)
+ logger.warning("No upgrade schema known for '%s'\n" % tag)
upgrade = _default_upgrade
# Flatten Machine.Info into machine
@@ -386,7 +386,7 @@ def upgrade_1_to_2(data, ts_name):
if dot != '.':
raise ValueError("Tests/%s: name does not end in .metric" %
test_Name)
- logging.warning("Found unknown metric '%s'" % metric)
+ logger.warning("Found unknown metric '%s'" % metric)
upgrade.metric_rename['.'+metric] = metric
result_test = result_tests_dict.get(name)
Modified: lnt/trunk/lnt/testing/profile/perf.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/testing/profile/perf.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/testing/profile/perf.py (original)
+++ lnt/trunk/lnt/testing/profile/perf.py Tue Jul 11 13:58:38 2017
@@ -1,7 +1,7 @@
import json, os, traceback
from profile import ProfileImpl
from profilev1impl import ProfileV1
-from lnt.testing.util.commands import warning
+from lnt.util import logger
try:
import cPerf
@@ -41,5 +41,5 @@ class LinuxPerfProfile(ProfileImpl):
except:
if propagateExceptions:
raise
- warning(traceback.format_exc())
+ logger.warning(traceback.format_exc())
return None
Modified: lnt/trunk/lnt/testing/util/commands.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/testing/util/commands.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/testing/util/commands.py (original)
+++ lnt/trunk/lnt/testing/util/commands.py Tue Jul 11 13:58:38 2017
@@ -6,8 +6,9 @@ import errno
import os
import sys
-import logging
import time
+from lnt.util import logger
+
try:
from flask import current_app, flash
except:
@@ -16,21 +17,11 @@ except:
pass
# FIXME: Find a better place for this code.
from lnt.server.ui.util import FLASH_INFO
-LOGGER_NAME = "lnt.server.ui.app"
-
-
-def get_logger():
- logger = logging.getLogger(LOGGER_NAME)
- return logger
-
-note = lambda message: get_logger().info(message)
-warning = lambda message: get_logger().warning(message)
-error = lambda message: get_logger().error(message)
def visible_note(message):
"""Log a note to the logger as well as page with a flash."""
- get_logger().info(message)
+ logger.info(message)
try:
flash(message, FLASH_INFO)
except RuntimeError:
@@ -49,16 +40,16 @@ def timed(func):
delta = t_end - t_start
msg = '%r (%s, %r) %2.2f sec' % (func.__name__, short_args, kw, delta)
if delta > 10:
- warning(msg)
+ logger.warning(msg)
else:
- note(msg)
+ logger.info(msg)
return result
return timed
def fatal(message):
- get_logger().critical(message)
+ logger.critical(message)
sys.exit(1)
@@ -146,7 +137,7 @@ def resolve_command_path(name):
# Otherwise we most likely have a command name, try to look it up.
path = which(name)
if path is not None:
- note("resolved command %r to path %r" % (name, path))
+ logger.info("resolved command %r to path %r" % (name, path))
return path
# If that failed just return the original name.
Modified: lnt/trunk/lnt/testing/util/compilers.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/testing/util/compilers.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/testing/util/compilers.py (original)
+++ lnt/trunk/lnt/testing/util/compilers.py Tue Jul 11 13:58:38 2017
@@ -3,8 +3,8 @@ import os
import re
import tempfile
+from lnt.util import logger
from commands import capture
-from commands import error
from commands import fatal
from commands import rm_f
@@ -79,9 +79,11 @@ def get_cc_info(path, cc_flags=[]):
fatal("unable to determine cc1 binary: %r: %r" % (cc, ln))
cc1_binary, = m.groups()
if cc1_binary is None:
- error("unable to find compiler cc1 binary: %r: %r" % (cc, cc_version))
+ logger.error("unable to find compiler cc1 binary: %r: %r" %
+ (cc, cc_version))
if version_ln is None:
- error("unable to find compiler version: %r: %r" % (cc, cc_version))
+ logger.error("unable to find compiler version: %r: %r" %
+ (cc, cc_version))
else:
m = re.match(r'(.*) version ([^ ]*) +(\([^(]*\))(.*)', version_ln)
if m is not None:
@@ -92,8 +94,8 @@ def get_cc_info(path, cc_flags=[]):
if m is not None:
cc_name,cc_version_num = m.groups()
else:
- error("unable to determine compiler version: %r: %r" % (
- cc, version_ln))
+ logger.error("unable to determine compiler version: %r: %r" %
+ (cc, version_ln))
cc_name = "unknown"
# Compute normalized compiler name and type. We try to grab source
@@ -118,7 +120,8 @@ def get_cc_info(path, cc_flags=[]):
cc_build = 'PROD'
cc_src_tag, = m.groups()
else:
- error('unable to determine gcc build version: %r' % cc_build_string)
+ logger.error('unable to determine gcc build version: %r' %
+ cc_build_string)
elif (cc_name in ('clang', 'LLVM', 'Debian clang', 'Apple clang', 'Apple LLVM') and
(cc_extra == '' or 'based on LLVM' in cc_extra or
(cc_extra.startswith('(') and cc_extra.endswith(')')))):
@@ -147,8 +150,9 @@ def get_cc_info(path, cc_flags=[]):
if m:
cc_src_branch,cc_src_revision = m.groups()
else:
- error('unable to determine Clang development build info: %r' % (
- (cc_name, cc_build_string, cc_extra),))
+ logger.error('unable to determine '
+ 'Clang development build info: %r' %
+ ((cc_name, cc_build_string, cc_extra),))
cc_src_branch = ""
m = re.search('clang-([0-9.]*)', cc_src_branch)
@@ -175,8 +179,9 @@ def get_cc_info(path, cc_flags=[]):
cc_alt_src_branch = ""
else:
- error('unable to determine Clang development build info: %r' % (
- (cc_name, cc_build_string, cc_extra),))
+ logger.error('unable to determine '
+ 'Clang development build info: %r' % (
+ (cc_name, cc_build_string, cc_extra),))
elif cc_name == 'gcc' and 'LLVM build' in cc_extra:
llvm_capable = True
@@ -190,11 +195,11 @@ def get_cc_info(path, cc_flags=[]):
else:
cc_build = 'DEV'
else:
- error("unable to determine compiler name: %r" % ((cc_name,
- cc_build_string),))
+ logger.error("unable to determine compiler name: %r" %
+ ((cc_name, cc_build_string),))
if cc_build is None:
- error("unable to determine compiler build: %r" % cc_version)
+ logger.error("unable to determine compiler build: %r" % cc_version)
# If LLVM capable, fetch the llvm target instead.
if llvm_capable:
@@ -202,8 +207,8 @@ def get_cc_info(path, cc_flags=[]):
if m:
cc_target, = m.groups()
else:
- error("unable to determine LLVM compiler target: %r: %r" %
- (cc, cc_target_assembly))
+ logger.error("unable to determine LLVM compiler target: %r: %r" %
+ (cc, cc_target_assembly))
cc_exec_hash = hashlib.sha1()
cc_exec_hash.update(open(cc,'rb').read())
Modified: lnt/trunk/lnt/testing/util/rcs.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/testing/util/rcs.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/testing/util/rcs.py (original)
+++ lnt/trunk/lnt/testing/util/rcs.py Tue Jul 11 13:58:38 2017
@@ -1,5 +1,6 @@
import re
import os
+from lnt.util import logger
from lnt.testing.util import commands
_git_svn_id_re = re.compile("^ git-svn-id: [^@]*@([0-9]+) .*$")
@@ -22,7 +23,7 @@ def get_source_version(path):
last_line = res.split("\n")[-1]
m = _git_svn_id_re.match(last_line)
if not m:
- commands.warning("unable to understand git svn log: %r" % res)
+ logger.warning("unable to understand git svn log: %r" % res)
return
return m.group(1)
elif os.path.exists(os.path.join(path, ".git")):
Modified: lnt/trunk/lnt/testing/util/valgrind.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/testing/util/valgrind.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/testing/util/valgrind.py (original)
+++ lnt/trunk/lnt/testing/util/valgrind.py Tue Jul 11 13:58:38 2017
@@ -1,8 +1,7 @@
"""
Utilities for working with Valgrind.
"""
-
-from lnt.testing.util.commands import warning
+from lnt.util import logger
# See:
# http://valgrind.org/docs/manual/cl-format.html#cl-format.overview
@@ -40,19 +39,22 @@ class CalltreeData(object):
description_lines.append(value.strip())
elif key == 'cmd':
if command is not None:
- warning("unexpected multiple 'cmd' keys in %r" % (path,))
+ logger.warning("unexpected multiple 'cmd' keys in %r" %
+ (path,))
command = value.strip()
elif key == 'events':
if events is not None:
- warning("unexpected multiple 'events' keys in %r" % (path,))
+ logger.warning("unexpected multiple 'events' keys in %r" %
+ (path,))
events = value.split()
elif key == 'positions':
if positions is not initial_positions:
- warning("unexpected multiple 'positions' keys in %r" % (
- path,))
+ logger.warning(
+ "unexpected multiple 'positions' keys in %r" %
+ (path,))
positions = value.split()
else:
- warning("found unknown key %r in %r" % (key, path))
+ logger.warning("found unknown key %r in %r" % (key, path))
# Validate that required fields were present.
if events is None:
Modified: lnt/trunk/lnt/tests/compile.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/tests/compile.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/tests/compile.py (original)
+++ lnt/trunk/lnt/tests/compile.py Tue Jul 11 13:58:38 2017
@@ -18,10 +18,11 @@ import click
import lnt.testing
import lnt.testing.util.compilers
from lnt.testing.util import commands, machineinfo
-from lnt.testing.util.commands import note, fatal, resolve_command_path
+from lnt.testing.util.commands import fatal, resolve_command_path
from lnt.testing.util.misc import timestamp
from lnt.tests import builtintest
from lnt.util import stats
+from lnt.util import logger
# For each test, compile with all these combinations of flags.
@@ -828,7 +829,8 @@ class CompileTest(builtintest.BuiltinTes
if opts.cc and opts.cxx is None:
opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
if opts.cxx is not None:
- note("inferred C++ compiler under test as: %r" % (opts.cxx,))
+ logger.info("inferred C++ compiler under test as: %r" %
+ (opts.cxx,))
if opts.cxx is None:
self._fatal('--cxx is required (and could not be inferred)')
@@ -940,20 +942,21 @@ class CompileTest(builtintest.BuiltinTes
else:
# Otherwise, use the inferred run order.
variables['run_order'] = cc_info['inferred_run_order']
- note("inferred run order to be: %r" % (variables['run_order'],))
+ logger.info("inferred run order to be: %r" %
+ (variables['run_order'],))
if opts.verbose:
format = pprint.pformat(variables)
msg = '\n\t'.join(['using variables:'] + format.splitlines())
- note(msg)
+ logger.info(msg)
format = pprint.pformat(machine_info)
msg = '\n\t'.join(['using machine info:'] + format.splitlines())
- note(msg)
+ logger.info(msg)
format = pprint.pformat(run_info)
msg = '\n\t'.join(['using run info:'] + format.splitlines())
- note(msg)
+ logger.info(msg)
# Compute the set of flags to test.
if not opts.flags_to_test:
Modified: lnt/trunk/lnt/tests/nt.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/tests/nt.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/tests/nt.py (original)
+++ lnt/trunk/lnt/tests/nt.py Tue Jul 11 13:58:38 2017
@@ -19,7 +19,7 @@ import lnt.testing
import lnt.testing.util.compilers
import lnt.util.ImportData as ImportData
-from lnt.testing.util.commands import note, warning, fatal
+from lnt.testing.util.commands import fatal
from lnt.testing.util.commands import capture, mkdir_p, which
from lnt.testing.util.commands import resolve_command_path
@@ -29,6 +29,7 @@ from lnt.testing.util.misc import timest
from lnt.server.reporting.analysis import UNCHANGED_PASS, UNCHANGED_FAIL
from lnt.server.reporting.analysis import REGRESSED, IMPROVED
+from lnt.util import logger
from lnt.util import ImportData
import builtintest
@@ -420,7 +421,8 @@ class TestConfiguration(object):
if llvm_arch is not None:
make_variables['ARCH'] = llvm_arch
else:
- warning("unable to infer ARCH, some tests may not run correctly!")
+ logger.warning("unable to infer ARCH, " +
+ "some tests may not run correctly!")
# Add in any additional make flags passed in via --make-param.
for entry in self.make_parameters:
@@ -1117,8 +1119,8 @@ def run_test(nick_prefix, iteration, con
else:
name,value = entry.split('=', 1)
if name in target:
- warning("user parameter %r overwrote existing value: %r" % (
- name, target.get(name)))
+ logger.warning("parameter %r overwrote existing value: %r" %
+ (name, target.get(name)))
print target,name,value
target[name] = value
@@ -1351,8 +1353,7 @@ def _process_reruns(config, server_reply
except KeyError:
# Server might send us back an error.
if server_reply.get('error', None):
- warning("Server returned an error:" +
- server_reply['error'])
+ logger.warning("Server returned an error:" + server_reply['error'])
fatal("No Server results. Cannot do reruns.")
logging.fatal()
# Holds the combined local and server results.
@@ -1442,13 +1443,13 @@ def _process_reruns(config, server_reply
# Now lets do the reruns.
rerun_results = []
summary = "Rerunning {} of {} benchmarks."
- note(summary.format(len(rerunable_benches),
- len(collated_results.values())))
+ logger.info(summary.format(len(rerunable_benches),
+ len(collated_results.values())))
for i, bench in enumerate(rerunable_benches):
- note("Rerunning: {} [{}/{}]".format(bench.name,
- i + 1,
- len(rerunable_benches)))
+ logger.info("Rerunning: {} [{}/{}]".format(bench.name,
+ i + 1,
+ len(rerunable_benches)))
fresh_samples = rerun_test(config,
bench.name,
@@ -1734,7 +1735,7 @@ class NTTest(builtintest.BuiltinTest):
# Deprecate --simple
if opts.test_simple:
- warning("--simple is deprecated, it is the default.")
+ logger.warning("--simple is deprecated, it is the default.")
del opts.test_simple
if opts.test_style == "simple":
@@ -1780,8 +1781,8 @@ class NTTest(builtintest.BuiltinTest):
opts.cxx_under_test = lnt.testing.util.compilers.infer_cxx_compiler(
opts.cc_under_test)
if opts.cxx_under_test is not None:
- note("inferred C++ compiler under test as: %r" % (
- opts.cxx_under_test,))
+ logger.info("inferred C++ compiler under test as: %r" %
+ (opts.cxx_under_test,))
# The cxx_under_test option is required if we are testing C++.
if opts.test_cxx and opts.cxx_under_test is None:
@@ -1807,7 +1808,8 @@ class NTTest(builtintest.BuiltinTest):
# given a C++ compiler that doesn't exist, reset it to just use the
# given C compiler.
if not os.path.exists(opts.cxx_under_test):
- warning("invalid cxx_under_test, falling back to cc_under_test")
+ logger.warning("invalid cxx_under_test, " +
+ "falling back to cc_under_test")
opts.cxx_under_test = opts.cc_under_test
if opts.without_llvm:
@@ -1869,8 +1871,8 @@ class NTTest(builtintest.BuiltinTest):
# Warn if the user asked to run under an iOS simulator SDK, but
# didn't set an isysroot for compilation.
if opts.isysroot is None:
- warning('expected --isysroot when executing with '
- '--ios-simulator-sdk')
+ logger.warning('expected --isysroot when executing with '
+ '--ios-simulator-sdk')
config = TestConfiguration(vars(opts), timestamp())
# FIXME: We need to validate that there is no configured output in the
@@ -1879,9 +1881,10 @@ class NTTest(builtintest.BuiltinTest):
# These notes are used by the regression tests to check if we've handled
# flags correctly.
- note('TARGET_FLAGS: {}'.format(' '.join(config.target_flags)))
+ logger.info('TARGET_FLAGS: {}'.format(' '.join(config.target_flags)))
if config.qemu_user_mode:
- note('QEMU_USER_MODE_COMMAND: {}'.format(config.qemu_user_mode_command))
+ logger.info('QEMU_USER_MODE_COMMAND: {}'
+ .format(config.qemu_user_mode_command))
# Multisample, if requested.
if opts.multisample is not None:
@@ -1956,8 +1959,8 @@ class NTTest(builtintest.BuiltinTest):
result = ServerUtil.submitFile(server, report_path,
commit, False)
except (urllib2.HTTPError, urllib2.URLError) as e:
- warning("submitting to {} failed with {}".format(
- server, e))
+ logger.warning("submitting to {} failed with {}"
+ .format(server, e))
else:
# Simulate a submission to retrieve the results report.
# Construct a temporary database and import the result.
Modified: lnt/trunk/lnt/tests/test_suite.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/tests/test_suite.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/tests/test_suite.py (original)
+++ lnt/trunk/lnt/tests/test_suite.py Tue Jul 11 13:58:38 2017
@@ -17,11 +17,12 @@ from collections import defaultdict
import jinja2
import click
+from lnt.util import logger
import lnt.testing
import lnt.testing.profile
import lnt.testing.util.compilers
from lnt.testing.util.misc import timestamp
-from lnt.testing.util.commands import note, fatal, warning
+from lnt.testing.util.commands import fatal
from lnt.testing.util.commands import mkdir_p
from lnt.testing.util.commands import resolve_command_path, isexecfile
@@ -93,7 +94,7 @@ def _importProfile(name_filename):
name, filename = name_filename
if not os.path.exists(filename):
- warning('Profile %s does not exist' % filename)
+ logger.warning('Profile %s does not exist' % filename)
return None
pf = lnt.testing.profile.profile.Profile.fromFile(filename)
@@ -334,8 +335,8 @@ class TestSuiteTest(BuiltinTest):
self.opts.cxx = \
lnt.testing.util.compilers.infer_cxx_compiler(self.opts.cc)
if self.opts.cxx is not None:
- note("Inferred C++ compiler under test as: %r"
- % (self.opts.cxx,))
+ logger.info("Inferred C++ compiler under test as: %r"
+ % (self.opts.cxx,))
else:
self._fatal("unable to infer --cxx - set it manually.")
else:
@@ -439,8 +440,8 @@ class TestSuiteTest(BuiltinTest):
# We don't support compiling without testing as we can't get compile-
# time numbers from LIT without running the tests.
if opts.compile_multisample > opts.exec_multisample:
- note("Increasing number of execution samples to %d" %
- opts.compile_multisample)
+ logger.info("Increasing number of execution samples to %d" %
+ opts.compile_multisample)
opts.exec_multisample = opts.compile_multisample
if opts.auto_name:
@@ -448,7 +449,7 @@ class TestSuiteTest(BuiltinTest):
cc_info = self._get_cc_info(cmake_vars)
cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
opts.label += "__%s__%s" % (cc_nick, cc_info['cc_target'].split('-')[0])
- note('Using nickname: %r' % opts.label)
+ logger.info('Using nickname: %r' % opts.label)
# When we can't detect the clang version we use 0 instead. That
# is a horrible failure mode because all of our data ends up going
@@ -540,15 +541,15 @@ class TestSuiteTest(BuiltinTest):
return self.opts.threads
def _check_call(self, *args, **kwargs):
- note('Execute: %s' % ' '.join(args[0]))
+ logger.info('Execute: %s' % ' '.join(args[0]))
if 'cwd' in kwargs:
- note(' (In %s)' % kwargs['cwd'])
+ logger.info(' (In %s)' % kwargs['cwd'])
return subprocess.check_call(*args, **kwargs)
def _check_output(self, *args, **kwargs):
- note('Execute: %s' % ' '.join(args[0]))
+ logger.info('Execute: %s' % ' '.join(args[0]))
if 'cwd' in kwargs:
- note(' (In %s)' % kwargs['cwd'])
+ logger.info(' (In %s)' % kwargs['cwd'])
output = subprocess.check_output(*args, **kwargs)
sys.stdout.write(output)
return output
@@ -645,7 +646,7 @@ class TestSuiteTest(BuiltinTest):
cmake_flags += ['-C', cache]
for l in lines:
- note(l)
+ logger.info(l)
cmake_cmd = [cmake_cmd] + cmake_flags + [self._test_suite_dir()] + \
['-D%s=%s' % (k, v) for k, v in defs.items()]
@@ -672,7 +673,7 @@ class TestSuiteTest(BuiltinTest):
target = self.opts.only_test[1]
subdir = os.path.join(*components)
- note('Building...')
+ logger.info('Building...')
if not self.opts.succinct:
args = ["VERBOSE=1", target]
else:
@@ -708,16 +709,17 @@ class TestSuiteTest(BuiltinTest):
nr_threads = self._test_threads()
if profile:
if nr_threads != 1:
- warning('Gathering profiles with perf requires -j 1 as ' +
- 'perf record cannot be run multiple times ' +
- 'simultaneously. Overriding -j %s to -j 1' % nr_threads)
+ logger.warning('Gathering profiles with perf requires -j 1 ' +
+ 'as perf record cannot be run multiple times ' +
+ 'simultaneously. Overriding -j %s to -j 1' % \
+ nr_threads)
nr_threads = 1
extra_args += ['--param', 'profile=perf']
if self.opts.perf_events:
extra_args += ['--param',
'perf_profile_events=%s' % self.opts.perf_events]
- note('Testing...')
+ logger.info('Testing...')
try:
self._check_call([lit_cmd,
'-v',
@@ -878,8 +880,8 @@ class TestSuiteTest(BuiltinTest):
# Now import the profiles in parallel.
if profiles_to_import:
- note('Importing %d profiles with %d threads...' %
- (len(profiles_to_import), multiprocessing.cpu_count()))
+ logger.info('Importing %d profiles with %d threads...' %
+ (len(profiles_to_import), multiprocessing.cpu_count()))
TIMEOUT = 800
try:
pool = multiprocessing.Pool()
@@ -889,9 +891,9 @@ class TestSuiteTest(BuiltinTest):
for sample in samples
if sample is not None])
except multiprocessing.TimeoutError:
- warning('Profiles had not completed importing after %s seconds.'
- % TIMEOUT)
- note('Aborting profile import and continuing')
+ logger.warning('Profiles had not completed importing after ' +
+ '%s seconds.' % TIMEOUT)
+ logger.info('Aborting profile import and continuing')
if self.opts.single_result:
# If we got this far, the result we were looking for didn't exist.
@@ -923,7 +925,7 @@ class TestSuiteTest(BuiltinTest):
for patt in patts:
for file in glob.glob(src + patt):
shutil.copy(file, dest)
- note(file + " --> " + dest)
+ logger.info(file + " --> " + dest)
def diagnose(self):
"""Build a triage report that contains information about a test.
@@ -954,10 +956,10 @@ class TestSuiteTest(BuiltinTest):
cmd = self._configure(path, execute=False)
cmd_temps = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-save-temps']
- note(' '.join(cmd_temps))
+ logger.info(' '.join(cmd_temps))
out = subprocess.check_output(cmd_temps)
- note(out)
+ logger.info(out)
# Figure out our test's target.
make_cmd = [self.opts.make, "VERBOSE=1", 'help']
@@ -972,20 +974,20 @@ class TestSuiteTest(BuiltinTest):
make_deps = [self.opts.make, "VERBOSE=1", "timeit-target",
"timeit-host", "fpcmp-host"]
- note(" ".join(make_deps))
+ logger.info(" ".join(make_deps))
p = subprocess.Popen(make_deps,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
std_out, std_err = p.communicate()
- note(std_out)
+ logger.info(std_out)
make_save_temps = [self.opts.make, "VERBOSE=1", short_name]
- note(" ".join(make_save_temps))
+ logger.info(" ".join(make_save_temps))
p = subprocess.Popen(make_save_temps,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
std_out, std_err = p.communicate()
- note(std_out)
+ logger.info(std_out)
with open(report_path + "/build.log", 'w') as f:
f.write(std_out)
# Executable(s) and test file:
@@ -1004,10 +1006,10 @@ class TestSuiteTest(BuiltinTest):
# Now lets do -ftime-report.
cmd_time_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-ftime-report']
- note(' '.join(cmd_time_report))
+ logger.info(' '.join(cmd_time_report))
out = subprocess.check_output(cmd_time_report)
- note(out)
+ logger.info(out)
make_time_report = [self.opts.make, "VERBOSE=1", short_name]
p = subprocess.Popen(make_time_report,
@@ -1017,15 +1019,15 @@ class TestSuiteTest(BuiltinTest):
with open(report_path + "/time-report.txt", 'w') as f:
f.write(std_err)
- note("Wrote: " + report_path + "/time-report.txt")
+ logger.info("Wrote: " + report_path + "/time-report.txt")
# Now lets do -llvm -stats.
cmd_stats_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-mllvm -stats']
- note(' '.join(cmd_stats_report))
+ logger.info(' '.join(cmd_stats_report))
out = subprocess.check_output(cmd_stats_report)
- note(out)
+ logger.info(out)
make_stats_report = [self.opts.make, "VERBOSE=1", short_name]
p = subprocess.Popen(make_stats_report,
@@ -1035,7 +1037,7 @@ class TestSuiteTest(BuiltinTest):
with open(report_path + "/stats-report.txt", 'w') as f:
f.write(std_err)
- note("Wrote: " + report_path + "/stats-report.txt")
+ logger.info("Wrote: " + report_path + "/stats-report.txt")
# Collect Profile:
if "Darwin" in platform.platform():
@@ -1062,7 +1064,7 @@ class TestSuiteTest(BuiltinTest):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
std_out, std_err = p.communicate()
- warning("Using sudo to collect execution trace.")
+ logger.warning("Using sudo to collect execution trace.")
make_save_temps = sudo + [self.opts.lit, short_name + ".test"]
p = subprocess.Popen(make_save_temps,
stdout=subprocess.PIPE,
@@ -1070,17 +1072,18 @@ class TestSuiteTest(BuiltinTest):
std_out, std_err = p.communicate()
sys.stdout.write(std_out)
sys.stderr.write(std_err)
- warning("Tests may fail because of iprofiler's output.")
+ logger.warning("Tests may fail because of iprofiler's output.")
# The dtps file will be saved as root, make it so
# that we can read it.
chmod = sudo + ["chown", "-R", getpass.getuser(), short_name + ".dtps"]
subprocess.call(chmod)
profile = local_path + "/" + short_name + ".dtps"
shutil.copytree(profile, report_path + "/" + short_name + ".dtps")
- note(profile + "-->" + report_path)
+ logger.info(profile + "-->" + report_path)
else:
- warning("Skipping execution profiling because this is not Darwin.")
- note("Report produced in: " + report_path)
+ logger.warning("Skipping execution profiling because " +
+ "this is not Darwin.")
+ logger.info("Report produced in: " + report_path)
# Run through the rest of LNT, but don't allow this to be submitted
# because there is no data.
Modified: lnt/trunk/lnt/util/ImportData.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/util/ImportData.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/util/ImportData.py (original)
+++ lnt/trunk/lnt/util/ImportData.py Tue Jul 11 13:58:38 2017
@@ -3,9 +3,9 @@ import collections
import lnt.testing
import lnt.formats
import lnt.server.reporting.analysis
-from lnt.testing.util.commands import note
from lnt.util import NTEmailReport
from lnt.util import async_ops
+from lnt.util import logger
def import_and_report(config, db_name, db, file, format, ts_name,
commit=False, show_sample_count=False,
@@ -135,7 +135,7 @@ def import_and_report(config, db_name, d
result['result_url'] = "db_{}/v4/{}/{}".format(db_name, ts_name, run.id)
result['report_time'] = time.time() - importStartTime
result['total_time'] = time.time() - startTime
- note("Successfully created {}".format(result['result_url']))
+ logger.info("Successfully created {}".format(result['result_url']))
# If this database has a shadow import configured, import the run into that
# database as well.
if config and config.databases[db_name].shadow_import:
Modified: lnt/trunk/lnt/util/__init__.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/util/__init__.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/util/__init__.py (original)
+++ lnt/trunk/lnt/util/__init__.py Tue Jul 11 13:58:38 2017
@@ -1 +1,5 @@
+import logging
+
+logger = logging.getLogger("lnt.server.ui.app")
+
__all__ = []
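One detail worth noting: logging.getLogger() returns a per-name
singleton, so every "from lnt.util import logger" in the files in this
patch yields the same object that init_logger() attaches its handler
to. A standalone sketch of that guarantee:

    import logging

    # Same name => same logger object, anywhere in the process.
    a = logging.getLogger("lnt.server.ui.app")
    b = logging.getLogger("lnt.server.ui.app")
    assert a is b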
Modified: lnt/trunk/lnt/util/async_ops.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/util/async_ops.py?rev=307711&r1=307710&r2=307711&view=diff
==============================================================================
--- lnt/trunk/lnt/util/async_ops.py (original)
+++ lnt/trunk/lnt/util/async_ops.py Tue Jul 11 13:58:38 2017
@@ -11,7 +11,6 @@ suite that we need inside each subproces
import atexit
import os
import time
-import logging
from flask import current_app, g
import sys
import lnt.server.db.fieldchange as fieldchange
@@ -23,7 +22,7 @@ import contextlib
import multiprocessing
from multiprocessing import Pool, TimeoutError, Manager, Process
from threading import Lock
-from lnt.testing.util.commands import note, warning, timed, error
+from lnt.util import logger
NUM_WORKERS = 4 # The number of subprocesses to spawn per LNT process.
WORKERS = None # The worker pool.
WORKERS_LOCK = Lock()
@@ -38,7 +37,7 @@ def launch_workers():
WORKERS_LOCK.acquire()
try:
if not WORKERS:
- note("Starting workers")
+ logger.info("Starting workers")
manager = Manager()
WORKERS = True
try:
@@ -58,9 +57,9 @@ def sig_handler(signo, frame):
def cleanup():
- note("Running process cleanup.")
+ logger.info("Running process cleanup.")
for p in JOBS:
- note("Waiting for %s %s" % (p.name, p.pid))
+ logger.info("Waiting for %s %s" % (p.name, p.pid))
if p.is_alive:
p.join()
@@ -87,18 +86,19 @@ def check_workers(is_logged):
if still_running > 5:
# This could be run outside of the application context, so use
# full logger name.
- logging.getLogger("lnt.server.ui.app").warning(msg)
+ logger.warning(msg)
elif still_running > 0:
- logging.getLogger("lnt.server.ui.app").info(msg)
+ logger.info(msg)
else:
- logging.getLogger("lnt.server.ui.app").info("Job queue empty.")
+ logger.info("Job queue empty.")
return len(JOBS)
def async_run_job(job, db_name, ts, func_args, db_config):
"""Send a job to the async wrapper in the subprocess."""
# If the run is not in the database, we can't do anything more.
- note("Queuing background job to process fieldchanges " + str(os.getpid()))
+ logger.info("Queuing background job to process fieldchanges " +
+ str(os.getpid()))
launch_workers()
check_workers(True)
@@ -133,7 +133,8 @@ def async_wrapper(job, ts_args, func_arg
lnt.server.db.v4db.V4DB.close_all_engines()
clean_db = True
sleep(3)
- note("Running async wrapper: {} ".format(job.__name__)+ str(os.getpid()))
+ logger.info("Running async wrapper: {} ".format(job.__name__) +
+ str(os.getpid()))
config = ts_args['db_info']
_v4db = config.get_database(ts_args['db'])
#with contextlib.closing(_v4db) as db:
@@ -145,13 +146,14 @@ def async_wrapper(job, ts_args, func_arg
msg = "Finished: {name} in {time:.2f}s ".format(name=job.__name__,
time=delta)
if delta < 100:
- note(msg)
+ logger.info(msg)
else:
- warning(msg)
+ logger.warning(msg)
except:
# Put all exception text into an exception and raise that for our
# parent process.
- error("Subprocess failed with:" + "".join(traceback.format_exception(*sys.exc_info())))
+ logger.error("Subprocess failed with:" +
+ "".join(traceback.format_exception(*sys.exc_info())))
sys.exit(1)
sys.exit(0)
@@ -160,10 +162,10 @@ def make_callback():
app = current_app
def async_job_finished(arg):
if isinstance(arg, Exception):
- logging.getLogger("lnt.server.ui.app").error(str(arg))
+ logger.error(str(arg))
raise arg
if isinstance(arg, list):
for log_entry in arg:
- logging.getLogger("lnt.server.ui.app").handle(log_entry)
+ logger.handle(log_entry)
check_workers()
return async_job_finished