[llvm-commits] [LNT] r161335 - in /lnt/trunk/lnt: db/perfdb.py db/perfdbsummary.py db/runinfo.py lnttool/import_data.py lnttool/main.py lnttool/report.py server/config.py server/ui/app.py server/ui/graphutil.py server/ui/views.py util/ImportData.py util/NTEmailReport.py util/NTUtil.py

Daniel Dunbar daniel at zuster.org
Mon Aug 6 13:02:49 PDT 2012


Author: ddunbar
Date: Mon Aug  6 15:02:49 2012
New Revision: 161335

URL: http://llvm.org/viewvc/llvm-project?rev=161335&view=rev
Log:
Remove the v0.3 database (PerfDB) itself and its associated code (such as reporting).
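
For consumers that were constructing the old database layer directly,
the replacement entry point is the v0.4 layer in lnt.server.db.v4db,
which the modified files below continue to import. A minimal sketch of
the switch; the V4DB constructor arguments here are an assumption, not
something this diff shows:

    # Old (removed in this commit):
    #   from lnt.db import perfdb
    #   db = perfdb.PerfDB('sqlite:///lnt.db')
    # New (v0.4 layer):
    import lnt.server.db.v4db
    db = lnt.server.db.v4db.V4DB('sqlite:///lnt.db', config=None)  # arg list is an assumption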

Removed:
    lnt/trunk/lnt/db/perfdb.py
    lnt/trunk/lnt/db/perfdbsummary.py
    lnt/trunk/lnt/lnttool/report.py
    lnt/trunk/lnt/server/ui/graphutil.py
    lnt/trunk/lnt/util/NTUtil.py
Modified:
    lnt/trunk/lnt/db/runinfo.py
    lnt/trunk/lnt/lnttool/import_data.py
    lnt/trunk/lnt/lnttool/main.py
    lnt/trunk/lnt/server/config.py
    lnt/trunk/lnt/server/ui/app.py
    lnt/trunk/lnt/server/ui/views.py
    lnt/trunk/lnt/util/ImportData.py
    lnt/trunk/lnt/util/NTEmailReport.py

Removed: lnt/trunk/lnt/db/perfdb.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/db/perfdb.py?rev=161334&view=auto
==============================================================================
--- lnt/trunk/lnt/db/perfdb.py (original)
+++ lnt/trunk/lnt/db/perfdb.py (removed)
@@ -1,495 +0,0 @@
-#!/usr/bin/python
-
-###
-# SQLAlchemy database layer
-
-import sqlalchemy
-import sqlalchemy.ext.declarative
-import sqlalchemy.orm
-from sqlalchemy import *
-from sqlalchemy.schema import Index
-from sqlalchemy.orm import relation, backref
-from sqlalchemy.orm.collections import attribute_mapped_collection
-
-Base = sqlalchemy.ext.declarative.declarative_base()
-class Revision(Base):
-    __tablename__ = 'Revision'
-
-    id = Column("ID", Integer, primary_key=True)
-    name = Column("Name", String(256))
-    number = Column("Number", Integer)
-
-    def __init__(self, name, number):
-        self.name = name
-        self.number = number
-
-    def __repr__(self):
-        return '%s%r' % (self.__class__.__name__, (self.name, self.number))
-
-class Machine(Base):
-    __tablename__ = 'Machine'
-
-    id = Column("ID", Integer, primary_key=True)
-    name = Column("Name", String(256))
-    number = Column("Number", Integer)
-
-    info = relation('MachineInfo',
-                    collection_class=attribute_mapped_collection('key'),
-                    backref=backref('machine'))
-
-    def __init__(self, name, number):
-        self.name = name
-        self.number = number
-
-    def __repr__(self):
-        return '%s%r' % (self.__class__.__name__, (self.name, self.number))
-
-class MachineInfo(Base):
-    __tablename__ = 'MachineInfo'
-
-    id = Column("ID", Integer, primary_key=True)
-    machine_id = Column("Machine", Integer, ForeignKey('Machine.ID'))
-    key = Column("Key", String(256))
-    value = Column("Value", String(4096))
-
-    def __init__(self, machine, key, value):
-        self.machine = machine
-        self.key = key
-        self.value = value
-
-    def __repr__(self):
-        return '%s%r' % (self.__class__.__name__,
-                         (self.machine, self.key, self.value))
-
-class Run(Base):
-    __tablename__ = 'Run'
-
-    id = Column("ID", Integer, primary_key=True)
-    machine_id = Column("MachineID", Integer, ForeignKey('Machine.ID'))
-    start_time = Column("StartTime", DateTime)
-    end_time = Column("EndTime", DateTime)
-
-    machine = relation(Machine)
-
-    info = relation('RunInfo',
-                    collection_class=attribute_mapped_collection('key'),
-                    backref=backref('run'))
-
-    def __init__(self, machine, start_time, end_time):
-        self.machine = machine
-        self.start_time = start_time
-        self.end_time = end_time
-
-    def __repr__(self):
-        return '%s%r' % (self.__class__.__name__,
-                         (self.machine, self.start_time, self.end_time))
-
-class RunInfo(Base):
-    __tablename__ = 'RunInfo'
-
-    id = Column("ID", Integer, primary_key=True)
-    run_id = Column("Run", Integer, ForeignKey('Run.ID'))
-    key = Column("Key", String(256), index=True)
-    value = Column("Value", String(4096))
-
-    def __init__(self, run, key, value):
-        self.run = run
-        self.key = key
-        self.value = value
-
-    def __repr__(self):
-        return '%s%r' % (self.__class__.__name__,
-                         (self.run, self.key, self.value))
-
-class Test(Base):
-    __tablename__ = 'Test'
-
-    id = Column("ID", Integer, primary_key=True)
-    name = Column("Name", String(512))
-
-    info = relation('TestInfo',
-                    collection_class=attribute_mapped_collection('key'),
-                    backref=backref('test'))
-
-    def __init__(self, name):
-        self.name = name
-
-    def __repr__(self):
-        return '%s%r' % (self.__class__.__name__,
-                         (self.name,))
-
-    def get_parameter_set(self):
-        items = [(k,v.value) for k,v in self.info.items()]
-        items.sort()
-        return tuple(items)
-
-class TestInfo(Base):
-    __tablename__ = 'TestInfo'
-
-    id = Column("ID", Integer, primary_key=True)
-    test_id = Column("Test", Integer, ForeignKey('Test.ID'))
-    key = Column("Key", String(256))
-    value = Column("Value", String(4096))
-
-    def __init__(self, test, key, value):
-        self.test = test
-        self.key = key
-        self.value = value
-
-    def __repr__(self):
-        return '%s%r' % (self.__class__.__name__,
-                         (self.test, self.key, self.value))
-
-class Sample(Base):
-    __tablename__ = 'Sample'
-
-    id = Column("ID", Integer, primary_key=True)
-    run_id = Column("RunID", Integer, ForeignKey('Run.ID'), index=True)
-    test_id = Column("TestID", Integer, ForeignKey('Test.ID'), index=True)
-    value = Column("Value", Float)
-
-    run = relation(Run)
-    test = relation(Test)
-
-    def __init__(self, run, test, value):
-        self.run = run
-        self.test = test
-        self.value = value
-
-    def __repr__(self):
-        return '%s%r' % (self.__class__.__name__,
-                         (self.run, self.test, self.value))
-
-# Define an additional index on (RunID, TestID).
-Index("ix_Sample_RunID_TestID", Sample.run_id, Sample.test_id)
-
-###
-# PerfDB wrapper, to avoid direct SA dependency when possible.
-
-def info_eq(a, b):
-    a = list(a)
-    b = list(b)
-    a.sort()
-    b.sort()
-    return a == b
-
-class PerfDB:
-    def __init__(self, path, echo=False):
-        if (not path.startswith('mysql://') and
-            not path.startswith('sqlite://')):
-            path = 'sqlite:///' + path
-        self.path = path
-        self.engine = sqlalchemy.create_engine(path, echo=echo)
-
-        # Create the tables in case this is a new database.
-        Base.metadata.create_all(self.engine)
-
-        self.session = sqlalchemy.orm.sessionmaker(self.engine)()
-        self.modified_machine = self.modified_run = self.modified_test = False
-
-        # Make sure revision numbers exist.
-        for r in ("Machine","MachineInfo","Run","RunInfo","Test","TestInfo"):
-            self.get_revision(r)
-        self.commit()
-
-        # Add shortcut alias.
-        self.query = self.session.query
-
-    def get_revision(self, name):
-        for r in self.session.query(Revision).filter_by(name=name):
-            return r
-        r = Revision(name, 0)
-        self.session.add(r)
-        return r
-    def get_revision_number(self, name):
-        return self.get_revision(name).number
-
-    def machines(self, name=None):
-        q = self.session.query(Machine)
-        if name:
-            q = q.filter_by(name=name)
-        return q
-
-    def tests(self, name=None):
-        q = self.session.query(Test)
-        if name:
-            q = q.filter_by(name=name)
-        return q
-
-    def runs(self, machine=None):
-        q = self.session.query(Run)
-        if machine:
-            q = q.filter_by(machine=machine)
-        return q
-
-    def samples(self, run=None, test=None):
-        q = self.session.query(Sample)
-        if run:
-            q = q.filter_by(run_id=run.id)
-        if test:
-            q = q.filter_by(test_id=test.id)
-        return q
-
-    def getNumMachines(self):
-        return self.machines().count()
-
-    def getNumRuns(self):
-        return self.runs().count()
-
-    def getNumTests(self):
-        return self.tests().count()
-
-    def getNumSamples(self):
-        return self.samples().count()
-
-    def getMachine(self, id):
-        return self.session.query(Machine).filter_by(id=id).one()
-
-    def getRun(self, id):
-        return self.session.query(Run).filter_by(id=id).one()
-
-    def getTest(self, id):
-        return self.session.query(Test).filter_by(id=id).one()
-
-    def getOrCreateMachine(self, name, info):
-        # FIXME: Not really the right way...
-        number = 1
-        for m in self.machines(name=name):
-            if info_eq([(i.key, i.value) for i in m.info.values()], info):
-                return m,False
-            number += 1
-
-        # Make a new record
-        m = Machine(name, number)
-        m.info = dict((k,MachineInfo(m,k,v)) for k,v in info)
-        self.session.add(m)
-        self.modified_machine = True
-        return m,True
-
-    def getOrCreateTest(self, name, info):
-        # FIXME: Not really the right way...
-        for t in self.tests(name):
-            if info_eq([(i.key, i.value) for i in t.info.values()], info):
-                return t,False
-
-        t = Test(name)
-        t.info = dict((k,TestInfo(t,k,v)) for k,v in info)
-        self.session.add(t)
-        self.modified_test = True
-        return t,True
-
-    def getOrCreateRun(self, machine, start_time, end_time, info):
-        from datetime import datetime
-        start_time = datetime.strptime(start_time,
-                                       "%Y-%m-%d %H:%M:%S")
-        end_time = datetime.strptime(end_time,
-                                     "%Y-%m-%d %H:%M:%S")
-
-        # FIXME: Not really the right way...
-        for r in self.session.query(Run).filter_by(machine=machine):
-            # FIXME: Execute this filter in SQL, but resolve the
-            # normalization issue w.r.t. SQLAlchemy first. I think we
-            # may be running afoul of SQLite not normalizing the
-            # datetime. If I don't do this then sqlalchemy issues a
-            # query in the format YYYY-MM-DD HH:MM:SS.ssss which
-            # doesn't work.
-            if r.start_time != start_time or r.end_time != end_time:
-                continue
-            if info_eq([(i.key, i.value) for i in r.info.values()], info):
-                return r,False
-
-        # Make a new record
-        r = Run(machine, start_time, end_time)
-        r.info = dict((k,RunInfo(r,k,v)) for k,v in info)
-        self.session.add(r)
-        self.modified_run = True
-        return r,True
-
-    def addSample(self, run, test, value):
-        s = Sample(run, test, value)
-        self.session.add(s)
-        return s
-
-    def addSamples(self, samples):
-        """addSamples([(run_id, test_id, value), ...]) -> None
-
-        Batch insert a list of samples."""
-
-        # Flush to keep session consistent.
-        self.session.flush()
-
-        for run_id,test_id,value in samples:
-            q = Sample.__table__.insert().values(RunID = run_id,
-                                                 TestID = test_id,
-                                                 Value = value)
-            self.session.execute(q)
-
-    def commit(self):
-        if self.modified_machine:
-            self.get_revision("Machine").number += 1
-            self.get_revision("MachineInfo").number += 1
-        if self.modified_run:
-            self.get_revision("Run").number += 1
-            self.get_revision("RunInfo").number += 1
-        if self.modified_test:
-            self.get_revision("Test").number += 1
-            self.get_revision("TestInfo").number += 1
-        self.session.commit()
-        self.modified_machine = self.modified_test = self.modified_run = False
-
-    def rollback(self):
-        self.session.rollback()
-        self.modified_machine = self.modified_test = self.modified_run = False
-
-    def importDataFromDict(self, data, config=None):
-        return importDataFromDict(self, data)
-
-    def get_db_summary(self):
-        import perfdbsummary
-        return perfdbsummary.PerfDBSummary.fromdb(self)
-
-def importDataFromDict(db, data):
-    # FIXME: Validate data
-    machineData = data['Machine']
-    runData = data['Run']
-    testsData = data['Tests']
-
-    # Get the machine
-    # FIXME: Validate machine
-    machine,_ = db.getOrCreateMachine(machineData['Name'],
-                                      machineData['Info'].items())
-
-    # Accept 'Time' as an alias for 'Start Time'
-    if 'Start Time' not in runData and 'Time' in runData:
-        import time
-        t = time.strptime(runData['Time'],
-                          "%a, %d %b %Y %H:%M:%S -0700 (PDT)")
-        runData['Start Time'] = time.strftime('%Y-%m-%d %H:%M', t)
-
-    # Create the run.
-    run,inserted = db.getOrCreateRun(machine,
-                                     runData.get('Start Time',''),
-                                     runData.get('End Time',''),
-                                     runData.get('Info',{}).items())
-    if not inserted:
-        return False,run
-
-    # Batch load the set of tests instead of querying repeatedly to uniquify each one.
-    #
-    # FIXME: Add explicit config object.
-    test_info = {}
-    for id,k,v in db.session.query(TestInfo.test_id, TestInfo.key,
-                                   TestInfo.value):
-        info = test_info[id] = test_info.get(id,{})
-        info[str(k)] = str(v)
-
-    testMap = {}
-    for test_id,test_name in db.session.query(Test.id, Test.name):
-        info = test_info.get(test_id,{}).items()
-        info.sort()
-        testMap[(str(test_name),tuple(info))] = test_id
-
-    # Create the tests up front, so we can resolve IDs.
-    test_ids = []
-    late_ids = []
-    for i,testData in enumerate(testsData):
-        name = str(testData['Name'])
-        info = [(str(k),str(v)) for k,v in testData['Info'].items()]
-        info.sort()
-        test_id = testMap.get((name,tuple(info)))
-        if test_id is None:
-            test,created = db.getOrCreateTest(testData['Name'],info)
-            late_ids.append((i,test))
-        test_ids.append(test_id)
-
-    # Flush now to resolve test and run ids.
-    #
-    # FIXME: Surely there is a cleaner way to handle this?
-    db.session.flush()
-
-    if late_ids:
-        for i,t in late_ids:
-            test_ids[i] = t.id
-
-    db.addSamples([(run.id, test_id, value)
-                   for test_id,testData in zip(test_ids, testsData)
-                   for value in testData['Data']])
-    return True,run
-
-def test_sa_db(dbpath):
-    if not dbpath.startswith('mysql://'):
-        dbpath = 'sqlite:///' + dbpath
-    engine = sqlalchemy.create_engine(dbpath)
-
-    Session = sqlalchemy.orm.sessionmaker(engine)
-    Session.configure(bind=engine)
-
-    session = Session()
-
-    m = session.query(Machine).first()
-    print m
-    print m.info
-
-    r = session.query(Run).first()
-    print r
-    print r.info
-
-    t = session.query(Test)[20]
-    print t
-    print t.info
-
-    s = session.query(Sample)[20]
-    print s
-
-    import time
-    start = time.time()
-    print
-    q = session.query(Sample)
-    q = q.filter(Sample.run_id == 994)
-    print
-    res = session.execute(q)
-    print res
-    N = 0
-    for row in res:
-        if N == 1:
-            print row
-        N += 1
-    print N, time.time() - start
-    print
-
-    start = time.time()
-    N = 0
-    for row in q:
-        if N == 1:
-            print row
-        N += 1
-    print N, time.time() - start
-
-def main():
-    global opts
-    from optparse import OptionParser
-    parser = OptionParser("usage: %prog dbpath")
-    opts,args = parser.parse_args()
-
-    if len(args) != 1:
-        parser.error("incorrect number of argments")
-
-    dbpath, = args
-
-    # Test the SQLAlchemy layer.
-    test_sa_db(dbpath)
-
-    # Test the PerfDB wrapper.
-    db = PerfDB(dbpath)
-
-    print "Opened %r" % dbpath
-
-    for m in db.machines():
-        print m
-        for r in db.runs(m):
-            print '  run - id:%r, start:%r,'\
-                ' # samples: %d.' % (r.id, r.start_time,
-                                     db.samples(run=r).count())
-
-if __name__ == '__main__':
-    main()
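
The removed addSamples() above is worth a note: it flushes the ORM
session for consistency, then issues core-level INSERTs against
Sample.__table__ directly, which avoids per-object ORM overhead when
loading large sample sets. A self-contained sketch of the same pattern,
reusing the removed schema's column names:

    import sqlalchemy
    import sqlalchemy.ext.declarative
    import sqlalchemy.orm
    from sqlalchemy import Column, Integer, Float

    Base = sqlalchemy.ext.declarative.declarative_base()

    class Sample(Base):
        __tablename__ = 'Sample'
        id = Column("ID", Integer, primary_key=True)
        run_id = Column("RunID", Integer)
        test_id = Column("TestID", Integer)
        value = Column("Value", Float)

    engine = sqlalchemy.create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sqlalchemy.orm.sessionmaker(engine)()

    # Flush pending ORM state so the raw inserts see a consistent
    # session, then insert rows without instantiating mapped objects.
    session.flush()
    for run_id, test_id, value in [(1, 1, 0.5), (1, 2, 0.7)]:
        session.execute(Sample.__table__.insert().values(
            RunID=run_id, TestID=test_id, Value=value))
    session.commit()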

Removed: lnt/trunk/lnt/db/perfdbsummary.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/db/perfdbsummary.py?rev=161334&view=auto
==============================================================================
--- lnt/trunk/lnt/db/perfdbsummary.py (original)
+++ lnt/trunk/lnt/db/perfdbsummary.py (removed)
@@ -1,241 +0,0 @@
-"""
-Classes for caching metadata about a PerfDB instance.
-"""
-
-from lnt.db.perfdb import Run, RunInfo, Sample, Test
-
-class SuiteSummary:
-    def __init__(self, name, path):
-        self.name = name
-        self.path = path
-
-class PerfDBSummary:
-    @staticmethod
-    def fromdb(db):
-        revision = db.get_revision_number("Run")
-
-        # Look for all the run tags and use them to identify the available
-        # suites.
-        q = db.session.query(RunInfo.value.distinct())
-        q = q.filter(RunInfo.key == "tag")
-
-        suites = [SuiteSummary("Nightlytest", ("nightlytest",))]
-        for tag, in q:
-            if tag == 'nightlytest':
-                continue
-            suites.append(SuiteSummary(tag, ("simple",tag)))
-
-        suites.sort(key=lambda s: s.name)
-        return PerfDBSummary(revision, suites)
-
-    def __init__(self, revision, suites):
-        self.revision = revision
-        self.suites = suites
-
-    def is_up_to_date(self, db):
-        return (not db.modified_run and
-                self.revision == db.get_revision_number("Run"))
-
-class SimpleSuiteSummary(object):
-    @staticmethod
-    def fromdb(db, tag):
-        revision = db.get_revision_number("Test")
-
-        # Find all test names.
-        q = db.session.query(Test)
-        q = q.filter(Test.name.startswith(tag))
-        tests = list(q)
-
-        # Collect all the test data.
-        test_names = set()
-        parameter_sets = set()
-        test_id_map = {}
-        for t in tests:
-            name = t.name.split('.', 1)[1]
-
-            key = t.get_parameter_set()
-
-            parameter_sets.add(key)
-            test_id_map[(name, key)] = t.id
-
-            if name.endswith('.success'):
-                test_name = name.rsplit('.', 1)[0]
-            elif name.endswith('.status'):
-                test_name = name.rsplit('.', 1)[0]
-            else:
-                test_name = name
-
-            test_names.add(test_name)
-
-        # Order the test names.
-        test_names = list(test_names)
-        test_names.sort()
-
-        # Collect the set of all parameter keys.
-        parameter_keys = list(set([k for pset in parameter_sets
-                                   for k,v in pset]))
-        parameter_keys.sort()
-
-        # Order the parameter sets and convert to dictionaries.
-        parameter_sets = list(parameter_sets)
-        parameter_sets.sort()
-
-        return SimpleSuiteSummary(revision, tag, test_names,
-                                  test_id_map, parameter_keys, parameter_sets)
-
-    def __init__(self, revision, tag, test_names,
-                 test_id_map, parameter_keys, parameter_sets):
-        self.revision = revision
-        self.tag = tag
-        self.test_names = test_names
-        self.test_id_map = test_id_map
-        self.parameter_keys = parameter_keys
-        self.parameter_sets = parameter_sets
-        self.parameter_maps = map(dict, parameter_sets)
-        self.test_info_map = dict([(v,k) for k,v in test_id_map.items()])
-
-    def is_up_to_date(self, db):
-        return (not db.modified_test and
-                self.revision == db.get_revision_number("Test"))
-
-    def get_test_names_in_runs(self, db, runs):
-        # Load the distinct test ids for these runs.
-        test_ids = db.session.query(Sample.test_id)\
-            .filter(Sample.run_id.in_(runs)).distinct()
-
-        # Get the test names for the test ids.
-        test_names = [self.test_info_map[id][0]
-                      for id, in test_ids]
-
-        # Limit to the tests we actually report.
-        test_names = list(set(test_names) & set(self.test_names))
-        test_names.sort()
-
-        return test_names
-
-_cache = {}
-def get_simple_suite_summary(db, tag):
-    key = (db.path, tag)
-    entry = _cache.get(key)
-    if entry is None or not entry.is_up_to_date(db):
-        _cache[key] = entry = SimpleSuiteSummary.fromdb(db, tag)
-    return entry
-
-class SimpleSuiteRunSummary(object):
-    _cache = {}
-    @staticmethod
-    def get_summary(db, tag):
-        key = (db.path, tag)
-        entry = SimpleSuiteRunSummary._cache.get(key)
-        if entry is None or not entry.is_up_to_date(db):
-            entry = SimpleSuiteRunSummary.fromdb(db, tag)
-            SimpleSuiteRunSummary._cache[key] = entry
-        return entry
-
-    @staticmethod
-    def fromdb(db, tag):
-        revision = db.get_revision_number("RunInfo")
-
-        # Find all run_orders for runs with this tag, ordered by run time so
-        # that runs are ordered by both (run_order, time) in the final ordering.
-        all_run_orders = db.session.query(RunInfo.value, RunInfo.run_id,
-                                          Run.machine_id).\
-            join(Run).\
-            order_by(Run.start_time.desc()).\
-            filter(RunInfo.key == "run_order").\
-            filter(RunInfo.run_id.in_(
-                db.session.query(RunInfo.run_id).\
-                    filter(RunInfo.key == "tag").\
-                    filter(RunInfo.value == tag).subquery()))
-        all_run_orders = list(all_run_orders)
-
-        order_by_run = dict((run_id,order)
-                            for order,run_id,machine_id in all_run_orders)
-        machine_id_by_run = dict((run_id,machine_id)
-                                 for order,run_id,machine_id in all_run_orders)
-
-        # Create a mapping from run_order to the available runs with that order.
-        runs_by_order = {}
-        for order,run_id,_ in all_run_orders:
-            runs = runs_by_order.get(order)
-            if runs is None:
-                runs = runs_by_order[order] = []
-            runs.append(run_id)
-
-        # Get all available run_orders, in order.
-        def order_key(run_order):
-            return run_order
-        run_orders = runs_by_order.keys()
-        run_orders.sort(key = order_key)
-        run_orders.reverse()
-
-        # Construct the total order of runs.
-        runs_in_order = []
-        for order in run_orders:
-            runs_in_order.extend(runs_by_order[order])
-
-        return SimpleSuiteRunSummary(
-            revision, tag, run_orders, runs_by_order, runs_in_order,
-            order_by_run, machine_id_by_run)
-
-    def __init__(self, revision, tag, run_orders, runs_by_order, runs_in_order,
-                 order_by_run, machine_id_by_run):
-        self.revision = revision
-        self.tag = tag
-        self.run_orders = run_orders
-        self.runs_by_order = runs_by_order
-        self.runs_in_order = runs_in_order
-        self.order_by_run = order_by_run
-        self.machine_id_by_run = machine_id_by_run
-        self.run_status_kinds = {}
-
-    def is_up_to_date(self, db):
-        return (not db.modified_run and
-                self.revision == db.get_revision_number("RunInfo"))
-
-    def contains_run(self, run_id):
-        return run_id in self.machine_id_by_run
-
-    def get_run_order(self, run_id):
-        return self.order_by_run.get(run_id)
-
-    def get_runs_on_machine(self, machine_id):
-        return [k for k,v in self.machine_id_by_run.items()
-                if v == machine_id]
-
-    def get_run_ordered_index(self, run_id):
-        try:
-            return self.runs_in_order.index(run_id)
-        except:
-            print run_id
-            print self.runs_in_order
-            raise
-
-    def get_previous_run_on_machine(self, run_id):
-        machine_id = self.machine_id_by_run[run_id]
-        index = self.get_run_ordered_index(run_id)
-        for i in range(index + 1, len(self.runs_in_order)):
-            id = self.runs_in_order[i]
-            if machine_id == self.machine_id_by_run[id]:
-                return id
-
-    def get_next_run_on_machine(self, run_id):
-        machine_id = self.machine_id_by_run[run_id]
-        index = self.get_run_ordered_index(run_id)
-        for i in range(0, index)[::-1]:
-            id = self.runs_in_order[i]
-            if machine_id == self.machine_id_by_run[id]:
-                return id
-
-    def get_run_status_kind(self, db, run_id):
-        kind = self.run_status_kinds.get(run_id)
-        if kind is None:
-            # Compute the status kind by looking for .success tests in this run.
-            if db.session.query(Test.name).join(Sample)\
-                    .filter(Sample.run_id == run_id)\
-                    .filter(Test.name.endswith(".success")).first() is not None:
-                kind = False
-            else:
-                kind = True
-        self.run_status_kinds[run_id] = kind
-        return kind
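
A note on the caching strategy being deleted here: summaries were
memoized by (db.path, tag) and invalidated through the per-table
revision counters that PerfDB.commit() increments, not by any TTL. A
minimal sketch of that pattern, with a hypothetical compute() standing
in for SimpleSuiteSummary.fromdb:

    _cache = {}

    def get_cached_summary(db, tag, compute):
        key = (db.path, tag)
        entry = _cache.get(key)
        # Rebuild only if missing, or if the relevant revision counter
        # has moved since the entry was built.
        if entry is None or not entry.is_up_to_date(db):
            _cache[key] = entry = compute(db, tag)
        return entry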

Modified: lnt/trunk/lnt/db/runinfo.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/db/runinfo.py?rev=161335&r1=161334&r2=161335&view=diff
==============================================================================
--- lnt/trunk/lnt/db/runinfo.py (original)
+++ lnt/trunk/lnt/db/runinfo.py Mon Aug  6 15:02:49 2012
@@ -1,8 +1,3 @@
-from lnt.util import stats
-from lnt.server.ui import util
-from lnt.db.perfdb import Sample
-from lnt.testing import PASS, FAIL, XFAIL
-
 REGRESSED = 'REGRESSED'
 IMPROVED = 'IMPROVED'
 UNCHANGED_PASS = 'UNCHANGED_PASS'
@@ -106,154 +101,3 @@
                 return REGRESSED
         else:
             return UNCHANGED_PASS
-
-class SimpleRunInfo:
-    def __init__(self, db, test_suite_summary):
-        self.db = db
-        self.test_suite_summary = test_suite_summary
-
-        self.sample_map = util.multidict()
-        self.loaded_samples = set()
-
-    def get_test_status_in_run(self, run_id, status_kind, test_name, pset):
-        if status_kind == False: # .success
-            status_name = test_name + '.success'
-            status_test_id = self.test_suite_summary.test_id_map.get(
-                (status_name, pset))
-            run_status = self.sample_map.get((run_id, status_test_id))
-            if run_status and int(run_status[0]) == 1:
-                return PASS
-            else:
-                return FAIL
-        else:
-            status_name = test_name + '.status'
-            status_test_id = self.test_suite_summary.test_id_map.get(
-                (status_name, pset))
-            run_status = self.sample_map.get((run_id, status_test_id))
-            if not run_status:
-                return PASS
-            else:
-                # FIXME: What to do about the multiple entries here. We could
-                # start by just treating non-matching samples as errors.
-                return int(run_status[0])
-
-    def get_run_comparison_result(self, run, run_status_kind,
-                                  compare_to, compare_to_status_kind,
-                                  test_name, pset, comparison_window=[]):
-        # Get the test.
-        test_id = self.test_suite_summary.test_id_map.get((test_name, pset))
-        if test_id is None:
-            return ComparisonResult(run_value=None, prev_value=None, delta=None,
-                                    pct_delta=None, stddev=None, MAD=None,
-                                    cur_failed=None, prev_failed=None,
-                                    samples=[])
-
-        # Load the sample data for the current and previous runs and the
-        # comparison window.
-        if compare_to is None:
-            compare_id = None
-        else:
-            compare_id = compare_to.id
-        runs_to_load = set(comparison_window)
-        runs_to_load.add(run.id)
-        if compare_id is not None:
-            runs_to_load.add(compare_id)
-        self._load_samples_for_runs(runs_to_load)
-
-        # Lookup the current and previous values.
-        run_values = self.sample_map.get((run.id, test_id))
-        prev_values = self.sample_map.get((compare_id, test_id))
-
-        # Determine whether this (test,pset) passed or failed in the current and
-        # previous runs.
-        run_failed = prev_failed = False
-        run_status = prev_status = None
-        run_status = self.get_test_status_in_run(
-            run.id, run_status_kind, test_name, pset)
-        if compare_to:
-            prev_status = self.get_test_status_in_run(
-                compare_to.id, compare_to_status_kind, test_name, pset)
-        else:
-            prev_status = None
-
-        # FIXME: Support XFAILs better.
-        run_failed = run_status == FAIL
-        prev_failed = prev_status == FAIL
-
-        # Get the current and previous values.
-        if run_values:
-            run_value = min(run_values)
-        else:
-            run_value = None
-        if prev_values:
-            prev_value = min(prev_values)
-        else:
-            prev_value = None
-
-        # If we have multiple values for this run, use that to estimate the
-        # distribution.
-        if run_values and len(run_values) > 1:
-            stddev = stats.standard_deviation(run_values)
-            MAD = stats.median_absolute_deviation(run_values)
-            stddev_mean = stats.mean(run_values)
-            stddev_is_estimated = False
-        else:
-            stddev = None
-            MAD = None
-            stddev_mean = None
-            stddev_is_estimated = False
-
-        # If we are missing current or comparison values we are done.
-        if run_value is None or prev_value is None:
-            return ComparisonResult(
-                run_value, prev_value, delta=None,
-                pct_delta = None, stddev = stddev, MAD = MAD,
-                cur_failed = run_failed, prev_failed = prev_failed,
-                samples = run_values)
-
-        # Compute the comparison status for the test value.
-        delta = run_value - prev_value
-        if prev_value != 0:
-            pct_delta = delta / prev_value
-        else:
-            pct_delta = 0.0
-
-        # If we don't have an estimate for the distribution, attempt to "guess"
-        # it using the comparison window.
-        #
-        # FIXME: We can substantially improve the algorithm for guessing the
-        # noise level from a list of values. Probably better to just find a way
-        # to kill this code though.
-        if stddev is None:
-            # Get all previous values in the comparison window, for passing
-            # runs.
-            #
-            # FIXME: This is using the wrong status kind. :/
-            prev_values = [v for run_id in comparison_window
-                           for v in self.sample_map.get((run_id, test_id), ())
-                           if self.get_test_status_in_run(
-                    run_id, run_status_kind, test_name, pset) == PASS]
-            if prev_values:
-                stddev = stats.standard_deviation(prev_values)
-                MAD = stats.median_absolute_deviation(prev_values)
-                stddev_mean = stats.mean(prev_values)
-                stddev_is_estimated = True
-
-        return ComparisonResult(run_value, prev_value, delta,
-                                pct_delta, stddev, MAD,
-                                run_failed, prev_failed, run_values,
-                                stddev_mean, stddev_is_estimated)
-
-    def _load_samples_for_runs(self, runs):
-        # Find the set of new runs to load.
-        to_load = set(runs) - self.loaded_samples
-        if not to_load:
-            return
-
-        q = self.db.session.query(Sample.value, Sample.run_id, Sample.test_id)
-        q = q.filter(Sample.run_id.in_(to_load))
-        for value,run_id,test_id in q:
-            self.sample_map[(run_id,test_id)] = value
-
-        self.loaded_samples |= to_load
-
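
For reference, the heart of the removed comparison logic: multiple
samples are collapsed to their minimum, and the fractional delta guards
against a zero baseline. A condensed sketch (the helper name is mine,
not from the original):

    def summarize_change(run_values, prev_values):
        # Use the minimum observed sample as the representative value,
        # as get_run_comparison_result did above.
        run_value = min(run_values) if run_values else None
        prev_value = min(prev_values) if prev_values else None
        if run_value is None or prev_value is None:
            return None
        delta = run_value - prev_value
        pct_delta = delta / prev_value if prev_value != 0 else 0.0
        return delta, pct_delta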

Modified: lnt/trunk/lnt/lnttool/import_data.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/lnttool/import_data.py?rev=161335&r1=161334&r2=161335&view=diff
==============================================================================
--- lnt/trunk/lnt/lnttool/import_data.py (original)
+++ lnt/trunk/lnt/lnttool/import_data.py Mon Aug  6 15:02:49 2012
@@ -1,9 +1,6 @@
 import os, pprint, sys, time
 
-import lnt.db.perfdb
-from lnt import formats
-import lnt.server.config
-import lnt.server.db.v4db
+import lnt.formats
 import lnt.util.ImportData
 import lnt.server.instance
 
@@ -16,7 +13,7 @@
     parser.add_option("", "--database", dest="database", default="default",
                       help="database to write to [%default]")
     parser.add_option("", "--format", dest="format",
-                      choices=formats.format_names + ['<auto>'],
+                      choices=lnt.formats.format_names + ['<auto>'],
                       default='<auto>')
     parser.add_option("", "--commit", dest="commit", type=int,
                       default=False)

Modified: lnt/trunk/lnt/lnttool/main.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/lnttool/main.py?rev=161335&r1=161334&r2=161335&view=diff
==============================================================================
--- lnt/trunk/lnt/lnttool/main.py (original)
+++ lnt/trunk/lnt/lnttool/main.py Mon Aug  6 15:02:49 2012
@@ -12,7 +12,6 @@
 import lnt
 import lnt.util.ImportData
 from lnt import testing
-from lnt.db import perfdb
 from lnt.testing.util.commands import note, warning, error, fatal
 
 def action_runserver(name, args):
@@ -90,7 +89,6 @@
 from create import action_create
 from convert import action_convert
 from import_data import action_import
-from report import action_report
 from updatedb import action_updatedb
 
 def action_checkformat(name, args):

Removed: lnt/trunk/lnt/lnttool/report.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/lnttool/report.py?rev=161334&view=auto
==============================================================================
--- lnt/trunk/lnt/lnttool/report.py (original)
+++ lnt/trunk/lnt/lnttool/report.py (removed)
@@ -1,217 +0,0 @@
-from optparse import OptionParser, OptionGroup
-
-from lnt import testing
-from lnt.db import perfdb
-from lnt.db import perfdbsummary, runinfo
-from lnt.db.perfdb import Run, RunInfo, Machine, Sample, Test
-from lnt.testing.util.commands import note, warning, error, fatal
-from lnt.util import stats
-
-def print_table(rows):
-    def format_cell(value):
-        if isinstance(value, str):
-            return value
-        elif isinstance(value, int):
-            return str(value)
-        elif isinstance(value, float):
-            return "%.4f" % value
-        else:
-            return str(value)
-
-    N = len(rows[0])
-    for row in rows:
-        if len(row) != N:
-            raise ValueError,"Invalid table"
-
-        print "\t".join(map(format_cell, row))
-
-def action_report(name, args):
-    """performance reporting tools"""
-
-    parser = OptionParser("""\
-%%prog %s [options] <db>""" % name)
-    parser.add_option("-v", "--verbose", dest="verbose",
-                      help="show verbose test results",
-                      action="store_true", default=False)
-    parser.add_option("", "--run-order", dest="run_order",
-                      help="run order to select on",
-                      type=int, default=None)
-    parser.add_option("", "--arch", dest="arch",
-                      help="arch field to select on",
-                      type=str, default=None)
-    parser.add_option("", "--optflags", dest="optflags",
-                      help="optimization flags field to select on",
-                      type=str, default=None)
-    parser.add_option("", "--machine", dest="machine_name",
-                      help="machine name to select on",
-                      type=str, default=None)
-
-    (opts, args) = parser.parse_args(args)
-    if len(args) != 1:
-        parser.error("incorrect number of argments")
-
-    path, = args
-    db = perfdb.PerfDB('sqlite:///%s' % path)
-    tag = 'nts'
-
-    if opts.run_order is None:
-        parser.error("--run-order is required")
-
-    # First, find all runs with the desired order.
-    q = db.session.query(Run).\
-        join(RunInfo).\
-        order_by(Run.start_time.desc()).\
-        filter(RunInfo.key == "run_order").\
-        filter(RunInfo.value == "% 7d" % opts.run_order)
-    matching_runs = list(q)
-
-    # Try to help user if nothing was found.
-    if not matching_runs:
-        available_orders = set(
-            db.session.query(RunInfo.value).\
-                filter(RunInfo.key == "run_order"))
-        fatal("no runs found matching --run-order %d, available orders: %s" % (
-                opts.run_order, str(sorted(int(x)
-                                           for x, in available_orders))))
-
-    # Match based on the machine name, if given.
-    if opts.machine_name:
-        selected = [r for r in matching_runs
-                    if r.machine.name == opts.machine_name]
-        if not selected:
-            available_names = set(r.machine.name
-                                  for r in matching_runs)
-            fatal(
-                "no runs found matching --machine %s, available names: [%s]" %(
-                    opts.machine_name, ", ".join(sorted(available_names))))
-        matching_runs = selected
-
-    # Match based on the architecture, if given.
-    if opts.arch:
-        selected = [r for r in matching_runs
-                    if 'ARCH' in r.info
-                    if r.info['ARCH'].value == opts.arch]
-        if not selected:
-            available_archs = set(r.info['ARCH'].value
-                                  for r in matching_runs
-                                  if 'ARCH' in r.info)
-            fatal("no runs found matching --arch %s, available archs: [%s]" % (
-                    opts.arch, ", ".join(sorted(available_archs))))
-        matching_runs = selected
-
-    # Match based on the optflags, if given.
-    if opts.optflags:
-        selected = [r for r in matching_runs
-                    if 'OPTFLAGS' in r.info
-                    if r.info['OPTFLAGS'].value == opts.optflags]
-        if not selected:
-            available_flags = set(r.info['OPTFLAGS'].value
-                                  for r in matching_runs
-                                  if 'OPTFLAGS' in r.info)
-            fatal(
-                "no runs found matching --optflags %s, available flags: [%s]" %(
-                    opts.optflags, ", ".join(sorted(available_flags))))
-        matching_runs = selected
-
-    # Inform the user of the final list of selected runs.
-    note("selection arguments resulted in %d runs" % (len(matching_runs),))
-    for run in matching_runs:
-        note("Run: % 5d, Start Time: %s, Machine: %s:%d" % (
-            run.id, run.start_time.strftime('%Y-%m-%d %H:%M:%S'),
-            run.machine.name, run.machine.number))
-
-    # Take only the first matched run, for now. This will be the latest, by the
-    # original ordering clause.
-    note("selecting newest run for reporting...")
-    matching_runs = [matching_runs[0]]
-
-    # Inform the user of the final list of selected runs.
-    note("reporting over %d total runs" % (len(matching_runs),))
-    for run in matching_runs:
-        note("Run: % 5d, Start Time: %s, Machine: %s:%d" % (
-            run.id, run.start_time.strftime('%Y-%m-%d %H:%M:%S'),
-            run.machine.name, run.machine.number))
-
-    # Get the run summary which has run ordering information.
-    run_summary = perfdbsummary.SimpleSuiteRunSummary.get_summary(db, tag)
-
-    # Load the test suite summary.
-    ts_summary = perfdbsummary.get_simple_suite_summary(db, tag)
-
-    # Gather the names of all tests across these runs, for more normalized
-    # reporting.
-    test_names = ts_summary.get_test_names_in_runs(
-        db, [r.id for r in matching_runs])
-    test_components = sorted(set(t.rsplit('.',1)[1] for t in test_names))
-    test_base_names = sorted(set(t.rsplit('.',1)[0] for t in test_names))
-
-    # Load all the data.
-    items = {}
-    for test_component in test_components:
-        for name,value in get_test_passes(db, run_summary, ts_summary,
-                                          test_component, test_base_names,
-                                          matching_runs):
-            items[(test_component, name)] = value
-
-    # Dump the results.
-    print "%s\t%s\t%s\t%s\t%s\t%s\t%s" % (
-        "Test", "Mean Compile Time", "Mean Execution Time",
-        "Std.Dev. Compile Time", "Std.Dev. Execution Time",
-        "Num. Compile Time Samples", "Num. Execution Time Samples")
-    for name in test_base_names:
-        compile_results = items.get(('compile', name), [])
-        exec_results = items.get(('exec', name), [])
-        if compile_results:
-            compile_value = "%.4f" % stats.mean(compile_results)
-            compile_stddev = "%.4f" % stats.standard_deviation(compile_results)
-        else:
-            compile_value = compile_stddev = ""
-        if exec_results:
-            exec_value = "%.4f" % stats.mean(exec_results)
-            exec_stddev = "%.4f" % stats.standard_deviation(exec_results)
-        else:
-            exec_value = exec_stddev = ""
-        print "%s\t%s\t%s\t%s\t%s\t%d\t%d" % (
-            name, compile_value, exec_value,
-            compile_stddev, exec_stddev,
-            len(compile_results), len(exec_results))
-
-def get_test_passes(db, run_summary, ts_summary,
-                    test_component, test_base_names, runs):
-    if not runs:
-        return
-
-    sri = runinfo.SimpleRunInfo(db, ts_summary)
-    sri._load_samples_for_runs([r.id for r in runs])
-
-    run_status_info = [(r, run_summary.get_run_status_kind(db, r.id))
-                       for r in runs]
-
-    pset = ()
-    for test_base_name in test_base_names:
-        test_name = "%s.%s" % (test_base_name, test_component)
-        test_id = ts_summary.test_id_map.get((test_name, pset))
-        if test_id is None:
-            continue
-
-        run_values = sum((sri.sample_map.get((run.id, test_id))
-                          for run in runs
-                          if (run.id, test_id) in sri.sample_map), [])
-        # Ignore tests that weren't reported in some runs (e.g., -disable-cxx).
-        if not run_values:
-            continue
-
-        # Find the test status, treat any non-determinism as a FAIL.
-        run_status = list(set([sri.get_test_status_in_run(
-                r.id, rsk, test_name, pset)
-                      for (r,rsk) in run_status_info]))
-        if len(run_status) == 1:
-            status_kind = run_status[0]
-        else:
-            status_kind = testing.FAIL
-
-        # Ignore failing tests.
-        if status_kind == testing.FAIL:
-            continue
-
-        yield (test_base_name, run_values)
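
One subtlety in the removed report tool: run_order values were stored
as space-padded, width-7 strings, so a numeric --run-order had to be
formatted the same way before comparing against RunInfo.value. A small
illustration:

    def run_order_key(run_order):
        # Right-align the integer in a 7-character field, matching the
        # storage format used above.
        return "% 7d" % run_order

    assert run_order_key(42) == "     42"
    assert int(run_order_key(42)) == 42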

Modified: lnt/trunk/lnt/server/config.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/config.py?rev=161335&r1=161334&r2=161335&view=diff
==============================================================================
--- lnt/trunk/lnt/server/config.py (original)
+++ lnt/trunk/lnt/server/config.py Mon Aug  6 15:02:49 2012
@@ -5,7 +5,6 @@
 import os
 import re
 
-import lnt.db.perfdb
 import lnt.server.db.v4db
 
 class EmailConfig:

Modified: lnt/trunk/lnt/server/ui/app.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/app.py?rev=161335&r1=161334&r2=161335&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/app.py (original)
+++ lnt/trunk/lnt/server/ui/app.py Mon Aug  6 15:02:49 2012
@@ -16,9 +16,6 @@
 import lnt.server.ui.globals
 import lnt.server.ui.views
 
-from lnt.db import perfdbsummary
-from lnt.db import perfdb
-
 class RootSlashPatchMiddleware(object):
     def __init__(self, app):
         self.app = app
@@ -127,7 +124,6 @@
 
         self.jinja_env.globals.update(
             app=current_app,
-            perfdb=perfdb,
             old_config=self.old_config)
 
         lnt.server.ui.globals.register(self)

Removed: lnt/trunk/lnt/server/ui/graphutil.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/graphutil.py?rev=161334&view=auto
==============================================================================
--- lnt/trunk/lnt/server/ui/graphutil.py (original)
+++ lnt/trunk/lnt/server/ui/graphutil.py (removed)
@@ -1,131 +0,0 @@
-"""
-Helper functions for graphing test results.
-"""
-
-from lnt.server.ui import util
-from lnt.util import stats
-from lnt.external.stats import stats as ext_stats
-
-from lnt.db.perfdb import Machine, Run, RunInfo, Sample, Test
-
-def get_test_plots(db, machine, test_ids, run_summary, ts_summary,
-                   show_mad_error = False, show_points = False,
-                   show_all_points = False, show_stddev = False,
-                   show_linear_regression = False):
-    # Load all the samples for these tests and this machine.
-    q = db.session.query(Sample.run_id,Sample.test_id,
-                         Sample.value).join(Run)
-    q = q.filter(Run.machine_id == machine.id)
-    q = q.filter(Sample.test_id.in_(test_ids))
-    samples = list(q)
-
-    # Aggregate by test id and then run key.
-    #
-    # FIXME: Pretty expensive.
-    samples_by_test_id = {}
-    for run_id,test_id,value in samples:
-        d = samples_by_test_id.get(test_id)
-        if d is None:
-            d = samples_by_test_id[test_id] = util.multidict()
-        run_key = run_summary.get_run_order(run_id)
-        if run_key is None:
-            continue
-
-        # FIXME: What to do on failure?
-        run_key = int(run_key)
-        d[run_key] = value
-
-    # Build the graph data
-    pset_id_map = dict([(pset,i)
-                        for i,pset in enumerate(ts_summary.parameter_sets)])
-    num_plots = len(test_ids)
-    for index,test_id in enumerate(test_ids):
-        test = db.getTest(test_id)
-        pset = test.get_parameter_set()
-        name = test.name
-
-        # Get the plot for this test.
-        #
-        # FIXME: Support order by something other than time.
-        errorbar_data = []
-        points_data = []
-        data = []
-        points = []
-        for x,values in samples_by_test_id.get(test_id,{}).items():
-            min_value = min(values)
-            data.append((x, min_value))
-            if show_points:
-                if show_all_points:
-                    for v in values:
-                        points_data.append((x, v))
-                else:
-                    points_data.append((x, min_value))
-            if show_stddev:
-                mean = stats.mean(values)
-                sigma = stats.standard_deviation(values)
-                errorbar_data.append((x, mean - sigma, mean + sigma))
-            if show_mad_error:
-                med = stats.median(values)
-                mad = stats.median_absolute_deviation(values, med)
-                errorbar_data.append((x, med - mad, med + mad))
-                points.append((x, min_value, mad, med))
-        data.sort()
-        points.sort()
-
-        plot_js = ""
-
-        # Determine the base plot color.
-        col = list(util.makeDarkColor(float(index) / num_plots))
-
-        # Add regression line, if requested.
-        if show_linear_regression:
-            xs = [t for t,v in data]
-            ys = [v for t,v in data]
-
-            # We compute the regression line in terms of a normalized X scale.
-            x_min, x_max = min(xs), max(xs)
-            try:
-                norm_xs = [(x - x_min) / (x_max - x_min)
-                           for x in xs]
-            except ZeroDivisionError:
-                norm_xs = xs
-
-            try:
-                info = ext_stats.linregress(norm_xs, ys)
-            except ZeroDivisionError:
-                info = None
-            except ValueError:
-                info = None
-
-            if info is not None:
-                slope, intercept,_,_,_ = info
-
-                reglin_col = [c*.5 for c in col]
-                pts = ','.join('[%.4f,%.4f]' % pt
-                               for pt in [(x_min, 0.0 * slope + intercept),
-                                          (x_max, 1.0 * slope + intercept)])
-                style = "new Graph2D_LinePlotStyle(4, %r)" % ([.7, .7, .7],)
-                plot_js += "    graph.addPlot([%s], %s);\n" % (pts,style)
-                style = "new Graph2D_LinePlotStyle(2, %r)" % (reglin_col,)
-                plot_js += "    graph.addPlot([%s], %s);\n" % (pts,style)
-
-        pts = ','.join(['[%.4f,%.4f]' % (t,v)
-                        for t,v in data])
-        style = "new Graph2D_LinePlotStyle(1, %r)" % col
-        plot_js += "    graph.addPlot([%s], %s);\n" % (pts,style)
-
-        if points_data:
-            pts_col = (0,0,0)
-            pts = ','.join(['[%.4f,%.4f]' % (t,v)
-                            for t,v in points_data])
-            style = "new Graph2D_PointPlotStyle(1, %r)" % (pts_col,)
-            plot_js += "    graph.addPlot([%s], %s);\n" % (pts,style)
-
-        if errorbar_data:
-            bar_col = [c*.7 for c in col]
-            pts = ','.join(['[%.4f,%.4f,%.4f]' % (x,y_min,y_max)
-                            for x,y_min,y_max in errorbar_data])
-            style = "new Graph2D_ErrorBarPlotStyle(1, %r)" % (bar_col,)
-            plot_js += "    graph.addPlot([%s], %s);\n" % (pts,style)
-
-        yield (test_id, plot_js, col, data, points)
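
The regression plotting removed here normalizes x into [0, 1] before
fitting, which keeps the fit numerically stable when run orders are
large, and then evaluates the line at the interval endpoints. A compact
sketch of that step; linregress is any (xs, ys) -> (slope, intercept,
...) routine, such as the lnt.external.stats one used above:

    def regression_endpoints(data, linregress):
        xs = [x for x, v in data]
        ys = [v for x, v in data]
        x_min, x_max = min(xs), max(xs)
        try:
            # Normalize so x_min maps to 0.0 and x_max maps to 1.0.
            norm_xs = [float(x - x_min) / (x_max - x_min) for x in xs]
        except ZeroDivisionError:
            norm_xs = xs
        slope, intercept = linregress(norm_xs, ys)[:2]
        return [(x_min, intercept), (x_max, slope + intercept)]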

Modified: lnt/trunk/lnt/server/ui/views.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/views.py?rev=161335&r1=161334&r2=161335&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/views.py (original)
+++ lnt/trunk/lnt/server/ui/views.py Mon Aug  6 15:02:49 2012
@@ -17,7 +17,6 @@
 import lnt.util
 import lnt.util.ImportData
 import lnt.util.stats
-from lnt.db import perfdb
 from lnt.server.ui.globals import db_url_for, v4_url_for
 import lnt.server.reporting.analysis
 from lnt.db import runinfo

Modified: lnt/trunk/lnt/util/ImportData.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/util/ImportData.py?rev=161335&r1=161334&r2=161335&view=diff
==============================================================================
--- lnt/trunk/lnt/util/ImportData.py (original)
+++ lnt/trunk/lnt/util/ImportData.py Mon Aug  6 15:02:49 2012
@@ -1,9 +1,8 @@
 import os, re, time
 
-import lnt.db.perfdb
 import lnt.testing
-from lnt import formats
-from lnt.db import runinfo
+import lnt.formats
+import lnt.db.runinfo
 from lnt.util import NTEmailReport
 
 def import_and_report(config, db_name, db, file, format, commit=False,
@@ -37,7 +36,7 @@
 
     startTime = time.time()
     try:
-        data = formats.read_any(file, format)
+        data = lnt.formats.read_any(file, format)
     except KeyboardInterrupt:
         raise
     except:
@@ -223,20 +222,20 @@
             #
             # FIXME: Think longer about mapping to test codes.
             result_info = None
-            if test_status == runinfo.REGRESSED:
+            if test_status == lnt.db.runinfo.REGRESSED:
                 result_string = 'FAIL'
-            elif test_status == runinfo.IMPROVED:
+            elif test_status == lnt.db.runinfo.IMPROVED:
                 result_string = 'IMPROVED'
                 result_info = "Test started passing."
-            elif test_status == runinfo.UNCHANGED_FAIL:
+            elif test_status == lnt.db.runinfo.UNCHANGED_FAIL:
                 result_string = 'XFAIL'
             elif perf_status == None:
                 # Missing perf status means test was just added or removed.
                 result_string = 'PASS'
-            elif perf_status == runinfo.REGRESSED:
+            elif perf_status == lnt.db.runinfo.REGRESSED:
                 result_string = 'REGRESSED'
                 result_info = 'Performance regressed.'
-            elif perf_status == runinfo.IMPROVED:
+            elif perf_status == lnt.db.runinfo.IMPROVED:
                 result_string = 'IMPROVED'
                 result_info = 'Performance improved.'
             else:

Modified: lnt/trunk/lnt/util/NTEmailReport.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/util/NTEmailReport.py?rev=161335&r1=161334&r2=161335&view=diff
==============================================================================
--- lnt/trunk/lnt/util/NTEmailReport.py (original)
+++ lnt/trunk/lnt/util/NTEmailReport.py Mon Aug  6 15:02:49 2012
@@ -4,17 +4,9 @@
 import urllib
 
 import StringIO
-from lnt.db import runinfo
-from lnt.db import perfdbsummary
-from lnt.server.ui import graphutil
-from lnt.server.ui import util
-from lnt.db import perfdb
-from lnt.util.NTUtil import *
 import lnt.server.db.v4db
 import lnt.server.reporting.runs
 
-from lnt.db.perfdb import Run, Sample
-
 def emailReport(result, db, run, baseurl, email_config, to, was_added=True,
                 will_commit=True):
     import email.mime.multipart
@@ -51,575 +43,12 @@
     s.sendmail(email_config.from_address, [to], msg.as_string())
     s.quit()
 
-def findPreceedingRun(query, run):
-    """findPreceedingRun - Find the most recent run in query which
-    precedes run."""
-    best = None
-    for r in query:
-        # Restrict to nightlytest runs.
-        if 'tag' in r.info and r.info['tag'].value != 'nightlytest':
-            continue
-
-        # Select most recent run prior to the one we are reporting on.
-        if (r.start_time < run.start_time and
-            (best is None or r.start_time > best.start_time)):
-            best = r
-    return best
-
-def getSimpleReport(result, db, run, baseurl, was_added, will_commit,
-                    only_html_body = False, show_graphs = False,
-                    num_comparison_runs = 10):
-    machine = run.machine
-    tag = run.info['tag'].value
-
-    # Get the run summary.
-    run_summary = perfdbsummary.SimpleSuiteRunSummary.get_summary(db, tag)
-
-    # Ignore runs which don't appear in the summary, for whatever reason.
-    if not run_summary.contains_run(run.id):
-        return ("No report for run", "No report for run", None)
-
-    # Load the test suite summary.
-    ts_summary = perfdbsummary.get_simple_suite_summary(db, tag)
-
-    # Get the run pass/fail information.
-    sri = runinfo.SimpleRunInfo(db, ts_summary)
-
-    # Gather the runs to use for statistical data.
-    cur_id = run.id
-    comparison_window = []
-    for i in range(num_comparison_runs):
-        cur_id = run_summary.get_previous_run_on_machine(cur_id)
-        if not cur_id:
-            break
-        comparison_window.append(cur_id)
-
-    # Find previous run to compare to.
-    id = run_summary.get_previous_run_on_machine(run.id)
-    if id is not None:
-        compare_to = db.getRun(id)
-    else:
-        # FIXME: Look for run across machine.
-        compare_to = None
-
-    # Get the test status style used in each run.
-    run_status_kind = run_summary.get_run_status_kind(db, run.id)
-    if compare_to:
-        compare_to_status_kind = run_summary.get_run_status_kind(
-            db, compare_to.id)
-    else:
-        compare_to_status_kind = None
-
-    # Get the list of tests we are interested in.
-    interesting_runs = [run.id]
-    if compare_to:
-        interesting_runs.append(compare_to.id)
-    test_names = ts_summary.get_test_names_in_runs(db, interesting_runs)
-
-    # Gather the changes to report, mapped by parameter set.
-    new_failures = util.multidict()
-    new_passes = util.multidict()
-    perf_regressions = util.multidict()
-    perf_improvements = util.multidict()
-    added_tests = util.multidict()
-    removed_tests = util.multidict()
-    existing_failures = util.multidict()
-    unchanged_tests = util.multidict()
-    num_total_tests = len(test_names) * len(ts_summary.parameter_sets)
-    for name in test_names:
-        for pset in ts_summary.parameter_sets:
-            cr = sri.get_run_comparison_result(
-                run, run_status_kind, compare_to, compare_to_status_kind,
-                name, pset, comparison_window)
-            test_status = cr.get_test_status()
-            perf_status = cr.get_value_status()
-            if test_status == runinfo.REGRESSED:
-                new_failures[pset] = (name, cr)
-            elif test_status == runinfo.IMPROVED:
-                new_passes[pset] = (name, cr)
-            elif cr.current is None and cr.previous is not None:
-                removed_tests[pset] = (name, cr)
-            elif cr.current is not None and cr.previous is None:
-                added_tests[pset] = (name, cr)
-            elif test_status == runinfo.UNCHANGED_FAIL:
-                existing_failures[pset] = (name, cr)
-            elif perf_status == runinfo.REGRESSED:
-                perf_regressions[pset] = (name, cr)
-            elif perf_status == runinfo.IMPROVED:
-                perf_improvements[pset] = (name, cr)
-            else:
-                unchanged_tests[pset] = (name, cr)
-
-    # Collect the simplified results, if desired, for sending back to clients.
-    if result is not None:
-        test_results = result['test_results'] = []
-        for pset in ts_summary.parameter_sets:
-            pset_results = []
-            for name in test_names:
-                cr = sri.get_run_comparison_result(
-                    run, run_status_kind, compare_to, compare_to_status_kind,
-                    name, pset, comparison_window)
-                test_status = cr.get_test_status()
-                perf_status = cr.get_value_status()
-                # FIXME: Include additional information about performance
-                # changes.
-                pset_results.append( (name, test_status, perf_status) )
-            test_results.append({ 'pset' : pset, 'results' : pset_results })
-
-    # Generate the report.
-    report = StringIO.StringIO()
-    html_report = StringIO.StringIO()
-
-    machine = run.machine
-    subject = """%s nightly tester results""" % machine.name
-
-
-    # Generate the report header.
-    if baseurl[-1] == '/':
-        baseurl = baseurl[:-1]
-
-    report_url = """%s/simple/%s/%d/""" % (baseurl, tag, run.id)
-    print >>report, report_url
-    print >>report, """Nickname: %s:%d""" % (machine.name, machine.number)
-    if 'name' in machine.info:
-        print >>report, """Name: %s""" % (machine.info['name'].value,)
-    print >>report, """Comparing:"""
-    print >>report, """  Run: %d, Order: %s, Start Time: %s, End Time: %s""" % (
-        run.id, run.info['run_order'].value, run.start_time, run.end_time)
-    if compare_to:
-        print >>report, ("""   To: %d, Order: %s, """
-                         """Start Time: %s, End Time: %s""") % (
-            compare_to.id, compare_to.info['run_order'].value,
-            compare_to.start_time, compare_to.end_time)
-        if run.machine != compare_to.machine:
-            print >>report, """*** WARNING ***:""",
-            print >>report, """comparison is against a different machine""",
-            print >>report, """(%s:%d)""" % (compare_to.machine.name,
-                                             compare_to.machine.number)
-    else:
-        print >>report, """   To: (none)"""
-    print >>report
-
-    # Generate the HTML report header.
-    print >>html_report, """\
-<h1>%s</h1>
-<table>""" % subject
-    print >>html_report, """\
-<tr><td>URL</td><td><a href="%s">%s</a></td></tr>""" % (report_url, report_url)
-    print >>html_report, "<tr><td>Nickname</td><td>%s:%d</td></tr>" % (
-        machine.name, machine.number)
-    if 'name' in machine.info:
-        print >>html_report, """<tr><td>Name</td<td>%s</td></tr>""" % (
-            machine.info['name'].value,)
-    print >>html_report, """</table>"""
-    print >>html_report, """\
-<p>
-<table>
-  <tr>
-    <th>Run</th>
-    <th>ID</th>
-    <th>Order</th>
-    <th>Start Time</th>
-    <th>End Time</th>
-  </tr>"""
-    print >>html_report, """\
-<tr><td>Current</td><td>%d</td><td>%s</td><td>%s</td><td>%s</td></tr>""" % (
-        run.id, run.info['run_order'].value, run.start_time, run.end_time)
-    if compare_to:
-        print >>html_report, """\
-<tr><td>Previous</td><td>%d</td><td>%s</td><td>%s</td><td>%s</td></tr>""" % (
-            compare_to.id, compare_to.info['run_order'].value,
-            compare_to.start_time, compare_to.end_time)
-    else:
-        print >>html_report, """<tr><td colspan=4>No Previous Run</td></tr>"""
-    print >>html_report, """</table>"""
-    if compare_to and run.machine != compare_to.machine:
-        print >>html_report, """<p><b>*** WARNING ***:""",
-        print >>html_report, """comparison is against a different machine""",
-        print >>html_report, """(%s:%d)</b></p>""" % (compare_to.machine.name,
-                                                      compare_to.machine.number)
-
-    # Generate the summary of the changes.
-    items_info = (('New Failures', new_failures, False),
-                  ('New Passes', new_passes, False),
-                  ('Performance Regressions', perf_regressions, True),
-                  ('Performance Improvements', perf_improvements, True),
-                  ('Removed Tests', removed_tests, False),
-                  ('Added Tests', added_tests, False),
-                  ('Existing Failures', existing_failures, False),
-                  ('Unchanged Tests', unchanged_tests, False))
-    total_changes = sum([sum(map(len, items.values()))
-                         for name,items,_ in items_info
-                         if name != 'Unchanged Tests'])
-    graphs = []
-    print >>report, """==============="""
-    print >>report, """Tests Summary"""
-    print >>report, """==============="""
-    print >>report
-    print >>html_report, """
-<hr>
-<h3>Tests Summary</h3>
-<table>
-<thead><tr><th>Status Group</th><th align="right">#</th></tr></thead>
-"""
-    for name,items,_ in items_info:
-        if items:
-            num_items = sum(map(len, items.values()))
-            print >>report, '%s: %d' % (name, num_items)
-            print >>html_report, """
-<tr><td>%s</td><td align="right">%d</td></tr>""" % (name, num_items)
-    print >>report, """Total Tests: %d""" % num_total_tests
-    print >>report
-    print >>html_report, """
-<tfoot>
-  <tr><td><b>Total Tests</b></td><td align="right"><b>%d</b></td></tr>
-</tfoot>
-</table>
-""" % num_total_tests
-
-    if total_changes:
-        print >>report, """=============="""
-        print >>report, """Changes Detail"""
-        print >>report, """=============="""
-        print >>html_report, """
-<p>
-<h3>Changes Detail</h3>"""
-        for test_name,items,show_perf in items_info:
-            if not items or test_name == 'Unchanged Tests':
-                continue
-
-            show_pset = items.items()[0][0] or len(items) > 1
-            pset_names = dict(
-                (pset, 'pset.%d' % i)
-                for i,pset in enumerate(ts_summary.parameter_sets))
-            print >>report
-            print >>report, test_name
-            print >>report, '-' * len(test_name)
-            for pset,tests in items.items():
-                if show_perf:
-                    tests.sort(key = lambda (_,cr): -abs(cr.pct_delta))
-
-                # Group tests by final component.
-                def get_last_component(t):
-                    name = t[0]
-                    if '.' in name:
-                        return name.rsplit('.', 1)[1]
-                    return ''
-                grouped = util.multidict(
-                    (get_last_component(t), t)
-                    for t in tests)
-
-                for group,grouped_tests in util.sorted(grouped.items()):
-                    group_name = {
-                        "" : "(ungrouped)",
-                        "exec" : "Execution",
-                        "compile" : "Compile" }.get(group, group)
-                    if show_pset:
-                        table_name = "%s - %s" % (test_name, pset)
-                    else:
-                        table_name = test_name
-                    print >>report, "%s - %s" % (table_name, group_name)
-                    print >>html_report, """
-    <p>
-    <table class="sortable">
-    <tr><th>%s - %s </th>""" % (table_name, group_name)
-                    if show_perf:
-                        print >>html_report, """
-    <th>Δ</th><th>Previous</th><th>Current</th> <th>σ</th>"""
-                    print >>html_report, """</tr>"""
-                    for i,(name,cr) in enumerate(grouped_tests):
-                        if show_perf:
-                            if cr.stddev is not None:
-                                print >>report, (
-                                    '  %s: %.2f%%'
-                                    '(%.4f => %.4f, std. dev.: %.4f)') % (
-                                    name, 100. * cr.pct_delta,
-                                    cr.previous, cr.current, cr.stddev)
-                            else:
-                                print >>report, (
-                                    '  %s: %.2f%%'
-                                    '(%.4f => %.4f)') % (
-                                    name, 100. * cr.pct_delta,
-                                    cr.previous, cr.current)
-
-                            # Show inline charts for top 10 changes.
-                            if show_graphs and i < 10:
-                                graph_name = "graph.%d" % len(graphs)
-                                graphs.append( (graph_name,name,pset) )
-                                extra_cell_value = """
-    <br><canvas id="%s" width="400" height="100"></canvas>
-    """ % (graph_name)
-                            else:
-                                extra_cell_value = ""
-
-                            # Link the regression to the chart of its
-                            # performance.
-                            pset_name = pset_names[pset]
-                            form_data = urllib.urlencode([(pset_name, 'on'),
-                                                          ('test.'+name, 'on')])
-                            linked_name = '<a href="%s?%s">%s</a>' % (
-                                os.path.join(report_url, "graph"),
-                                form_data, name)
-
-                            pct_value = util.PctCell(cr.pct_delta).render()
-                            if cr.stddev is not None:
-                                print >>html_report, """
-    <tr><td>%s%s</td>%s<td>%.4f</td><td>%.4f</td><td>%.4f</td></tr>""" %(
-                                    linked_name, extra_cell_value, pct_value,
-                                    cr.previous, cr.current, cr.stddev)
-                            else:
-                                print >>html_report, """
-    <tr><td>%s%s</td>%s<td>%.4f</td><td>%.4f</td><td>-</td></tr>""" %(
-                                    linked_name, extra_cell_value, pct_value,
-                                    cr.previous, cr.current)
-                        else:
-                            print >>report, '  %s' % (name,)
-                            print >>html_report, """
-    <tr><td>%s</td></tr>""" % (name,)
-                    print >>html_report, """
-    </table>"""
-
-    # Finish up the HTML report.
-    if graphs:
-        # Get the test ids we want data for.
-        test_ids = [ts_summary.test_id_map[(name,pset)]
-                     for _,name,pset in graphs]
-
-        plots_iter = graphutil.get_test_plots(db, machine, test_ids,
-                                              run_summary, ts_summary,
-                                              show_mad_error = True,
-                                              show_points = True)
-
-        print >>html_report, """
-<script type="text/javascript">
-function init_report() {"""
-        for (graph_item, plot_info) in zip(graphs, plots_iter):
-            graph_name = graph_item[0]
-            plot_js = plot_info[1]
-            print >>html_report, """
-        graph = new Graph2D("%s");
-        graph.clearColor = [1, 1, 1];
-        %s
-        graph.draw();
-""" % (graph_name, plot_js)
-        print >>html_report, """
-}
-</script>"""
-
-    html_report = html_report.getvalue()
-    if not only_html_body:
-        # We embed the additional resources, so that the message is self
-        # contained.
-        static_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
-                                   "server", "ui", "static")
-        style_css = open(os.path.join(static_path,
-                                      "style.css")).read()
-        header = """
-    <style type="text/css">
-%s
-    </style>""" % style_css
-        if graphs:
-            view2d_js = open(os.path.join(static_path,
-                                          "View2D.js")).read()
-            header += """
-    <script type="text/javascript">
-%(view2d_js)s
-    </script>""" % locals()
-
-        html_report = """
-<html>
-  <head>
-%(header)s
-    <title>%(subject)s</title>
-  </head>
-  <body onload="init_report()">
-%(html_report)s
-  </body>
-</html>""" % locals()
-
-    return subject, report.getvalue(), html_report
-
 def getReport(result, db, run, baseurl, was_added, will_commit,
               only_html_body = False, compare_to = None):
+    assert isinstance(db, lnt.server.db.v4db.V4DB)
     report = StringIO.StringIO()
 
-    # We haven't implemented V4DB support yet in reports.
-    if isinstance(db, lnt.server.db.v4db.V4DB):
-        reports = lnt.server.reporting.runs.generate_run_report(
-            run, baseurl=baseurl, only_html_body=only_html_body,
-            result=result, compare_to=compare_to)
-        return reports[:3]
-
-    # Use a simple report unless the tag indicates this is an old style nightly
-    # test run.
-    if 'tag' in run.info and run.info['tag'].value != 'nightlytest':
-        return getSimpleReport(result, db, run, baseurl, was_added, will_commit,
-                               only_html_body)
-
-    machine = run.machine
-    compareTo = None
-
-    # Find comparison run.
-    # FIXME: Share this code with similar stuff in the server UI.
-    # FIXME: Scalability.
-    compareCrossesMachine = False
-    compareTo = findPreceedingRun(db.runs(machine=machine), run)
-
-    # If we didn't find a comparison run against this machine, look
-    # for a comparison run against the same machine name, and warn the
-    # user that we are crossing machines.
-    if compareTo is None:
-        compareCrossesMachine = True
-        q = db.session.query(perfdb.Run).join(perfdb.Machine)
-        q = q.filter_by(name=machine.name)
-        compareTo = findPreceedingRun(q, run)
-
-    summary = RunSummary()
-    summary.addRun(db, run)
-    if compareTo:
-        summary.addRun(db, compareTo)
-
-    def getTestValue(run, testname, keyname):
-        fullname = 'nightlytest.' + testname + '.' + keyname
-        t = summary.testMap.get(str(fullname))
-        if t is None:
-            return None
-        samples = summary.getRunSamples(run).get(t.id)
-        if not samples:
-            return None
-        return samples[0]
-    def getTestSuccess(run, testname, keyname):
-        res = getTestValue(run, testname, keyname + '.success')
-        if res is None:
-            return res
-        return not not res
-
-    newPasses = util.multidict()
-    newFailures = util.multidict()
-    addedTests = util.multidict()
-    removedTests = util.multidict()
-    allTests = set()
-    allFailures = set()
-    allFailuresByKey = util.multidict()
-    for keyname,title in kTSKeys.items():
-        for testname in summary.testNames:
-            curResult = getTestSuccess(run, testname, keyname)
-            prevResult = getTestSuccess(compareTo, testname, keyname)
-
-            if curResult is not None:
-                allTests.add((testname,keyname))
-                if curResult is False:
-                    allFailures.add((testname,keyname))
-                    allFailuresByKey[title] = testname
-
-            # Count as new pass if it passed, and previous result was failure.
-            if curResult and prevResult == False:
-                newPasses[testname] = title
-
-            # Count as new failure if it failed, and previous result was not
-            # failure.
-            if curResult == False and prevResult != False:
-                newFailures[testname] = title
-
-            if curResult is not None and prevResult is None:
-                addedTests[testname] = title
-            if curResult is None and prevResult is not None:
-                removedTests[testname] = title
-
-    changes = util.multidict()
-    for i,(name,key) in enumerate(kComparisonKinds):
-        if not key:
-            # FIXME: File Size
-            continue
-
-        for testname in summary.testNames:
-            curValue = getTestValue(run, testname, key)
-            prevValue = getTestValue(compareTo, testname, key)
-
-            # Skip missing tests.
-            if curValue is None or prevValue is None:
-                continue
-
-            pct = util.safediv(curValue, prevValue)
-            if pct is None:
-                continue
-            pctDelta = pct - 1.
-            if abs(pctDelta) < .05:
-                continue
-            if min(prevValue, curValue) <= .2:
-                continue
-
-            changes[name] = (testname, curValue, prevValue, pctDelta)
-
-    if will_commit:
-        if not was_added:
-            print >>report, ("*** NOTE ***: This was a duplicate submission, "
-                             "and did not modify the database.\n")
-    else:
-        if was_added:
-            print >>report, ("*** NOTE ***: This is a test submission, "
-                             "it will not be committed to the database.\n")
-        else:
-            print >>report, ("*** NOTE ***: This is a test submission, "
-                             "and was a duplicate of an existing run.\n")
-
-    if baseurl[-1] == '/':
-        baseurl = baseurl[:-1]
-    print >>report, """%s/nightlytest/%d/""" % (baseurl, run.id)
-    print >>report, """Nickname: %s:%d""" % (machine.name, machine.number)
-    if 'name' in machine.info:
-        print >>report, """Name: %s""" % (machine.info['name'].value,)
-    print >>report
-    print >>report, """Run: %d, Start Time: %s, End Time: %s""" % (
-        run.id, run.start_time, run.end_time)
-    if compareTo:
-        print >>report, """Comparing To: %d, Start Time: %s, End Time: %s""" % (
-            compareTo.id, compareTo.start_time, compareTo.end_time)
-        if compareCrossesMachine:
-            print >>report, """*** WARNING ***:""",
-            print >>report, """comparison is against a different machine""",
-            print >>report, """(%s:%d)""" % (compareTo.machine.name,
-                                             compareTo.machine.number)
-    else:
-        print >>report, """Comparing To: (none)"""
-    print >>report
-
-    print >>report, """--- Changes Summary ---"""
-    for title,elts in (('New Test Passes', newPasses),
-                       ('New Test Failures', newFailures),
-                       ('Added Tests', addedTests),
-                       ('Removed Tests', removedTests)):
-        print >>report, """%s: %d""" % (title,
-                                        sum([len(values)
-                                             for key,values in elts.items()]))
-    numSignificantChanges = sum([len(changelist)
-                                 for name,changelist in changes.items()])
-    print >>report, """Significant Changes: %d""" % (numSignificantChanges,)
-    print >>report
-    print >>report, """--- Tests Summary ---"""
-    print >>report, """Total Tests: %d""" % (len(allTests),)
-    print >>report, """Total Test Failures: %d""" % (len(allFailures),)
-    print >>report
-    print >>report, """Total Test Failures By Type:"""
-    for name,items in util.sorted(allFailuresByKey.items()):
-        print >>report, """  %s: %d""" % (name, len(set(items)))
-
-    print >>report
-    print >>report, """--- Changes Detail ---"""
-    for title,elts in (('New Test Passes', newPasses),
-                       ('New Test Failures', newFailures),
-                       ('Added Tests', addedTests),
-                       ('Removed Tests', removedTests)):
-        print >>report, """%s:""" % (title,)
-        print >>report, "".join("%s [%s]\n" % (key, ", ".join(values))
-                                for key,values in util.sorted(elts.items()))
-    print >>report, """Significant Changes in Test Results:"""
-    for name,changelist in changes.items():
-        print >>report, """%s:""" % name
-        for name,curValue,prevValue,delta in util.sorted(changelist):
-            print >>report, """ %s: %.2f%% (%.4f => %.4f)""" % (
-                name, delta*100, prevValue, curValue)
-
-    # FIXME: Where is the old mailer getting the arch from?
-    subject = """%s nightly tester results""" % machine.name
-    return subject, report.getvalue(), None
+    reports = lnt.server.reporting.runs.generate_run_report(
+        run, baseurl=baseurl, only_html_body=only_html_body,
+        result=result, compare_to=compare_to)
+    return reports[:3]
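
With this change getReport requires a v0.4 database and simply forwards
to lnt.server.reporting.runs.generate_run_report, returning the familiar
(subject, text_report, html_report) triple. A minimal sketch of the
expected call, assuming 'db' is an open lnt.server.db.v4db.V4DB and
'run' a run object from it; the baseurl value and the surrounding setup
are illustrative assumptions, not part of this commit:

    # Sketch only: calling the v4-only getReport after this revision.
    # 'db' and 'run' are assumed to come from an existing v0.4 database;
    # session setup and error handling are omitted.
    from lnt.util import NTEmailReport

    subject, text_report, html_report = NTEmailReport.getReport(
        result=None,                      # optional dict for client results
        db=db, run=run,
        baseurl='http://localhost:8000',  # assumed server URL
        was_added=True, will_commit=True,
        only_html_body=False)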

Removed: lnt/trunk/lnt/util/NTUtil.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/util/NTUtil.py?rev=161334&view=auto
==============================================================================
--- lnt/trunk/lnt/util/NTUtil.py (original)
+++ lnt/trunk/lnt/util/NTUtil.py (removed)
@@ -1,113 +0,0 @@
-from lnt.server.ui import util
-from lnt.db.perfdb import Run, Sample, Test
-
-kPrefix = 'nightlytest'
-
-# FIXME: We shouldn't need this.
-kSentinelKeyName = 'bc.compile.success'
-
-kComparisonKinds = [('File Size',None),
-                    ('CBE','cbe.exec.time'),
-                    ('LLC','llc.exec.time'),
-                    ('JIT','jit.exec.time'),
-                    ('GCCAS','gcc.compile.time'),
-                    ('Bitcode','bc.compile.size'),
-                    ('LLC compile','llc.compile.time'),
-                    ('LLC-BETA compile','llc-beta.compile.time'),
-                    ('JIT codegen','jit.compile.time'),
-                    ('LLC-BETA','llc-beta.exec.time')]
-
-kTSKeys = { 'gcc.compile' : 'GCCAS',
-            'bc.compile' : 'Bitcode',
-            'llc.compile' : 'LLC compile',
-            'llc-beta.compile' : 'LLC-BETA compile',
-            'jit.compile' : 'JIT codegen',
-            'cbe.exec' : 'CBE',
-            'llc.exec' : 'LLC',
-            'llc-beta.exec' : 'LLC-BETA',
-            'jit.exec' : 'JIT' }
-
-# This isn't very fast; compute a summary if querying the same run
-# repeatedly.
-def getTestValueInRun(db, r, t, default=None, coerce=None):
-    for value, in db.session.query(Sample.value).\
-            filter(Sample.test == t).\
-            filter(Sample.run == r):
-        if coerce is not None:
-            return coerce(value)
-        return value
-    return default
-
-def getTestNameValueInRun(db, r, testname, default=None, coerce=None):
-    for value, in db.session.query(Sample.value).join(Test).\
-            filter(Test.name == testname).\
-            filter(Sample.run == r):
-        if coerce is not None:
-            return coerce(value)
-        return value
-    return default
-
-class RunSummary:
-    def __init__(self):
-        # The union of test names seen.
-        self.testNames = set()
-        # Map of test ids to test instances.
-        self.testIds = {}
-        # Map of test names to test instances
-        self.testMap = {}
-        # Map of run to multimap of test ID to sample list.
-        self.runSamples = {}
-
-        # FIXME: Should we worry about the info parameters on a
-        # nightlytest test?
-
-    def testMatchesPredicates(self, db, t, testPredicate, infoPredicates):
-        if testPredicate:
-            if not testPredicate(t):
-                return False
-        if infoPredicates:
-            info = dict((i.key,i.value) for i in t.info.values())
-            for key,predicate in infoPredicates:
-                value = info.get(key)
-                if not predicate(t, key, value):
-                    return False
-        return True
-
-    def addRun(self, db, run, testPredicate=None, infoPredicates=None):
-        sampleMap = self.runSamples.get(run.id)
-        if sampleMap is None:
-            sampleMap = self.runSamples[run.id] = util.multidict()
-
-        q = db.session.query(Sample.value,Test).join(Test)
-        q = q.filter(Sample.run == run)
-        for s_value,t in q:
-            if not self.testMatchesPredicates(db, t, testPredicate, infoPredicates):
-                continue
-
-            sampleMap[t.id] = s_value
-            self.testMap[t.name] = t
-            self.testIds[t.id] = t
-
-            # Filter out summary things in name lists by only looking
-            # for things which have a .success entry.
-            if t.name.endswith('.success'):
-                self.testNames.add(t.name.split('.', 3)[1])
-
-    def getRunSamples(self, run):
-        if run is None:
-            return {}
-        return self.runSamples.get(run.id, {})
-
-    def getTestValueByName(self, run, testName, default, coerce=None):
-        t = self.testMap.get(testName)
-        if t is None:
-            return default
-        sampleMap = self.runSamples.get(run.id, {})
-        samples = sampleMap.get(t.id)
-        if sampleMap is None or samples is None:
-            return default
-        # FIXME: Multiple samples?
-        if coerce:
-            return coerce(samples[0].value)
-        else:
-            return samples[0].value
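
The removed RunSummary helpers (and the report code above) lean on
lnt.server.ui.util.multidict, whose item assignment accumulates values
into a per-key list; that is what makes counting idioms like
sum(map(len, items.values())) work. A self-contained sketch of those
semantics, reconstructed from the usage in these files rather than
quoted from util.py:

    class multidict(object):
        # Dict-like container where assignment appends: after
        # m[k] = v1; m[k] = v2, m[k] == [v1, v2].
        def __init__(self, elts=()):
            self.data = {}
            for key, value in elts:
                self[key] = value
        def __contains__(self, key):
            return key in self.data
        def __getitem__(self, key):
            return self.data[key]
        def __setitem__(self, key, value):
            self.data.setdefault(key, []).append(value)
        def __len__(self):
            return len(self.data)
        def get(self, key, default=None):
            return self.data.get(key, default)
        def items(self):
            return self.data.items()
        def values(self):
            return self.data.values()
        def keys(self):
            return self.data.keys()

For example, newPasses[testname] = title in the removed code appends
title to the list of titles recorded for testname.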


More information about the llvm-commits mailing list