[Telemetry] Pass test_runner environment in local args instead of a global variable

Also adds more fields to environment to narrow the scope of
benchmark and user story set discovery. This should avoid problems
with adding Python files to unrelated directories, and hides PageTests
from external Telemetry benchmark runners like run_gpu_tests.py and
chrome_proxy's run_benchmark.

R=dtu,nednguyen,sullivan,kbr@chromium.org,bolian
BUG=460181
TEST=tools/perf/run_benchmark; content/test/gpu/run_gpu_tests.py; tools/chrome_proxy/run_benchmark  # All return a full and correct test list.

Committed: https://crrev.com/1da5f7f70ea6dc7dd0667ea78637802c76305f5a
Cr-Commit-Position: refs/heads/master@{#318149}

Review URL: https://codereview.chromium.org/942663002

Cr-Commit-Position: refs/heads/master@{#318531}
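
Roughly, the new embedder-side calling convention looks like the sketch below, mirroring the run_benchmark changes in the diff that follows; the 'my_benchmarks' directory name is illustrative and not part of this commit:

import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'telemetry'))

from telemetry import benchmark_runner


if __name__ == '__main__':
  # Each runner script now builds its own Environment and hands it to main(),
  # instead of assigning to the benchmark_runner.config global.
  top_level_dir = os.path.dirname(os.path.realpath(__file__))
  environment = benchmark_runner.Environment(
      top_level_dir=top_level_dir,
      benchmark_dirs=[os.path.join(top_level_dir, 'my_benchmarks')])
  sys.exit(benchmark_runner.main(environment))
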
eakuefner authored and Commit bot committed Feb 27, 2015
1 parent af21a9a commit e97726d
Showing 8 changed files with 91 additions and 66 deletions.
9 changes: 5 additions & 4 deletions content/test/gpu/run_gpu_test.py
@@ -12,7 +12,6 @@
     os.pardir, os.pardir, os.pardir, 'tools', 'telemetry'))
 
 from telemetry import benchmark_runner
-from telemetry.core import environment
 
 
 def _LaunchDBus():
@@ -85,12 +84,14 @@ def _ShutdownDBus():
 
 
 if __name__ == '__main__':
-  base_dir = os.path.dirname(os.path.realpath(__file__))
-  benchmark_runner.config = environment.Environment([base_dir])
+  top_level_dir = os.path.dirname(os.path.realpath(__file__))
+  environment = benchmark_runner.Environment(
+      top_level_dir=top_level_dir,
+      benchmark_dirs=[os.path.join(top_level_dir, 'gpu_tests')])
 
   did_launch_dbus = _LaunchDBus()
   try:
-    retcode = benchmark_runner.main()
+    retcode = benchmark_runner.main(environment)
   finally:
     if did_launch_dbus:
       _ShutdownDBus()
9 changes: 5 additions & 4 deletions tools/chrome_proxy/run_benchmark
@@ -10,10 +10,11 @@ sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'telemetry'))
 sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'perf'))
 
 from telemetry import benchmark_runner
-from telemetry.core import environment
 
 
 if __name__ == '__main__':
-  base_dir = os.path.dirname(os.path.realpath(__file__))
-  benchmark_runner.config = environment.Environment([base_dir])
-  sys.exit(benchmark_runner.main())
+  top_level_dir = os.path.dirname(os.path.realpath(__file__))
+  environment = benchmark_runner.Environment(
+      top_level_dir=top_level_dir,
+      benchmark_dirs=[os.path.join(top_level_dir, 'integration_tests')])
+  sys.exit(benchmark_runner.main(environment))
10 changes: 6 additions & 4 deletions tools/perf/run_benchmark
@@ -9,10 +9,12 @@ import sys
 sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'telemetry'))
 
 from telemetry import benchmark_runner
-from telemetry.core import environment
 
 
 if __name__ == '__main__':
-  base_dir = os.path.dirname(os.path.realpath(__file__))
-  benchmark_runner.config = environment.Environment([base_dir])
-  sys.exit(benchmark_runner.main())
+  top_level_dir = os.path.dirname(os.path.realpath(__file__))
+
+  environment = benchmark_runner.Environment(
+      top_level_dir=top_level_dir,
+      benchmark_dirs=[os.path.join(top_level_dir, 'benchmarks')])
+  sys.exit(benchmark_runner.main(environment))
81 changes: 56 additions & 25 deletions tools/telemetry/telemetry/benchmark_runner.py
@@ -19,11 +19,44 @@
 from telemetry.core import browser_options
 from telemetry.core import command_line
 from telemetry.core import discover
-from telemetry.core import environment
 from telemetry.core import util
 from telemetry.util import find_dependencies
 
 
+class Environment(object):
+  """Contains information about the benchmark runtime environment.
+
+  Attributes:
+    top_level_dir: A dir that contains benchmark, page test, and/or user story
+        set dirs and associated artifacts.
+    benchmark_dirs: A list of dirs containing benchmarks.
+    benchmark_aliases: A dict of name:alias string pairs to be matched against
+        exactly during benchmark selection.
+  """
+  def __init__(self, top_level_dir, benchmark_dirs=None,
+               benchmark_aliases=None):
+    self._top_level_dir = top_level_dir
+    self._benchmark_dirs = benchmark_dirs or []
+    self._benchmark_aliases = benchmark_aliases or dict()
+
+    if benchmark_aliases:
+      self._benchmark_aliases = benchmark_aliases
+    else:
+      self._benchmark_aliases = {}
+
+  @property
+  def top_level_dir(self):
+    return self._top_level_dir
+
+  @property
+  def benchmark_dirs(self):
+    return self._benchmark_dirs
+
+  @property
+  def benchmark_aliases(self):
+    return self._benchmark_aliases
+
+
 class Help(command_line.OptparseCommand):
   """Display help information about a command"""
 
@@ -61,17 +94,17 @@ def CreateParser(cls):
     return parser
 
   @classmethod
-  def AddCommandLineArgs(cls, parser):
+  def AddCommandLineArgs(cls, parser, _):
     parser.add_option('-j', '--json-output-file', type='string')
     parser.add_option('-n', '--num-shards', type='int', default=1)
 
   @classmethod
-  def ProcessCommandLineArgs(cls, parser, args):
+  def ProcessCommandLineArgs(cls, parser, args, environment):
     if not args.positional_args:
-      args.benchmarks = _Benchmarks()
+      args.benchmarks = _Benchmarks(environment)
     elif len(args.positional_args) == 1:
       args.benchmarks = _MatchBenchmarkName(args.positional_args[0],
-                                            exact_matches=False)
+                                            environment, exact_matches=False)
     else:
       parser.error('Must provide at most one benchmark name.')
 
@@ -105,13 +138,13 @@ def CreateParser(cls):
     return parser
 
   @classmethod
-  def AddCommandLineArgs(cls, parser):
+  def AddCommandLineArgs(cls, parser, environment):
     benchmark.AddCommandLineArgs(parser)
 
     # Allow benchmarks to add their own command line options.
     matching_benchmarks = []
     for arg in sys.argv[1:]:
-      matching_benchmarks += _MatchBenchmarkName(arg)
+      matching_benchmarks += _MatchBenchmarkName(arg, environment)
 
     if matching_benchmarks:
       # TODO(dtu): After move to argparse, add command-line args for all
@@ -123,19 +156,19 @@ def AddCommandLineArgs(cls, parser):
         matching_benchmark.SetArgumentDefaults(parser)
 
   @classmethod
-  def ProcessCommandLineArgs(cls, parser, args):
+  def ProcessCommandLineArgs(cls, parser, args, environment):
     if not args.positional_args:
       possible_browser = (
           browser_finder.FindBrowser(args) if args.browser_type else None)
-      _PrintBenchmarkList(_Benchmarks(), possible_browser, sys.stderr)
+      _PrintBenchmarkList(_Benchmarks(environment), possible_browser)
       sys.exit(-1)
 
     input_benchmark_name = args.positional_args[0]
-    matching_benchmarks = _MatchBenchmarkName(input_benchmark_name)
+    matching_benchmarks = _MatchBenchmarkName(input_benchmark_name, environment)
     if not matching_benchmarks:
       print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
       print >> sys.stderr
-      _PrintBenchmarkList(_Benchmarks(), None, sys.stderr)
+      _PrintBenchmarkList(_Benchmarks(environment), None, sys.stderr)
       sys.exit(-1)
 
     if len(matching_benchmarks) > 1:
@@ -180,15 +213,16 @@ def _MatchingCommands(string):
           if command.Name().startswith(string)]
 
 @decorators.Cache
-def _Benchmarks():
+def _Benchmarks(environment):
   benchmarks = []
-  for base_dir in config.base_paths:
-    benchmarks += discover.DiscoverClasses(base_dir, base_dir,
+  for search_dir in environment.benchmark_dirs:
+    benchmarks += discover.DiscoverClasses(search_dir,
+                                           environment.top_level_dir,
                                            benchmark.Benchmark,
                                            index_by_class_name=True).values()
   return benchmarks
 
-def _MatchBenchmarkName(input_benchmark_name, exact_matches=True):
+def _MatchBenchmarkName(input_benchmark_name, environment, exact_matches=True):
   def _Matches(input_string, search_string):
     if search_string.startswith(input_string):
       return True
@@ -200,18 +234,18 @@ def _Matches(input_string, search_string):
   # Exact matching.
   if exact_matches:
     # Don't add aliases to search dict, only allow exact matching for them.
-    if input_benchmark_name in config.benchmark_aliases:
-      exact_match = config.benchmark_aliases[input_benchmark_name]
+    if input_benchmark_name in environment.benchmark_aliases:
+      exact_match = environment.benchmark_aliases[input_benchmark_name]
     else:
       exact_match = input_benchmark_name
 
-    for benchmark_class in _Benchmarks():
+    for benchmark_class in _Benchmarks(environment):
       if exact_match == benchmark_class.Name():
         return [benchmark_class]
     return []
 
   # Fuzzy matching.
-  return [benchmark_class for benchmark_class in _Benchmarks()
+  return [benchmark_class for benchmark_class in _Benchmarks(environment)
           if _Matches(input_benchmark_name, benchmark_class.Name())]
 
 
@@ -310,10 +344,7 @@ def _PrintBenchmarkList(benchmarks, possible_browser, output_pipe=sys.stdout):
     print >> output_pipe
 
 
-config = environment.Environment([util.GetBaseDir()])
-
-
-def main():
+def main(environment):
   # Get the command name from the command line.
   if len(sys.argv) > 1 and sys.argv[1] == '--help':
     sys.argv[1] = 'help'
@@ -340,10 +371,10 @@ def main():
 
   # Parse and run the command.
   parser = command.CreateParser()
-  command.AddCommandLineArgs(parser)
+  command.AddCommandLineArgs(parser, environment)
   options, args = parser.parse_args()
   if commands:
     args = args[1:]
   options.positional_args = args
-  command.ProcessCommandLineArgs(parser, options)
+  command.ProcessCommandLineArgs(parser, options, environment)
   return command().Run(options)
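
Note on benchmark_aliases: as _MatchBenchmarkName above shows, the dict is consulted only on the exact-match path, mapping the name a user typed to the benchmark name it should resolve to. A hedged sketch of how an embedder might populate it (the alias and benchmark names here are purely illustrative, not from this change):

environment = benchmark_runner.Environment(
    top_level_dir=top_level_dir,
    benchmark_dirs=[os.path.join(top_level_dir, 'benchmarks')],
    benchmark_aliases={'legacy_benchmark_name': 'current_benchmark_name'})
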
14 changes: 12 additions & 2 deletions tools/telemetry/telemetry/core/command_line.py
@@ -64,17 +64,27 @@ def CreateParser(cls):
     return optparse.OptionParser('%%prog %s %s' % (cls.Name(), cls.usage),
                                  description=cls.Description())
 
+  @classmethod
+  def AddCommandLineArgs(cls, parser, environment):
+    # pylint: disable=arguments-differ
+    pass
+
+  @classmethod
+  def ProcessCommandLineArgs(cls, parser, args, environment):
+    # pylint: disable=arguments-differ
+    pass
+
   def Run(self, args):
     raise NotImplementedError()
 
   @classmethod
   def main(cls, args=None):
     """Main method to run this command as a standalone script."""
     parser = cls.CreateParser()
-    cls.AddCommandLineArgs(parser)
+    cls.AddCommandLineArgs(parser, None)
     options, args = parser.parse_args(args=args)
     options.positional_args = args
-    cls.ProcessCommandLineArgs(parser, options)
+    cls.ProcessCommandLineArgs(parser, options, None)
     return min(cls().Run(options), 255)
 
 
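
For command classes built on OptparseCommand, the practical upshot of these new base-class hooks is that AddCommandLineArgs and ProcessCommandLineArgs now receive the environment as an extra argument, which a command is free to ignore (as the '_' parameters elsewhere in this change do). A rough sketch of a hypothetical subclass under the new signatures; the class name and behavior are illustrative only:

from telemetry.core import command_line


class ListBenchmarkDirsCommand(command_line.OptparseCommand):
  """Hypothetical command that prints the benchmark search directories."""

  @classmethod
  def AddCommandLineArgs(cls, parser, _):
    # The environment argument is unused by this command.
    parser.add_option('--quiet', action='store_true', default=False)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, environment):
    # Stash the runner-provided search dirs on the parsed options;
    # environment is None when run through the standalone main() path.
    args.benchmark_dirs = environment.benchmark_dirs if environment else []

  def Run(self, args):
    if not args.quiet:
      for benchmark_dir in args.benchmark_dirs:
        print benchmark_dir
    return 0
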
20 changes: 0 additions & 20 deletions tools/telemetry/telemetry/core/environment.py

This file was deleted.

8 changes: 4 additions & 4 deletions tools/telemetry/telemetry/unittest_util/run_tests.py
@@ -35,7 +35,7 @@ def CreateParser(cls):
     return parser
 
   @classmethod
-  def AddCommandLineArgs(cls, parser):
+  def AddCommandLineArgs(cls, parser, _):
     parser.add_option('--repeat-count', type='int', default=1,
                       help='Repeats each a provided number of times.')
     parser.add_option('-d', '--also-run-disabled-tests',
@@ -55,7 +55,7 @@ def AddCommandLineArgs(cls, parser):
                       reporting=True)
 
   @classmethod
-  def ProcessCommandLineArgs(cls, parser, args):
+  def ProcessCommandLineArgs(cls, parser, args, _):
     # We retry failures by default unless we're running a list of tests
     # explicitly.
     if not args.retry_limit and not args.positional_args:
@@ -75,10 +75,10 @@ def ProcessCommandLineArgs(cls, parser, args):
   def main(cls, args=None, stream=None):  # pylint: disable=W0221
     # We override the superclass so that we can hook in the 'stream' arg.
     parser = cls.CreateParser()
-    cls.AddCommandLineArgs(parser)
+    cls.AddCommandLineArgs(parser, None)
     options, positional_args = parser.parse_args(args)
     options.positional_args = positional_args
-    cls.ProcessCommandLineArgs(parser, options)
+    cls.ProcessCommandLineArgs(parser, options, None)
 
     obj = cls()
     if stream is not None:
6 changes: 3 additions & 3 deletions tools/telemetry/telemetry/util/find_dependencies.py
@@ -77,7 +77,7 @@ def FindPageSetDependencies(base_dir):
 
     # Ensure the test's default options are set if needed.
     parser = optparse.OptionParser()
-    test_obj.AddCommandLineArgs(parser)
+    test_obj.AddCommandLineArgs(parser, None)
    options = optparse.Values()
    for k, v in parser.get_default_values().__dict__.iteritems():
      options.ensure_value(k, v)
@@ -227,7 +227,7 @@ class FindDependenciesCommand(command_line.OptparseCommand):
   """Prints all dependencies"""
 
   @classmethod
-  def AddCommandLineArgs(cls, parser):
+  def AddCommandLineArgs(cls, parser, _):
     parser.add_option(
         '-v', '--verbose', action='count', dest='verbosity',
         help='Increase verbosity level (repeat as needed).')
@@ -245,7 +245,7 @@ def AddCommandLineArgs(cls, parser):
         help='Store files in a zip archive at ZIP.')
 
   @classmethod
-  def ProcessCommandLineArgs(cls, parser, args):
+  def ProcessCommandLineArgs(cls, parser, args, _):
     if args.verbosity >= 2:
       logging.getLogger().setLevel(logging.DEBUG)
     elif args.verbosity:
