From a9c587416e084327a0eab1ac0640f731a8729357 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 30 Jan 2018 14:02:58 -0800 Subject: [PATCH 01/21] No-op ivy resolve is ~100ms cheaper (#5389) Right now, creating an executor can take as much as 150ms. Instead, only create the executor if it's actually needed. This is a bit of an abstraction leak, but 150ms is non-trivial (and we have a *lot* of this kind of 150ms kicking about, which add up to several seconds). --- src/python/pants/backend/jvm/tasks/ivy_resolve.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/python/pants/backend/jvm/tasks/ivy_resolve.py b/src/python/pants/backend/jvm/tasks/ivy_resolve.py index b12f1f297b0..0588393f9a9 100644 --- a/src/python/pants/backend/jvm/tasks/ivy_resolve.py +++ b/src/python/pants/backend/jvm/tasks/ivy_resolve.py @@ -12,6 +12,7 @@ from pants.backend.jvm.ivy_utils import IvyUtils from pants.backend.jvm.subsystems.resolve_subsystem import JvmResolveSubsystem +from pants.backend.jvm.targets.jar_library import JarLibrary from pants.backend.jvm.tasks.classpath_products import ClasspathProducts from pants.backend.jvm.tasks.ivy_task_mixin import IvyTaskMixin from pants.backend.jvm.tasks.nailgun_task import NailgunTask @@ -89,10 +90,16 @@ def execute(self): if JvmResolveSubsystem.global_instance().get_options().resolver != 'ivy': return - executor = self.create_java_executor() - targets = self.context.targets() compile_classpath = self.context.products.get_data('compile_classpath', init_func=ClasspathProducts.init_func(self.get_options().pants_workdir)) + + targets = self.context.targets() + if all(not isinstance(target, JarLibrary) for target in targets): + if self._report: + self.context.log.info("Not generating a report. No resolution performed.") + return + + executor = self.create_java_executor() results = self.resolve(executor=executor, targets=targets, classpath_products=compile_classpath, From bfe927d5cf5c729a8d85037047660c5d5d4c1689 Mon Sep 17 00:00:00 2001 From: Nick Howard Date: Thu, 1 Feb 2018 10:40:46 -0800 Subject: [PATCH 02/21] [v2-engine errors] Sort suggestions for typo'd targets, unique them when trace is disabled (#5413) ### Problem When there are a large number of suggestions, the lack of alphanumeric sorting is frustrating. #4788 ### Solution Sort the suggestions. Additionally, I noticed that because of where this happens, there may be multiple copies of the error message that show up. I changed the default behavior to dedup common errors. ### Result ``` ./pants list 3rdparty/python:rutaba --no-print-exception-stacktrace Exception caught: () Exception message: "rutaba" was not found in namespace "3rdparty/python". Did you mean one of: :Markdown :Pygments :ansicolors ``` Instead of ``` Exception caught: () Exception message: Build graph construction failed: ExecutionError Multiple exceptions encountered: ResolveError: "rutaba" was not found in namespace "3rdparty/python". Did you mean one of: :psutil :isort :pywatchman :pytest-cov ... :scandir :setuptools ResolveError: "rutaba" was not found in namespace "3rdparty/python". Did you mean one of: :psutil :isort :pywatchman :pytest-cov :setproctitle ... 
:scandir :setuptools ``` --- src/python/pants/engine/build_files.py | 3 ++- src/python/pants/engine/scheduler.py | 7 ++++--- .../pants_test/engine/legacy/test_graph.py | 21 ++++++++++++------- 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/src/python/pants/engine/build_files.py b/src/python/pants/engine/build_files.py index 8e0bde21229..6bdbc6e6e96 100644 --- a/src/python/pants/engine/build_files.py +++ b/src/python/pants/engine/build_files.py @@ -107,7 +107,8 @@ def __hash__(self): def _raise_did_you_mean(address_family, name): - possibilities = '\n '.join(':{}'.format(a.target_name) for a in address_family.addressables) + names = [a.target_name for a in address_family.addressables] + possibilities = '\n '.join(':{}'.format(target_name) for target_name in sorted(names)) raise ResolveError('"{}" was not found in namespace "{}". ' 'Did you mean one of:\n {}' .format(name, address_family.namespace, possibilities)) diff --git a/src/python/pants/engine/scheduler.py b/src/python/pants/engine/scheduler.py index d585fa6cc75..f63e5b93b51 100644 --- a/src/python/pants/engine/scheduler.py +++ b/src/python/pants/engine/scheduler.py @@ -487,12 +487,13 @@ def products_request(self, products, subjects): cumulative_trace = '\n'.join(self.trace(request)) raise ExecutionError('Received unexpected Throw state(s):\n{}'.format(cumulative_trace)) - if len(throw_root_states) == 1: + unique_exceptions = set(t.exc for t in throw_root_states) + if len(unique_exceptions) == 1: raise throw_root_states[0].exc else: raise ExecutionError('Multiple exceptions encountered:\n {}' - .format('\n '.join('{}: {}'.format(type(t.exc).__name__, str(t.exc)) - for t in throw_root_states))) + .format('\n '.join('{}: {}'.format(type(t).__name__, str(t)) + for t in unique_exceptions))) # Everything is a Return: we rely on the fact that roots are ordered to preserve subject # order in output lists. diff --git a/tests/python/pants_test/engine/legacy/test_graph.py b/tests/python/pants_test/engine/legacy/test_graph.py index 4e70922b0e6..a70eb257110 100644 --- a/tests/python/pants_test/engine/legacy/test_graph.py +++ b/tests/python/pants_test/engine/legacy/test_graph.py @@ -42,14 +42,15 @@ def _make_setup_args(self, specs): return options @contextmanager - def graph_helper(self, build_file_aliases=None, build_file_imports_behavior='allow'): + def graph_helper(self, build_file_aliases=None, build_file_imports_behavior='allow', include_trace_on_error=True): with temporary_dir() as work_dir: path_ignore_patterns = ['.*'] graph_helper = EngineInitializer.setup_legacy_graph(path_ignore_patterns, work_dir, build_file_imports_behavior, build_file_aliases=build_file_aliases, - native=self._native) + native=self._native, + include_trace_on_error=include_trace_on_error) yield graph_helper @contextmanager @@ -72,15 +73,21 @@ def create_target_roots(self, specs): class GraphTargetScanFailureTests(GraphTestBase): def test_with_missing_target_in_existing_build_file(self): + # When a target is missing, + # the suggestions should be in order + # and there should only be one copy of the error if tracing is off. with self.assertRaises(AddressLookupError) as cm: - with self.graph_helper() as graph_helper: + with self.graph_helper(include_trace_on_error=False) as graph_helper: self.create_graph_from_specs(graph_helper, ['3rdparty/python:rutabaga']) self.fail('Expected an exception.') - self.assertIn('"rutabaga" was not found in namespace "3rdparty/python". 
Did you mean one of:\n' - ' :psutil\n' - ' :isort', - str(cm.exception)) + error_message = str(cm.exception) + expected_message = '"rutabaga" was not found in namespace "3rdparty/python".' \ + ' Did you mean one of:\n' \ + ' :Markdown\n' \ + ' :Pygments\n' + self.assertIn(expected_message, error_message) + self.assertTrue(error_message.count(expected_message) == 1) def test_with_missing_directory_fails(self): with self.assertRaises(AddressLookupError) as cm: From 7a1c105fa2d0318c5533d4fac98fb6bed911ef85 Mon Sep 17 00:00:00 2001 From: Benjy Weinberger Date: Thu, 1 Feb 2018 12:43:30 -0800 Subject: [PATCH 03/21] Remove a remaining old-python-pipeline task from contrib/python. (#5411) Move the new version of that task from tasks2 to tasks, and leave a deprecation in the old module. Also rename a target that had a non-canonical name for no good reason. --- .../python/pants/contrib/python/checks/BUILD | 2 +- .../pants/contrib/python/checks/register.py | 2 +- .../pants/contrib/python/checks/tasks/BUILD | 1 - .../python/checks/tasks/python_eval.py | 241 +++++++++++------ .../pants/contrib/python/checks/tasks2/BUILD | 16 +- .../python/checks/tasks2/python_eval.py | 246 +----------------- .../templates/python_eval/eval.py.mustache | 45 ---- .../contrib/python/checks/tasks/BUILD | 4 +- .../python/checks/tasks/test_python_eval.py | 50 ++-- .../contrib/python/checks/tasks2/BUILD | 13 - .../python/checks/tasks2/test_python_eval.py | 188 ------------- 11 files changed, 205 insertions(+), 603 deletions(-) delete mode 100644 contrib/python/src/python/pants/contrib/python/checks/tasks2/templates/python_eval/eval.py.mustache delete mode 100644 contrib/python/tests/python/pants_test/contrib/python/checks/tasks2/BUILD delete mode 100644 contrib/python/tests/python/pants_test/contrib/python/checks/tasks2/test_python_eval.py diff --git a/contrib/python/src/python/pants/contrib/python/checks/BUILD b/contrib/python/src/python/pants/contrib/python/checks/BUILD index c8d4eef16ac..cf9c9243459 100644 --- a/contrib/python/src/python/pants/contrib/python/checks/BUILD +++ b/contrib/python/src/python/pants/contrib/python/checks/BUILD @@ -5,7 +5,7 @@ contrib_plugin( name='plugin', dependencies=[ 'contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle:all', - 'contrib/python/src/python/pants/contrib/python/checks/tasks:python', + 'contrib/python/src/python/pants/contrib/python/checks/tasks', 'contrib/python/src/python/pants/contrib/python/checks/tasks2', 'src/python/pants/goal:task_registrar', ], diff --git a/contrib/python/src/python/pants/contrib/python/checks/register.py b/contrib/python/src/python/pants/contrib/python/checks/register.py index aba4ad326ab..36948d330ef 100644 --- a/contrib/python/src/python/pants/contrib/python/checks/register.py +++ b/contrib/python/src/python/pants/contrib/python/checks/register.py @@ -7,8 +7,8 @@ from pants.goal.task_registrar import TaskRegistrar as task -from pants.contrib.python.checks.tasks2.python_eval import PythonEval from pants.contrib.python.checks.tasks.checkstyle.checker import PythonCheckStyleTask +from pants.contrib.python.checks.tasks.python_eval import PythonEval def register_goals(): diff --git a/contrib/python/src/python/pants/contrib/python/checks/tasks/BUILD b/contrib/python/src/python/pants/contrib/python/checks/tasks/BUILD index 449af3bf742..d133ea85be9 100644 --- a/contrib/python/src/python/pants/contrib/python/checks/tasks/BUILD +++ b/contrib/python/src/python/pants/contrib/python/checks/tasks/BUILD @@ -2,7 +2,6 @@ # Licensed under the Apache 
License, Version 2.0 (see LICENSE). python_library( - name='python', dependencies=[ ':resources', 'src/python/pants/backend/python/targets:python', diff --git a/contrib/python/src/python/pants/contrib/python/checks/tasks/python_eval.py b/contrib/python/src/python/pants/contrib/python/checks/tasks/python_eval.py index ea13159d3c7..110315a01c3 100644 --- a/contrib/python/src/python/pants/contrib/python/checks/tasks/python_eval.py +++ b/contrib/python/src/python/pants/contrib/python/checks/tasks/python_eval.py @@ -5,18 +5,32 @@ from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) +import hashlib import os import pkgutil +from pants.backend.python.interpreter_cache import PythonInterpreterCache +from pants.backend.python.subsystems.python_setup import PythonSetup from pants.backend.python.targets.python_binary import PythonBinary from pants.backend.python.targets.python_library import PythonLibrary -from pants.backend.python.tasks.python_task import PythonTask +from pants.backend.python.targets.python_target import PythonTarget +from pants.backend.python.tasks.pex_build_util import (dump_requirements, dump_sources, + has_python_requirements, has_python_sources) +from pants.backend.python.tasks.python_execution_task_base import WrappedPEX +from pants.backend.python.tasks.resolve_requirements_task_base import ResolveRequirementsTaskBase from pants.base.exceptions import TaskError from pants.base.generator import Generator, TemplateData from pants.base.workunit import WorkUnit, WorkUnitLabel +from pants.python.python_repos import PythonRepos +from pants.task.lint_task_mixin import LintTaskMixin +from pants.util.dirutil import safe_concurrent_creation, safe_mkdir +from pants.util.memo import memoized_property +from pex.pex import PEX +from pex.pex_builder import PEXBuilder +from pex.pex_info import PexInfo -class PythonEval(PythonTask): +class PythonEval(LintTaskMixin, ResolveRequirementsTaskBase): class Error(TaskError): """A richer failure exception type useful for tests.""" @@ -27,8 +41,18 @@ def __init__(self, *args, **kwargs): self.compiled = compiled self.failed = failed + _EXEC_NAME = '__pants_executable__' _EVAL_TEMPLATE_PATH = os.path.join('templates', 'python_eval', 'eval.py.mustache') + @classmethod + def subsystem_dependencies(cls): + return super(PythonEval, cls).subsystem_dependencies() + (PythonRepos, PythonSetup) + + @classmethod + def prepare(cls, options, round_manager): + # We don't need an interpreter selected for all targets in play, so prevent one being selected. 
+ pass + @staticmethod def _is_evalable(target): return isinstance(target, (PythonLibrary, PythonBinary)) @@ -36,23 +60,39 @@ def _is_evalable(target): @classmethod def register_options(cls, register): super(PythonEval, cls).register_options(register) - register('--skip', type=bool, - help='If enabled, skip eval of python targets.') register('--fail-slow', type=bool, help='Compile all targets and present the full list of errors.') register('--closure', type=bool, + removal_version='1.7.0.dev0', removal_hint='Use --transitive instead.', help='Eval all targets in the closure individually instead of just the targets ' 'specified on the command line.') def execute(self): - if self.get_options().skip: - return - - targets = self.context.targets() if self.get_options().closure else self.context.target_roots - with self.invalidated(filter(self._is_evalable, targets), + # The default for --closure is False, while the default for --transitive is True, so we + # can't just OR the two values, and have to explicitly detect when --transitive is not + # explicitly specified. + if self.get_options().is_default('transitive'): + if self.get_options().skip: + targets = [] + else: + targets = (self.context.targets(self._is_evalable) if self.get_options().closure + else filter(self._is_evalable, self.context.target_roots)) + else: + # TODO(benjy): After removing --closure, targets should always be set to this, and the + # entire other branch of this if statement (and the if statement itself) should be removed. + targets = self.get_targets(self._is_evalable) + with self.invalidated(targets, + invalidate_dependents=True, topological_order=True) as invalidation_check: compiled = self._compile_targets(invalidation_check.invalid_vts) return compiled # Collected and returned for tests + # TODO: BAD! Find another way to detect task action in tests. + + @memoized_property + def _interpreter_cache(self): + return PythonInterpreterCache(PythonSetup.global_instance(), + PythonRepos.global_instance(), + logger=self.context.log.debug) def _compile_targets(self, invalid_vts): with self.context.new_workunit(name='eval-targets', labels=[WorkUnitLabel.MULTITOOL]): @@ -60,9 +100,9 @@ def _compile_targets(self, invalid_vts): failed = [] for vt in invalid_vts: target = vt.target - return_code = self._compile_target(target) + return_code = self._compile_target(vt) if return_code == 0: - vt.update() # Ensure partial progress is marked valid + vt.update() # Ensure partial progress is marked valid. compiled.append(target) else: if self.get_options().fail_slow: @@ -80,81 +120,134 @@ def _compile_targets(self, invalid_vts): return compiled - def _compile_target(self, target): - # "Compiles" a target by forming an isolated chroot of its sources and transitive deps and then - # attempting to import each of the target's sources in the case of a python library or else the - # entry point in the case of a python binary. - # - # For a library with sources lib/core.py and lib/util.py a "compiler" main file would look like: - # - # if __name__ == '__main__': - # import lib.core - # import lib.util - # - # For a binary with entry point lib.bin:main the "compiler" main file would look like: - # - # if __name__ == '__main__': - # from lib.bin import main - # - # In either case the main file is executed within the target chroot to reveal missing BUILD - # dependencies. + def _compile_target(self, vt): + """'Compiles' a python target. 
- with self.context.new_workunit(name=target.address.spec): - modules = [] - if isinstance(target, PythonBinary): - source = 'entry_point {}'.format(target.entry_point) - components = target.entry_point.rsplit(':', 1) - if not all([x.strip() for x in components]): - raise TaskError('Invalid entry point {} for target {}'.format( - target.entry_point, target.address.spec)) - module = components[0] - if len(components) == 2: - function = components[1] - data = TemplateData(source=source, - import_statement='from {} import {}'.format(module, function)) - else: - data = TemplateData(source=source, import_statement='import {}'.format(module)) - modules.append(data) - else: - for path in target.sources_relative_to_source_root(): - if path.endswith('.py'): - if os.path.basename(path) == '__init__.py': - module_path = os.path.dirname(path) - else: - module_path, _ = os.path.splitext(path) - source = 'file {}'.format(os.path.join(target.target_base, path)) - module = module_path.replace(os.path.sep, '.') - if module: - data = TemplateData(source=source, import_statement='import {}'.format(module)) - modules.append(data) + 'Compiling' means forming an isolated chroot of its sources and transitive deps and then + attempting to import each of the target's sources in the case of a python library or else the + entry point in the case of a python binary. + For a library with sources lib/core.py and lib/util.py a "compiler" main file would look like: + + if __name__ == '__main__': + import lib.core + import lib.util + + For a binary with entry point lib.bin:main the "compiler" main file would look like: + + if __name__ == '__main__': + from lib.bin import main + + In either case the main file is executed within the target chroot to reveal missing BUILD + dependencies. + """ + target = vt.target + with self.context.new_workunit(name=target.address.spec): + modules = self._get_modules(target) if not modules: # Nothing to eval, so a trivial compile success. return 0 - interpreter = self.select_interpreter_for_targets([target]) + interpreter = self._get_interpreter_for_target_closure(target) + reqs_pex = self._resolve_requirements_for_versioned_target_closure(interpreter, vt) + srcs_pex = self._source_pex_for_versioned_target_closure(interpreter, vt) + + # Create the executable pex. + exec_pex_parent = os.path.join(self.workdir, 'executable_pex') + executable_file_content = self._get_executable_file_content(exec_pex_parent, modules) + hasher = hashlib.sha1() + hasher.update(executable_file_content) + exec_file_hash = hasher.hexdigest() + exec_pex_path = os.path.realpath(os.path.join(exec_pex_parent, exec_file_hash)) + if not os.path.isdir(exec_pex_path): + with safe_concurrent_creation(exec_pex_path) as safe_path: + # Write the entry point. + safe_mkdir(safe_path) + with open(os.path.join(safe_path, '{}.py'.format(self._EXEC_NAME)), 'w') as outfile: + outfile.write(executable_file_content) + pex_info = (target.pexinfo if isinstance(target, PythonBinary) else None) or PexInfo() + # Override any user-specified entry point, under the assumption that the + # executable_file_content does what the user intends (including, probably, calling that + # underlying entry point). 
+ pex_info.entry_point = self._EXEC_NAME + builder = PEXBuilder(safe_path, interpreter, pex_info=pex_info) + builder.freeze() + + exec_pex = PEX(exec_pex_path, interpreter) + extra_pex_paths = [pex.path() for pex in filter(None, [reqs_pex, srcs_pex])] + pex = WrappedPEX(exec_pex, interpreter, extra_pex_paths) - if isinstance(target, PythonBinary): - pexinfo, platforms = target.pexinfo, target.platforms - else: - pexinfo, platforms = None, None - - generator = Generator(pkgutil.get_data(__name__, self._EVAL_TEMPLATE_PATH), - chroot_parent=self.chroot_cache_dir, modules=modules) - executable_file_content = generator.render() - - chroot = self.cached_chroot(interpreter=interpreter, - pex_info=pexinfo, - targets=[target], - platforms=platforms, - executable_file_content=executable_file_content) - pex = chroot.pex() with self.context.new_workunit(name='eval', labels=[WorkUnitLabel.COMPILER, WorkUnitLabel.RUN, WorkUnitLabel.TOOL], - cmd=' '.join(pex.cmdline())) as workunit: + cmd=' '.join(exec_pex.cmdline())) as workunit: returncode = pex.run(stdout=workunit.output('stdout'), stderr=workunit.output('stderr')) workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE) if returncode != 0: self.context.log.error('Failed to eval {}'.format(target.address.spec)) return returncode + + @staticmethod + def _get_modules(target): + modules = [] + if isinstance(target, PythonBinary): + source = 'entry_point {}'.format(target.entry_point) + components = target.entry_point.rsplit(':', 1) + if not all([x.strip() for x in components]): + raise TaskError('Invalid entry point {} for target {}'.format( + target.entry_point, target.address.spec)) + module = components[0] + if len(components) == 2: + function = components[1] + data = TemplateData(source=source, + import_statement='from {} import {}'.format(module, function)) + else: + data = TemplateData(source=source, import_statement='import {}'.format(module)) + modules.append(data) + else: + for path in target.sources_relative_to_source_root(): + if path.endswith('.py'): + if os.path.basename(path) == '__init__.py': + module_path = os.path.dirname(path) + else: + module_path, _ = os.path.splitext(path) + source = 'file {}'.format(os.path.join(target.target_base, path)) + module = module_path.replace(os.path.sep, '.') + if module: + data = TemplateData(source=source, import_statement='import {}'.format(module)) + modules.append(data) + return modules + + def _get_executable_file_content(self, exec_pex_parent, modules): + generator = Generator(pkgutil.get_data(__name__, self._EVAL_TEMPLATE_PATH), + chroot_parent=exec_pex_parent, modules=modules) + return generator.render() + + def _get_interpreter_for_target_closure(self, target): + targets = [t for t in target.closure() if isinstance(t, PythonTarget)] + return self._interpreter_cache.select_interpreter_for_targets(targets) + + def _resolve_requirements_for_versioned_target_closure(self, interpreter, vt): + reqs_pex_path = os.path.realpath(os.path.join(self.workdir, str(interpreter.identity), + vt.cache_key.hash)) + if not os.path.isdir(reqs_pex_path): + req_libs = filter(has_python_requirements, vt.target.closure()) + with safe_concurrent_creation(reqs_pex_path) as safe_path: + builder = PEXBuilder(safe_path, interpreter=interpreter, copy=True) + dump_requirements(builder, interpreter, req_libs, self.context.log) + builder.freeze() + return PEX(reqs_pex_path, interpreter=interpreter) + + def _source_pex_for_versioned_target_closure(self, interpreter, vt): + source_pex_path = 
os.path.realpath(os.path.join(self.workdir, vt.cache_key.hash)) + if not os.path.isdir(source_pex_path): + with safe_concurrent_creation(source_pex_path) as safe_path: + self._build_source_pex(interpreter, safe_path, vt.target.closure()) + return PEX(source_pex_path, interpreter=interpreter) + + def _build_source_pex(self, interpreter, path, targets): + builder = PEXBuilder(path=path, interpreter=interpreter, copy=True) + for target in targets: + if has_python_sources(target): + dump_sources(builder, target, self.context.log) + builder.freeze() diff --git a/contrib/python/src/python/pants/contrib/python/checks/tasks2/BUILD b/contrib/python/src/python/pants/contrib/python/checks/tasks2/BUILD index 1ffa583b44d..e500a478052 100644 --- a/contrib/python/src/python/pants/contrib/python/checks/tasks2/BUILD +++ b/contrib/python/src/python/pants/contrib/python/checks/tasks2/BUILD @@ -3,19 +3,7 @@ python_library( dependencies=[ - ':resources', - '3rdparty/python:pex', - 'src/python/pants/backend/python/targets:python', - 'src/python/pants/backend/python/tasks', - 'src/python/pants/base:exceptions', - 'src/python/pants/base:generator', - 'src/python/pants/base:workunit', - 'src/python/pants/task', - 'src/python/pants/util:dirutil', + 'contrib/python/src/python/pants/contrib/python/checks/tasks', + 'src/python/pants/base:deprecated', ] ) - -resources( - name='resources', - sources=globs('templates/python_eval/*.mustache'), -) diff --git a/contrib/python/src/python/pants/contrib/python/checks/tasks2/python_eval.py b/contrib/python/src/python/pants/contrib/python/checks/tasks2/python_eval.py index 110315a01c3..4d80c66c03d 100644 --- a/contrib/python/src/python/pants/contrib/python/checks/tasks2/python_eval.py +++ b/contrib/python/src/python/pants/contrib/python/checks/tasks2/python_eval.py @@ -5,249 +5,11 @@ from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) -import hashlib -import os -import pkgutil +from pants.base.deprecated import deprecated_module -from pants.backend.python.interpreter_cache import PythonInterpreterCache -from pants.backend.python.subsystems.python_setup import PythonSetup -from pants.backend.python.targets.python_binary import PythonBinary -from pants.backend.python.targets.python_library import PythonLibrary -from pants.backend.python.targets.python_target import PythonTarget -from pants.backend.python.tasks.pex_build_util import (dump_requirements, dump_sources, - has_python_requirements, has_python_sources) -from pants.backend.python.tasks.python_execution_task_base import WrappedPEX -from pants.backend.python.tasks.resolve_requirements_task_base import ResolveRequirementsTaskBase -from pants.base.exceptions import TaskError -from pants.base.generator import Generator, TemplateData -from pants.base.workunit import WorkUnit, WorkUnitLabel -from pants.python.python_repos import PythonRepos -from pants.task.lint_task_mixin import LintTaskMixin -from pants.util.dirutil import safe_concurrent_creation, safe_mkdir -from pants.util.memo import memoized_property -from pex.pex import PEX -from pex.pex_builder import PEXBuilder -from pex.pex_info import PexInfo +from pants.contrib.python.checks.tasks.python_eval import PythonEval -class PythonEval(LintTaskMixin, ResolveRequirementsTaskBase): - class Error(TaskError): - """A richer failure exception type useful for tests.""" +deprecated_module('1.7.0.dev0', 'Use pants.contrib.python.checks.tasks instead') - def __init__(self, *args, **kwargs): - compiled = 
kwargs.pop('compiled') - failed = kwargs.pop('failed') - super(PythonEval.Error, self).__init__(*args, **kwargs) - self.compiled = compiled - self.failed = failed - - _EXEC_NAME = '__pants_executable__' - _EVAL_TEMPLATE_PATH = os.path.join('templates', 'python_eval', 'eval.py.mustache') - - @classmethod - def subsystem_dependencies(cls): - return super(PythonEval, cls).subsystem_dependencies() + (PythonRepos, PythonSetup) - - @classmethod - def prepare(cls, options, round_manager): - # We don't need an interpreter selected for all targets in play, so prevent one being selected. - pass - - @staticmethod - def _is_evalable(target): - return isinstance(target, (PythonLibrary, PythonBinary)) - - @classmethod - def register_options(cls, register): - super(PythonEval, cls).register_options(register) - register('--fail-slow', type=bool, - help='Compile all targets and present the full list of errors.') - register('--closure', type=bool, - removal_version='1.7.0.dev0', removal_hint='Use --transitive instead.', - help='Eval all targets in the closure individually instead of just the targets ' - 'specified on the command line.') - - def execute(self): - # The default for --closure is False, while the default for --transitive is True, so we - # can't just OR the two values, and have to explicitly detect when --transitive is not - # explicitly specified. - if self.get_options().is_default('transitive'): - if self.get_options().skip: - targets = [] - else: - targets = (self.context.targets(self._is_evalable) if self.get_options().closure - else filter(self._is_evalable, self.context.target_roots)) - else: - # TODO(benjy): After removing --closure, targets should always be set to this, and the - # entire other branch of this if statement (and the if statement itself) should be removed. - targets = self.get_targets(self._is_evalable) - with self.invalidated(targets, - invalidate_dependents=True, - topological_order=True) as invalidation_check: - compiled = self._compile_targets(invalidation_check.invalid_vts) - return compiled # Collected and returned for tests - # TODO: BAD! Find another way to detect task action in tests. - - @memoized_property - def _interpreter_cache(self): - return PythonInterpreterCache(PythonSetup.global_instance(), - PythonRepos.global_instance(), - logger=self.context.log.debug) - - def _compile_targets(self, invalid_vts): - with self.context.new_workunit(name='eval-targets', labels=[WorkUnitLabel.MULTITOOL]): - compiled = [] - failed = [] - for vt in invalid_vts: - target = vt.target - return_code = self._compile_target(vt) - if return_code == 0: - vt.update() # Ensure partial progress is marked valid. - compiled.append(target) - else: - if self.get_options().fail_slow: - failed.append(target) - else: - raise self.Error('Failed to eval {}'.format(target.address.spec), - compiled=compiled, - failed=[target]) - - if failed: - msg = 'Failed to evaluate {} targets:\n {}'.format( - len(failed), - '\n '.join(t.address.spec for t in failed)) - raise self.Error(msg, compiled=compiled, failed=failed) - - return compiled - - def _compile_target(self, vt): - """'Compiles' a python target. - - 'Compiling' means forming an isolated chroot of its sources and transitive deps and then - attempting to import each of the target's sources in the case of a python library or else the - entry point in the case of a python binary. 
- - For a library with sources lib/core.py and lib/util.py a "compiler" main file would look like: - - if __name__ == '__main__': - import lib.core - import lib.util - - For a binary with entry point lib.bin:main the "compiler" main file would look like: - - if __name__ == '__main__': - from lib.bin import main - - In either case the main file is executed within the target chroot to reveal missing BUILD - dependencies. - """ - target = vt.target - with self.context.new_workunit(name=target.address.spec): - modules = self._get_modules(target) - if not modules: - # Nothing to eval, so a trivial compile success. - return 0 - - interpreter = self._get_interpreter_for_target_closure(target) - reqs_pex = self._resolve_requirements_for_versioned_target_closure(interpreter, vt) - srcs_pex = self._source_pex_for_versioned_target_closure(interpreter, vt) - - # Create the executable pex. - exec_pex_parent = os.path.join(self.workdir, 'executable_pex') - executable_file_content = self._get_executable_file_content(exec_pex_parent, modules) - hasher = hashlib.sha1() - hasher.update(executable_file_content) - exec_file_hash = hasher.hexdigest() - exec_pex_path = os.path.realpath(os.path.join(exec_pex_parent, exec_file_hash)) - if not os.path.isdir(exec_pex_path): - with safe_concurrent_creation(exec_pex_path) as safe_path: - # Write the entry point. - safe_mkdir(safe_path) - with open(os.path.join(safe_path, '{}.py'.format(self._EXEC_NAME)), 'w') as outfile: - outfile.write(executable_file_content) - pex_info = (target.pexinfo if isinstance(target, PythonBinary) else None) or PexInfo() - # Override any user-specified entry point, under the assumption that the - # executable_file_content does what the user intends (including, probably, calling that - # underlying entry point). 
- pex_info.entry_point = self._EXEC_NAME - builder = PEXBuilder(safe_path, interpreter, pex_info=pex_info) - builder.freeze() - - exec_pex = PEX(exec_pex_path, interpreter) - extra_pex_paths = [pex.path() for pex in filter(None, [reqs_pex, srcs_pex])] - pex = WrappedPEX(exec_pex, interpreter, extra_pex_paths) - - with self.context.new_workunit(name='eval', - labels=[WorkUnitLabel.COMPILER, WorkUnitLabel.RUN, - WorkUnitLabel.TOOL], - cmd=' '.join(exec_pex.cmdline())) as workunit: - returncode = pex.run(stdout=workunit.output('stdout'), stderr=workunit.output('stderr')) - workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE) - if returncode != 0: - self.context.log.error('Failed to eval {}'.format(target.address.spec)) - return returncode - - @staticmethod - def _get_modules(target): - modules = [] - if isinstance(target, PythonBinary): - source = 'entry_point {}'.format(target.entry_point) - components = target.entry_point.rsplit(':', 1) - if not all([x.strip() for x in components]): - raise TaskError('Invalid entry point {} for target {}'.format( - target.entry_point, target.address.spec)) - module = components[0] - if len(components) == 2: - function = components[1] - data = TemplateData(source=source, - import_statement='from {} import {}'.format(module, function)) - else: - data = TemplateData(source=source, import_statement='import {}'.format(module)) - modules.append(data) - else: - for path in target.sources_relative_to_source_root(): - if path.endswith('.py'): - if os.path.basename(path) == '__init__.py': - module_path = os.path.dirname(path) - else: - module_path, _ = os.path.splitext(path) - source = 'file {}'.format(os.path.join(target.target_base, path)) - module = module_path.replace(os.path.sep, '.') - if module: - data = TemplateData(source=source, import_statement='import {}'.format(module)) - modules.append(data) - return modules - - def _get_executable_file_content(self, exec_pex_parent, modules): - generator = Generator(pkgutil.get_data(__name__, self._EVAL_TEMPLATE_PATH), - chroot_parent=exec_pex_parent, modules=modules) - return generator.render() - - def _get_interpreter_for_target_closure(self, target): - targets = [t for t in target.closure() if isinstance(t, PythonTarget)] - return self._interpreter_cache.select_interpreter_for_targets(targets) - - def _resolve_requirements_for_versioned_target_closure(self, interpreter, vt): - reqs_pex_path = os.path.realpath(os.path.join(self.workdir, str(interpreter.identity), - vt.cache_key.hash)) - if not os.path.isdir(reqs_pex_path): - req_libs = filter(has_python_requirements, vt.target.closure()) - with safe_concurrent_creation(reqs_pex_path) as safe_path: - builder = PEXBuilder(safe_path, interpreter=interpreter, copy=True) - dump_requirements(builder, interpreter, req_libs, self.context.log) - builder.freeze() - return PEX(reqs_pex_path, interpreter=interpreter) - - def _source_pex_for_versioned_target_closure(self, interpreter, vt): - source_pex_path = os.path.realpath(os.path.join(self.workdir, vt.cache_key.hash)) - if not os.path.isdir(source_pex_path): - with safe_concurrent_creation(source_pex_path) as safe_path: - self._build_source_pex(interpreter, safe_path, vt.target.closure()) - return PEX(source_pex_path, interpreter=interpreter) - - def _build_source_pex(self, interpreter, path, targets): - builder = PEXBuilder(path=path, interpreter=interpreter, copy=True) - for target in targets: - if has_python_sources(target): - dump_sources(builder, target, self.context.log) - builder.freeze() 
+PythonEval = PythonEval diff --git a/contrib/python/src/python/pants/contrib/python/checks/tasks2/templates/python_eval/eval.py.mustache b/contrib/python/src/python/pants/contrib/python/checks/tasks2/templates/python_eval/eval.py.mustache deleted file mode 100644 index 118b16e6c9b..00000000000 --- a/contrib/python/src/python/pants/contrib/python/checks/tasks2/templates/python_eval/eval.py.mustache +++ /dev/null @@ -1,45 +0,0 @@ -from __future__ import print_function - -import inspect -import os -import sys -import traceback - - -def backtrace_to_here(): - trace = inspect.trace(context=1) - trace.pop(0) # discard this helper function frame - - pre_processed = [] - for info in trace: - frame = info[0] - tb = inspect.getframeinfo(frame) - filename = tb.filename - if filename.startswith('{{chroot_parent}}'): - relpath_parent = os.path.relpath(filename, '{{chroot_parent}}') - relpath = relpath_parent.split(os.sep, 1)[1] - filename = os.path.join('[srcroot]', relpath) - line_text = tb.code_context[tb.index] - pre_processed.append((filename, tb.lineno, tb.function, line_text)) - - return ''.join(traceback.format_list(pre_processed)).rstrip() - - -def log(message=''): - print(message, file=sys.stderr) - - -if __name__ == '__main__': - # TODO(John Sirois): Consider collecting all import errors before exiting non-zero. - {{#modules}} - try: - {{import_statement}} - except ImportError as e: - log() - log("Failed to eval '{{source}}':") - log(backtrace_to_here()) - log(str(e)) - sys.exit(1) - - {{/modules}} - sys.exit(0) diff --git a/contrib/python/tests/python/pants_test/contrib/python/checks/tasks/BUILD b/contrib/python/tests/python/pants_test/contrib/python/checks/tasks/BUILD index 7bdab4dea99..8127c3647cd 100644 --- a/contrib/python/tests/python/pants_test/contrib/python/checks/tasks/BUILD +++ b/contrib/python/tests/python/pants_test/contrib/python/checks/tasks/BUILD @@ -5,7 +5,9 @@ python_tests( name='python_eval', sources=['test_python_eval.py'], dependencies=[ - 'contrib/python/src/python/pants/contrib/python/checks/tasks:python', + 'src/python/pants/backend/python/subsystems', + 'contrib/python/src/python/pants/contrib/python/checks/tasks', + 'src/python/pants/python', 'tests/python/pants_test/backend/python/tasks:python_task_test_base', ] ) diff --git a/contrib/python/tests/python/pants_test/contrib/python/checks/tasks/test_python_eval.py b/contrib/python/tests/python/pants_test/contrib/python/checks/tasks/test_python_eval.py index e06725f4097..f193519e4d0 100644 --- a/contrib/python/tests/python/pants_test/contrib/python/checks/tasks/test_python_eval.py +++ b/contrib/python/tests/python/pants_test/contrib/python/checks/tasks/test_python_eval.py @@ -7,9 +7,11 @@ from textwrap import dedent +from pants.backend.python.subsystems.python_setup import PythonSetup +from pants.python.python_repos import PythonRepos from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase -from pants.contrib.python.checks.tasks.python_eval import PythonEval +from pants.contrib.python.checks.tasks2.python_eval import PythonEval class PythonEvalTest(PythonTaskTestBase): @@ -74,42 +76,44 @@ class BazD(object): self.h_binary = self.create_python_binary('src/python/h', 'h', 'a.a') def _create_task(self, target_roots, options=None): + # Until the --closure options is removed, we must explicitly set --transitive + # to True, even though that's the default. See comment in the task code for details. + # TODO(benjy): Once --closure is removed, delete these three lines. 
+ options = options or {} + if 'transitive' not in options: + options['transitive'] = True + if options: self.set_options(**options) - return self.create_task(self.context(target_roots=target_roots)) + return self.create_task(self.context(target_roots=target_roots, + for_subsystems=[PythonSetup, PythonRepos])) def test_noop(self): - python_eval = self._create_task(target_roots=[]) + python_eval = self._create_task(target_roots=[], options={'transitive': False}) compiled = python_eval.execute() self.assertEqual([], compiled) def test_compile(self): - python_eval = self._create_task(target_roots=[self.a_library]) + python_eval = self._create_task(target_roots=[self.a_library], options={'transitive': False}) compiled = python_eval.execute() self.assertEqual([self.a_library], compiled) - def test_skip(self): - self.set_options(skip=True) - python_eval = self._create_task(target_roots=[self.a_library]) - compiled = python_eval.execute() - self.assertIsNone(compiled) - def test_compile_incremental(self): - python_eval = self._create_task(target_roots=[self.a_library]) + python_eval = self._create_task(target_roots=[self.a_library], options={'transitive': False}) compiled = python_eval.execute() self.assertEqual([self.a_library], compiled) - python_eval = self._create_task(target_roots=[self.a_library]) + python_eval = self._create_task(target_roots=[self.a_library], options={'transitive': False}) compiled = python_eval.execute() self.assertEqual([], compiled) def test_compile_closure(self): - python_eval = self._create_task(target_roots=[self.d_library], options={'closure': True}) + python_eval = self._create_task(target_roots=[self.d_library]) compiled = python_eval.execute() self.assertEqual({self.d_library, self.a_library}, set(compiled)) def test_compile_fail_closure(self): - python_eval = self._create_task(target_roots=[self.b_library], options={'closure': True}) + python_eval = self._create_task(target_roots=[self.b_library]) with self.assertRaises(PythonEval.Error) as e: python_eval.execute() @@ -117,7 +121,7 @@ def test_compile_fail_closure(self): self.assertEqual([self.b_library], e.exception.failed) def test_compile_incremental_progress(self): - python_eval = self._create_task(target_roots=[self.b_library], options={'closure': True}) + python_eval = self._create_task(target_roots=[self.b_library]) with self.assertRaises(PythonEval.Error) as e: python_eval.execute() @@ -125,13 +129,13 @@ def test_compile_incremental_progress(self): self.assertEqual([self.b_library], e.exception.failed) self._create_graph(broken_b_library=False) - python_eval = self._create_task(target_roots=[self.b_library], options={'closure': True}) + python_eval = self._create_task(target_roots=[self.b_library]) compiled = python_eval.execute() self.assertEqual([self.b_library], compiled) def test_compile_fail_missing_build_dep(self): - python_eval = self._create_task(target_roots=[self.b_library]) + python_eval = self._create_task(target_roots=[self.b_library], options={'transitive': False}) with self.assertRaises(python_eval.Error) as e: python_eval.execute() @@ -139,7 +143,7 @@ def test_compile_fail_missing_build_dep(self): self.assertEqual([self.b_library], e.exception.failed) def test_compile_fail_compile_time_check_decorator(self): - python_eval = self._create_task(target_roots=[self.b_library]) + python_eval = self._create_task(target_roots=[self.b_library], options={'transitive': False}) with self.assertRaises(PythonEval.Error) as e: python_eval.execute() @@ -148,7 +152,7 @@ def 
test_compile_fail_compile_time_check_decorator(self): def test_compile_failslow(self): python_eval = self._create_task(target_roots=[self.a_library, self.b_library, self.d_library], - options={'fail_slow': True}) + options={'fail_slow': True, 'transitive': False}) with self.assertRaises(PythonEval.Error) as e: python_eval.execute() @@ -156,19 +160,19 @@ def test_compile_failslow(self): self.assertEqual([self.b_library], e.exception.failed) def test_entry_point_module(self): - python_eval = self._create_task(target_roots=[self.e_binary]) + python_eval = self._create_task(target_roots=[self.e_binary], options={'transitive': False}) compiled = python_eval.execute() self.assertEqual([self.e_binary], compiled) def test_entry_point_function(self): - python_eval = self._create_task(target_roots=[self.f_binary]) + python_eval = self._create_task(target_roots=[self.f_binary], options={'transitive': False}) compiled = python_eval.execute() self.assertEqual([self.f_binary], compiled) def test_entry_point_does_not_exist(self): - python_eval = self._create_task(target_roots=[self.g_binary]) + python_eval = self._create_task(target_roots=[self.g_binary], options={'transitive': False}) with self.assertRaises(PythonEval.Error) as e: python_eval.execute() @@ -176,7 +180,7 @@ def test_entry_point_does_not_exist(self): self.assertEqual([self.g_binary], e.exception.failed) def test_entry_point_missing_build_dep(self): - python_eval = self._create_task(target_roots=[self.h_binary]) + python_eval = self._create_task(target_roots=[self.h_binary], options={'transitive': False}) with self.assertRaises(PythonEval.Error) as e: python_eval.execute() diff --git a/contrib/python/tests/python/pants_test/contrib/python/checks/tasks2/BUILD b/contrib/python/tests/python/pants_test/contrib/python/checks/tasks2/BUILD deleted file mode 100644 index e9a772b6d60..00000000000 --- a/contrib/python/tests/python/pants_test/contrib/python/checks/tasks2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). -# Licensed under the Apache License, Version 2.0 (see LICENSE). - -python_tests( - name='python_eval', - sources=['test_python_eval.py'], - dependencies=[ - 'src/python/pants/backend/python/subsystems', - 'contrib/python/src/python/pants/contrib/python/checks/tasks2', - 'src/python/pants/python', - 'tests/python/pants_test/backend/python/tasks:python_task_test_base', - ] -) diff --git a/contrib/python/tests/python/pants_test/contrib/python/checks/tasks2/test_python_eval.py b/contrib/python/tests/python/pants_test/contrib/python/checks/tasks2/test_python_eval.py deleted file mode 100644 index f193519e4d0..00000000000 --- a/contrib/python/tests/python/pants_test/contrib/python/checks/tasks2/test_python_eval.py +++ /dev/null @@ -1,188 +0,0 @@ -# coding=utf-8 -# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). -# Licensed under the Apache License, Version 2.0 (see LICENSE). 
- -from __future__ import (absolute_import, division, generators, nested_scopes, print_function, - unicode_literals, with_statement) - -from textwrap import dedent - -from pants.backend.python.subsystems.python_setup import PythonSetup -from pants.python.python_repos import PythonRepos -from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase - -from pants.contrib.python.checks.tasks2.python_eval import PythonEval - - -class PythonEvalTest(PythonTaskTestBase): - - @classmethod - def task_type(cls): - return PythonEval - - def setUp(self): - super(PythonEvalTest, self).setUp() - self._create_graph(broken_b_library=True) - - def _create_graph(self, broken_b_library): - self.reset_build_graph() - - self.a_library = self.create_python_library('src/python/a', 'a', {'a.py': dedent(""" - import inspect - - - def compile_time_check_decorator(cls): - if not inspect.isclass(cls): - raise TypeError('This decorator can only be applied to classes, given {}'.format(cls)) - return cls - """)}) - - self.b_library = self.create_python_library('src/python/b', 'b', {'b.py': dedent(""" - from a.a import compile_time_check_decorator - - - @compile_time_check_decorator - class BarB(object): - pass - """)}) - - # TODO: Presumably this was supposed to be c_library, not override b_library. Unravel and fix. - self.b_library = self.create_python_library('src/python/c', 'c', {'c.py': dedent(""" - from a.a import compile_time_check_decorator - - - @compile_time_check_decorator - {}: - pass - """.format('def baz_c()' if broken_b_library else 'class BazC(object)') - )}, dependencies=['//src/python/a']) - - self.d_library = self.create_python_library('src/python/d', 'd', { 'd.py': dedent(""" - from a.a import compile_time_check_decorator - - - @compile_time_check_decorator - class BazD(object): - pass - """)}, dependencies=['//src/python/a']) - - self.e_binary = self.create_python_binary('src/python/e', 'e', 'a.a', - dependencies=['//src/python/a']) - self.f_binary = self.create_python_binary('src/python/f', 'f', - 'a.a:compile_time_check_decorator', - dependencies=['//src/python/a']) - self.g_binary = self.create_python_binary('src/python/g', 'g', 'a.a:does_not_exist', - dependencies=['//src/python/a']) - self.h_binary = self.create_python_binary('src/python/h', 'h', 'a.a') - - def _create_task(self, target_roots, options=None): - # Until the --closure options is removed, we must explicitly set --transitive - # to True, even though that's the default. See comment in the task code for details. - # TODO(benjy): Once --closure is removed, delete these three lines. 
- options = options or {} - if 'transitive' not in options: - options['transitive'] = True - - if options: - self.set_options(**options) - return self.create_task(self.context(target_roots=target_roots, - for_subsystems=[PythonSetup, PythonRepos])) - - def test_noop(self): - python_eval = self._create_task(target_roots=[], options={'transitive': False}) - compiled = python_eval.execute() - self.assertEqual([], compiled) - - def test_compile(self): - python_eval = self._create_task(target_roots=[self.a_library], options={'transitive': False}) - compiled = python_eval.execute() - self.assertEqual([self.a_library], compiled) - - def test_compile_incremental(self): - python_eval = self._create_task(target_roots=[self.a_library], options={'transitive': False}) - compiled = python_eval.execute() - self.assertEqual([self.a_library], compiled) - - python_eval = self._create_task(target_roots=[self.a_library], options={'transitive': False}) - compiled = python_eval.execute() - self.assertEqual([], compiled) - - def test_compile_closure(self): - python_eval = self._create_task(target_roots=[self.d_library]) - compiled = python_eval.execute() - self.assertEqual({self.d_library, self.a_library}, set(compiled)) - - def test_compile_fail_closure(self): - python_eval = self._create_task(target_roots=[self.b_library]) - - with self.assertRaises(PythonEval.Error) as e: - python_eval.execute() - self.assertEqual([self.a_library], e.exception.compiled) - self.assertEqual([self.b_library], e.exception.failed) - - def test_compile_incremental_progress(self): - python_eval = self._create_task(target_roots=[self.b_library]) - - with self.assertRaises(PythonEval.Error) as e: - python_eval.execute() - self.assertEqual([self.a_library], e.exception.compiled) - self.assertEqual([self.b_library], e.exception.failed) - - self._create_graph(broken_b_library=False) - python_eval = self._create_task(target_roots=[self.b_library]) - - compiled = python_eval.execute() - self.assertEqual([self.b_library], compiled) - - def test_compile_fail_missing_build_dep(self): - python_eval = self._create_task(target_roots=[self.b_library], options={'transitive': False}) - - with self.assertRaises(python_eval.Error) as e: - python_eval.execute() - self.assertEqual([], e.exception.compiled) - self.assertEqual([self.b_library], e.exception.failed) - - def test_compile_fail_compile_time_check_decorator(self): - python_eval = self._create_task(target_roots=[self.b_library], options={'transitive': False}) - - with self.assertRaises(PythonEval.Error) as e: - python_eval.execute() - self.assertEqual([], e.exception.compiled) - self.assertEqual([self.b_library], e.exception.failed) - - def test_compile_failslow(self): - python_eval = self._create_task(target_roots=[self.a_library, self.b_library, self.d_library], - options={'fail_slow': True, 'transitive': False}) - - with self.assertRaises(PythonEval.Error) as e: - python_eval.execute() - self.assertEqual({self.a_library, self.d_library}, set(e.exception.compiled)) - self.assertEqual([self.b_library], e.exception.failed) - - def test_entry_point_module(self): - python_eval = self._create_task(target_roots=[self.e_binary], options={'transitive': False}) - - compiled = python_eval.execute() - self.assertEqual([self.e_binary], compiled) - - def test_entry_point_function(self): - python_eval = self._create_task(target_roots=[self.f_binary], options={'transitive': False}) - - compiled = python_eval.execute() - self.assertEqual([self.f_binary], compiled) - - def 
test_entry_point_does_not_exist(self): - python_eval = self._create_task(target_roots=[self.g_binary], options={'transitive': False}) - - with self.assertRaises(PythonEval.Error) as e: - python_eval.execute() - self.assertEqual([], e.exception.compiled) - self.assertEqual([self.g_binary], e.exception.failed) - - def test_entry_point_missing_build_dep(self): - python_eval = self._create_task(target_roots=[self.h_binary], options={'transitive': False}) - - with self.assertRaises(PythonEval.Error) as e: - python_eval.execute() - self.assertEqual([], e.exception.compiled) - self.assertEqual([self.h_binary], e.exception.failed) From f2b8c21f69eb6e62cbfdd43681ddb69317a63095 Mon Sep 17 00:00:00 2001 From: John Sirois Date: Thu, 1 Feb 2018 13:17:24 -0800 Subject: [PATCH 04/21] Simplify `JUnitRun` internals. (#5410) In addition to simplifying some code this helps pull `JUnitRun` into the same shape as `PytestRun` to allow factoring out their shared and tricky code supporting `--fast`/`--no-fast` and `--chroot` options. Work towards #5073 and #5307. --- .../pants/backend/jvm/tasks/junit_run.py | 33 +++++++++---------- .../backend/jvm/tasks/test_junit_run.py | 3 +- 2 files changed, 17 insertions(+), 19 deletions(-) diff --git a/src/python/pants/backend/jvm/tasks/junit_run.py b/src/python/pants/backend/jvm/tasks/junit_run.py index 5a9db8fd5d9..8ea48415f33 100644 --- a/src/python/pants/backend/jvm/tasks/junit_run.py +++ b/src/python/pants/backend/jvm/tasks/junit_run.py @@ -406,7 +406,11 @@ def _per_target(self): def _batched(self): return self._batch_size != self._BATCH_ALL - def _run_junit(self, test_registry, output_dir, coverage): + def _run_junit(self, test_targets, output_dir, coverage): + test_registry = self._collect_test_targets(test_targets) + if test_registry.empty: + return TestResult.rc(0) + coverage.instrument(output_dir) def parse_error_handler(parse_error): @@ -590,7 +594,8 @@ def _iter_partitions(self, targets, output_dir): for target in targets: yield (target,), os.path.join(output_dir, target.id) else: - yield tuple(targets), output_dir + if targets: + yield tuple(targets), output_dir def _execute(self, all_targets): with self._isolation(all_targets) as (output_dir, reports, coverage): @@ -599,7 +604,9 @@ def _execute(self, all_targets): for (partition, partition_output_dir) in self._iter_partitions(self._get_test_targets(), output_dir): try: - rv = self._run_partition(partition, partition_output_dir, coverage) + rv = self._run_partition(test_targets=partition, + output_dir=partition_output_dir, + coverage=coverage) except ErrorWhileTesting as e: rv = TestResult.from_error(e) @@ -642,22 +649,14 @@ def _execute(self, all_targets): if error: raise error - def _run_partition(self, targets, output_dir, coverage): - with self.invalidated(targets=targets, + def _run_partition(self, test_targets, output_dir, coverage): + with self.invalidated(targets=test_targets, # Re-run tests when the code they test (and depend on) changes. 
invalidate_dependents=True) as invalidation_check: - all_test_tgts, invalid_test_tgts = [], [] - is_test_target = self._test_target_filter() - for vts in invalidation_check.all_vts: - test_targets = [tgt for tgt in vts.targets if is_test_target(tgt)] - all_test_tgts.extend(test_targets) - if not vts.valid: - invalid_test_tgts.extend(test_targets) - - test_registry = self._collect_test_targets(invalid_test_tgts) - if test_registry.empty: - return TestResult.rc(0) + invalid_test_tgts = [invalid_test_tgt + for vts in invalidation_check.invalid_vts + for invalid_test_tgt in vts.targets] # Processing proceeds through: # 1.) output -> output_dir @@ -665,7 +664,7 @@ def _run_partition(self, targets, output_dir, coverage): # 3.) [iff invalid == 0 and all > 0] cache -> workdir: Done transparently by `invalidated`. # 1.) Write all results that will be potentially cached to output_dir. - result = self._run_junit(test_registry, output_dir, coverage).checked() + result = self._run_junit(invalid_test_tgts, output_dir, coverage).checked() cache_vts = self._vts_for_partition(invalidation_check) if invalidation_check.all_vts == invalidation_check.invalid_vts: diff --git a/tests/python/pants_test/backend/jvm/tasks/test_junit_run.py b/tests/python/pants_test/backend/jvm/tasks/test_junit_run.py index 241f50bfd33..10c2a110ef5 100644 --- a/tests/python/pants_test/backend/jvm/tasks/test_junit_run.py +++ b/tests/python/pants_test/backend/jvm/tasks/test_junit_run.py @@ -195,8 +195,7 @@ def test_empty_sources(self): r'must include a non-empty set of sources'): task.execute() - # We should skip the execution (and caching) phase when there are no test sources. - @ensure_cached(JUnitRun, expected_num_artifacts=0) + @ensure_cached(JUnitRun, expected_num_artifacts=1) def test_allow_empty_sources(self): self.add_to_build_file('foo', dedent(""" junit_tests( From 30015e7669fcefc1ccbd0a76f915813725fd65ce Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Thu, 1 Feb 2018 14:33:19 -0800 Subject: [PATCH 05/21] Console tasks can output nothing without erroring (#5412) --- src/python/pants/task/console_task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/pants/task/console_task.py b/src/python/pants/task/console_task.py index adfcf926b12..058523ac659 100644 --- a/src/python/pants/task/console_task.py +++ b/src/python/pants/task/console_task.py @@ -54,7 +54,7 @@ def execute(self): with self._guard_sigpipe(): try: targets = self.context.targets() - for value in self.console_output(targets): + for value in self.console_output(targets) or tuple(): self._outstream.write(value.encode('utf-8')) self._outstream.write(self._console_separator) finally: From 609866760e22030d0835aaff74e3cdab5a6ecc1f Mon Sep 17 00:00:00 2001 From: John Sirois Date: Thu, 1 Feb 2018 14:46:01 -0800 Subject: [PATCH 06/21] Set the log level when capturing logs in tests. (#5418) This allows the test writer to ensure their log statements fire when expected in the face of non-local manipulation of the global python logging subsystem. 
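For readers skimming past the diff, the fix is the standard save/set/restore dance on the root logger, done in a `try`/`finally` so state is restored even when the test body raises. A minimal standalone sketch of that pattern (the `capture_logs` and `_Recorder` names here are illustrative stand-ins, not part of the patch):

```python
import logging
from contextlib import contextmanager


class _Recorder(logging.Handler):
  """Collects emitted records in memory instead of writing them anywhere."""

  def __init__(self):
    logging.Handler.__init__(self)
    self.records = []

  def emit(self, record):
    self.records.append(record)


@contextmanager
def capture_logs(level=logging.NOTSET):
  """Capture root-logger records at `level`, restoring prior state on exit."""
  root_logger = logging.getLogger()
  old_level = root_logger.level
  handler = _Recorder()
  root_logger.setLevel(level)
  root_logger.addHandler(handler)
  try:
    yield handler
  finally:
    root_logger.setLevel(old_level)
    root_logger.removeHandler(handler)
```

A test then opts in to exactly the level it asserts on, e.g. `with capture_logs(logging.WARNING) as captured:`, which is the same shape as the `captured_logging(logging.WARNING)` calls added to the tests in the diff below.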
Fixes #5417
---
 tests/python/pants_test/base_test.py | 17 ++++++++++++-----
 .../pants_test/pantsd/test_process_manager.py | 3 ++-
 tests/python/pants_test/util/test_osutil.py | 6 ++++--
 3 files changed, 18 insertions(+), 8 deletions(-)

diff --git a/tests/python/pants_test/base_test.py b/tests/python/pants_test/base_test.py
index 852cd2a5a27..596d1f768a2 100644
--- a/tests/python/pants_test/base_test.py
+++ b/tests/python/pants_test/base_test.py
@@ -483,9 +483,16 @@ def warnings(self):
     return self._messages_for_level('WARNING')

   @contextmanager
-  def captured_logging(self):
+  def captured_logging(self, level=None):
+    root_logger = logging.getLogger()
+
+    old_level = root_logger.level
+    root_logger.setLevel(level or logging.NOTSET)
+
     handler = self.LoggingRecorder()
-    logger = logging.getLogger('')
-    logger.addHandler(handler)
-    yield handler
-    logger.removeHandler(handler)
+    root_logger.addHandler(handler)
+    try:
+      yield handler
+    finally:
+      root_logger.setLevel(old_level)
+      root_logger.removeHandler(handler)
diff --git a/tests/python/pants_test/pantsd/test_process_manager.py b/tests/python/pants_test/pantsd/test_process_manager.py
index ba7abdfbb2e..adcee500f57 100644
--- a/tests/python/pants_test/pantsd/test_process_manager.py
+++ b/tests/python/pants_test/pantsd/test_process_manager.py
@@ -6,6 +6,7 @@
                         unicode_literals, with_statement)

 import errno
+import logging
 import os
 import sys
 from contextlib import contextmanager
@@ -141,7 +142,7 @@ def test_readwrite_metadata_by_name(self):

   def test_deadline_until(self):
     with self.assertRaises(self.pmm.Timeout):
-      with self.captured_logging() as captured:
+      with self.captured_logging(logging.INFO) as captured:
         self.pmm._deadline_until(lambda: False, 'the impossible', timeout=.5, info_interval=.1)
     self.assertTrue(4 <= len(captured.infos()) <= 6,
                     'Expected between 4 and 6 infos, got: {}'.format(captured.infos()))
diff --git a/tests/python/pants_test/util/test_osutil.py b/tests/python/pants_test/util/test_osutil.py
index c9379470258..320a4cd1b37 100644
--- a/tests/python/pants_test/util/test_osutil.py
+++ b/tests/python/pants_test/util/test_osutil.py
@@ -5,6 +5,8 @@
 from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                         unicode_literals, with_statement)

+import logging
+
 from pants.util.osutil import OS_ALIASES, known_os_names, normalize_os_name
 from pants_test.base_test import BaseTest

@@ -22,14 +24,14 @@ def test_keys_in_aliases(self):

   def test_no_warnings_on_known_names(self):
     for name in known_os_names():
-      with self.captured_logging() as captured:
+      with self.captured_logging(logging.WARNING) as captured:
         normalize_os_name(name)
         self.assertEqual(0, len(captured.warnings()),
                          'Received unexpected warnings: {}'.format(captured.warnings()))

   def test_warnings_on_unknown_names(self):
     name = 'I really hope no one ever names an operating system with this string.'
-    with self.captured_logging() as captured:
+    with self.captured_logging(logging.WARNING) as captured:
       normalize_os_name(name)
       self.assertEqual(1, len(captured.warnings()),
                        'Expected exactly one warning, but got: {}'.format(captured.warnings()))

From a6a98fea5e37788fdb52d0791111802167f72ed5 Mon Sep 17 00:00:00 2001
From: John Sirois
Date: Thu, 1 Feb 2018 15:50:15 -0800
Subject: [PATCH 07/21] Factor up shared test partitioning code. (#5416)

This factors the handling of `--fast`/`--no-fast` partitioning, as well as the
attendant results summarization, caching, and invalidation handling, out to a
mixin that `PytestRun` and `JUnitRun` share.
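In rough outline, a mixee now only supplies the runner-specific pieces. A
minimal sketch of the resulting contract, using the names from the new
`PartitionedTestRunnerTaskMixin` below (the `ExampleTestRun` task is
hypothetical, and the other `TestRunnerTaskMixin` hooks are omitted):

```python
from contextlib import contextmanager

from pants.task.task import Task
from pants.task.testrunner_task_mixin import PartitionedTestRunnerTaskMixin, TestResult


class ExampleTestRun(PartitionedTestRunnerTaskMixin, Task):
  @contextmanager
  def partitions(self, per_target, all_targets, test_targets):
    # One partition per target under `--no-fast`, else one big partition.
    def iter_partitions():
      groups = [(t,) for t in test_targets] if per_target else [tuple(test_targets)]
      for partition in groups:
        yield partition, ()  # No extra args for run_tests/collect_files.
    yield iter_partitions

  def run_tests(self, fail_fast, test_targets, *args):
    return TestResult.rc(0)  # Pretend every test passed.

  def collect_files(self, *args):
    return []  # Nothing to cache in this sketch.
```
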
Fixes #5307 --- .../pants/backend/jvm/tasks/junit_run.py | 168 +++---------- .../pants/backend/python/tasks/pytest_run.py | 208 +++------------- .../pants/task/testrunner_task_mixin.py | 231 ++++++++++++++++++ 3 files changed, 305 insertions(+), 302 deletions(-) diff --git a/src/python/pants/backend/jvm/tasks/junit_run.py b/src/python/pants/backend/jvm/tasks/junit_run.py index 8ea48415f33..3ee60582133 100644 --- a/src/python/pants/backend/jvm/tasks/junit_run.py +++ b/src/python/pants/backend/jvm/tasks/junit_run.py @@ -29,17 +29,16 @@ from pants.backend.jvm.tasks.reports.junit_html_report import JUnitHtmlReport, NoJunitHtmlReport from pants.base.build_environment import get_buildroot from pants.base.deprecated import deprecated_conditional -from pants.base.exceptions import ErrorWhileTesting, TargetDefinitionException, TaskError +from pants.base.exceptions import TargetDefinitionException, TaskError from pants.base.workunit import WorkUnitLabel from pants.build_graph.files import Files from pants.build_graph.target import Target from pants.build_graph.target_scopes import Scopes -from pants.invalidation.cache_manager import VersionedTargetSet from pants.java.distribution.distribution import DistributionLocator from pants.java.executor import SubprocessExecutor from pants.java.junit.junit_xml_parser import RegistryOfTests, Test, parse_failed_targets from pants.process.lock import OwnerPrintingInterProcessFileLock -from pants.task.testrunner_task_mixin import TestResult, TestRunnerTaskMixin +from pants.task.testrunner_task_mixin import PartitionedTestRunnerTaskMixin, TestResult from pants.util import desktop from pants.util.argutil import ensure_arg, remove_arg from pants.util.contextutil import environment_as, temporary_dir @@ -131,7 +130,7 @@ def iter_possible_tests(self, context): yield Test(classname=self._classname, methodname=self._methodname) -class JUnitRun(TestRunnerTaskMixin, JvmToolTaskMixin, JvmTask): +class JUnitRun(PartitionedTestRunnerTaskMixin, JvmToolTaskMixin, JvmTask): """ :API: public """ @@ -146,10 +145,6 @@ def implementation_version(cls): def register_options(cls, register): super(JUnitRun, cls).register_options(register) - register('--fast', type=bool, default=True, fingerprint=True, - help='Run all tests in a single junit invocation. If turned off, each test target ' - 'will run in its own junit invocation, which will be slower, but isolates ' - 'tests from process-wide state created by tests in other targets.') register('--batch-size', advanced=True, type=int, default=cls._BATCH_ALL, fingerprint=True, help='Run at most this many tests in a single test process.') register('--test', type=list, fingerprint=True, @@ -174,11 +169,6 @@ def register_options(cls, register): help='Set the working directory. If no argument is passed, use the build root. ' 'If cwd is set on a target, it will supersede this option. It is an error to ' 'use this option in combination with `--chroot`') - register('--chroot', advanced=True, fingerprint=True, type=bool, default=False, - help='Run tests in a chroot. Any loose files tests depend on via `{}` dependencies ' - 'will be copied to the chroot. If cwd is set on a target, it will supersede this' - 'option. It is an error to use this option in combination with `--cwd`' - .format(Files.alias())) register('--strict-jvm-version', type=bool, advanced=True, fingerprint=True, help='If true, will strictly require running junits with the same version of java as ' 'the platform -target level. 
Otherwise, the platform -target level will be ' @@ -234,13 +224,12 @@ def __init__(self, *args, **kwargs): options = self.get_options() self._tests_to_run = options.test self._batch_size = options.batch_size - self._fail_fast = options.fail_fast - if options.cwd and options.chroot: + if options.cwd and self.run_tests_in_chroot: raise self.OptionError('Cannot set both `cwd` ({}) and ask for a `chroot` at the same time.' .format(options.cwd)) - if options.chroot: + if self.run_tests_in_chroot: self._working_dir = None else: self._working_dir = options.cwd or get_buildroot() @@ -252,7 +241,7 @@ def __init__(self, *args, **kwargs): self._legacy_report_layout = options.legacy_report_layout @memoized_method - def _args(self, output_dir): + def _args(self, fail_fast, output_dir): args = self.args[:] options = self.get_options() @@ -263,7 +252,7 @@ def _args(self, output_dir): else: args.append('-output-mode=NONE') - if self._fail_fast: + if fail_fast: args.append('-fail-fast') args.append('-outdir') args.append(output_dir) @@ -398,15 +387,11 @@ def _chroot(self, targets, workdir): ) yield chroot - @property - def _per_target(self): - return not self.get_options().fast - @property def _batched(self): return self._batch_size != self._BATCH_ALL - def _run_junit(self, test_targets, output_dir, coverage): + def run_tests(self, fail_fast, test_targets, output_dir, coverage): test_registry = self._collect_test_targets(test_targets) if test_registry.empty: return TestResult.rc(0) @@ -447,7 +432,7 @@ def parse_error_handler(parse_error): distribution = JvmPlatform.preferred_jvm_distribution([platform], self._strict_jvm_version) # Override cmdline args with values from junit_test() target that specify concurrency: - args = self._args(batch_output_dir) + [u'-xmlreport'] + args = self._args(fail_fast, batch_output_dir) + [u'-xmlreport'] if concurrency is not None: args = remove_arg(args, '-default-parallel') @@ -495,7 +480,7 @@ def parse_error_handler(parse_error): self.report_all_info_for_single_test(self.options_scope, test_target, test_name, test_info) - if result != 0 and self._fail_fast: + if result != 0 and fail_fast: break if result == 0: @@ -573,119 +558,34 @@ def _validate_target(self, target): msg = 'JUnitTests target must include a non-empty set of sources.' raise TargetDefinitionException(target, msg) - @staticmethod - def _vts_for_partition(invalidation_check): - return VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts) - - def check_artifact_cache_for(self, invalidation_check): - # We generate artifacts, namely coverage reports, that cover the full target set. 
- return [self._vts_for_partition(invalidation_check)] - - @staticmethod - def _collect_files(directory): + def collect_files(self, output_dir, coverage): def files_iter(): - for dir_path, _, file_names in os.walk(directory): + for dir_path, _, file_names in os.walk(output_dir): for filename in file_names: yield os.path.join(dir_path, filename) return list(files_iter()) - def _iter_partitions(self, targets, output_dir): - if self._per_target: - for target in targets: - yield (target,), os.path.join(output_dir, target.id) - else: - if targets: - yield tuple(targets), output_dir - - def _execute(self, all_targets): - with self._isolation(all_targets) as (output_dir, reports, coverage): - results = {} - failure = False - for (partition, partition_output_dir) in self._iter_partitions(self._get_test_targets(), - output_dir): - try: - rv = self._run_partition(test_targets=partition, - output_dir=partition_output_dir, - coverage=coverage) - except ErrorWhileTesting as e: - rv = TestResult.from_error(e) - - results[partition] = rv - if not rv.success: - failure = True - if self._fail_fast: - break - - for partition in sorted(results): - rv = results[partition] - if len(partition) == 1 or rv.success: - log = self.context.log.info if rv.success else self.context.log.error - for target in partition: - log('{0:80}.....{1:>10}'.format(target.address.reference(), rv)) - else: - # There is not much useful we can display in summary for a multi-target partition with - # failures without parsing those failures to link them to individual targets; ie: targets - # 2 and 8 failed in this partition of 10 targets. - # TODO(John Sirois): Punting here works since we have in practice just 2 partitionings: - # 1. All targets in singleton partitions - # 2. All targets in 1 partition - # If we get to the point where we have multiple partitions with multiple targets, some - # sort of summary for the multi-target partitions will probably be needed. - pass - - msgs = [str(_rv) for _rv in results.values() if not _rv.success] - failed_targets = [target - for _rv in results.values() if not _rv.success - for target in _rv.failed_targets] - if len(failed_targets) > 0: - error = ErrorWhileTesting('\n'.join(msgs), failed_targets=failed_targets) - elif failure: - # A low-level test execution failure occurred before tests were run. - error = TaskError() - else: - error = None - - reports.generate(output_dir, exc=error) - if error: - raise error - - def _run_partition(self, test_targets, output_dir, coverage): - with self.invalidated(targets=test_targets, - # Re-run tests when the code they test (and depend on) changes. - invalidate_dependents=True) as invalidation_check: - - invalid_test_tgts = [invalid_test_tgt - for vts in invalidation_check.invalid_vts - for invalid_test_tgt in vts.targets] - - # Processing proceeds through: - # 1.) output -> output_dir - # 2.) [iff all == invalid] output_dir -> cache: We do this manually for now. - # 3.) [iff invalid == 0 and all > 0] cache -> workdir: Done transparently by `invalidated`. - - # 1.) Write all results that will be potentially cached to output_dir. - result = self._run_junit(invalid_test_tgts, output_dir, coverage).checked() - - cache_vts = self._vts_for_partition(invalidation_check) - if invalidation_check.all_vts == invalidation_check.invalid_vts: - # 2.) All tests in the partition were invalid, cache successful test results. 
- if result.success and self.artifact_cache_writes_enabled(): - self.update_artifact_cache([(cache_vts, self._collect_files(output_dir))]) - elif not invalidation_check.invalid_vts: - # 3.) The full partition was valid, our results will have been staged for/by caching - # if not already local. - pass + @contextmanager + def partitions(self, per_target, all_targets, test_targets): + with self._isolation(per_target, all_targets) as (output_dir, reports, coverage): + if per_target: + def iter_partitions(): + for test_target in test_targets: + partition = (test_target,) + args = (os.path.join(output_dir, test_target.id), coverage) + yield partition, args else: - # The partition was partially invalid. - - # We don't cache results; so others will need to re-run this partition. - # NB: We will presumably commit this change now though and so others will get this - # partition in a state that executes successfully; so when the 1st of the others - # executes against this partition; they will hit `all_vts == invalid_vts` and - # cache the results. That 1st of others is hopefully CI! - cache_vts.force_invalidate() - - return result + def iter_partitions(): + if test_targets: + partition = tuple(test_targets) + args = (output_dir, coverage) + yield partition, args + + try: + yield iter_partitions + finally: + _, error, _ = sys.exc_info() + reports.generate(output_dir, exc=error) class Reports(object): def __init__(self, junit_html_report, coverage): @@ -707,9 +607,9 @@ def _maybe_open_report(self, report_file_path): raise TaskError(e) @contextmanager - def _isolation(self, all_targets): + def _isolation(self, per_target, all_targets): run_dir = '_runs' - mode_dir = 'isolated' if self._per_target else 'combined' + mode_dir = 'isolated' if per_target else 'combined' batch_dir = str(self._batch_size) if self._batched else 'all' output_dir = os.path.join(self.workdir, run_dir, diff --git a/src/python/pants/backend/python/tasks/pytest_run.py b/src/python/pants/backend/python/tasks/pytest_run.py index dc41fab548f..a05fe72fe10 100644 --- a/src/python/pants/backend/python/tasks/pytest_run.py +++ b/src/python/pants/backend/python/tasks/pytest_run.py @@ -27,11 +27,9 @@ from pants.base.fingerprint_strategy import DefaultFingerprintStrategy from pants.base.hash_utils import Sharder from pants.base.workunit import WorkUnitLabel -from pants.build_graph.files import Files from pants.build_graph.target import Target -from pants.invalidation.cache_manager import VersionedTargetSet from pants.task.task import Task -from pants.task.testrunner_task_mixin import TestResult, TestRunnerTaskMixin +from pants.task.testrunner_task_mixin import PartitionedTestRunnerTaskMixin, TestResult from pants.util.contextutil import pushd, temporary_dir, temporary_file from pants.util.dirutil import mergetree, safe_mkdir, safe_mkdir_for from pants.util.memo import memoized_method, memoized_property @@ -89,7 +87,7 @@ def _map_exit_code(cls, value): return 0 if value in cls._SUCCESS_EXIT_CODES else value -class PytestRun(TestRunnerTaskMixin, Task): +class PytestRun(PartitionedTestRunnerTaskMixin, Task): @classmethod def implementation_version(cls): @@ -98,15 +96,6 @@ def implementation_version(cls): @classmethod def register_options(cls, register): super(PytestRun, cls).register_options(register) - register('--fast', type=bool, default=True, fingerprint=True, - help='Run all tests in a single pytest invocation. 
If turned off, each test target ' - 'will run in its own pytest invocation, which will be slower, but isolates ' - 'tests from process-wide state created by tests in other targets.') - - register('--chroot', advanced=True, fingerprint=True, type=bool, default=False, - help='Run tests in a chroot. Any loose files tests depend on via `{}` dependencies ' - 'will be copied to the chroot.' - .format(Files.alias())) # NB: We always produce junit xml privately, and if this option is specified, we then copy # it to the user-specified directory, post any interaction with the cache to retrieve the @@ -157,12 +146,6 @@ def target_filter(target): def _validate_target(self, target): pass - def _execute(self, all_targets): - test_targets = self._get_test_targets() - if test_targets: - self.context.release_lock() - self._run_tests(test_targets) - class InvalidShardSpecification(TaskError): """Indicates an invalid `--test-shard` option.""" @@ -504,72 +487,34 @@ def _get_target_from_test(self, test_info, targets): file_info = test_info['file'] return relsrc_to_target.get(file_info) - def _iter_partitions(self, targets): - # TODO(John Sirois): Consume `py.test` pexes matched to the partitioning in effect after - # https://github.com/pantsbuild/pants/pull/4638 lands. - if self.get_options().fast: + @contextmanager + def partitions(self, per_target, all_targets, test_targets): + if per_target: + def iter_partitions(): + for test_target in test_targets: + yield (test_target,) + else: targets_by_target_base = OrderedDict() - for target in targets: - targets_for_base = targets_by_target_base.get(target.target_base) + for test_target in test_targets: + targets_for_base = targets_by_target_base.get(test_target.target_base) if targets_for_base is None: targets_for_base = [] - targets_by_target_base[target.target_base] = targets_for_base - targets_for_base.append(target) - for targets in targets_by_target_base.values(): - yield tuple(targets) - else: - for target in targets: - yield (target,) + targets_by_target_base[test_target.target_base] = targets_for_base + targets_for_base.append(test_target) - def _run_tests(self, targets): - results = {} - failure = False - for partition in self._iter_partitions(targets): - try: - rv = self._do_run_tests(partition) - except ErrorWhileTesting as e: - rv = PytestResult.from_error(e) - results[partition] = rv - if not rv.success: - failure = True - if self.get_options().fail_fast: - break - - for partition in sorted(results): - rv = results[partition] - if len(partition) == 1 or rv.success: - log = self.context.log.info if rv.success else self.context.log.error - for target in partition: - log('{0:80}.....{1:>10}'.format(target.address.reference(), rv)) - else: - # There is not much useful we can display in summary for a multi-target partition with - # failures without parsing those failures to link them to individual targets; ie: targets - # 2 and 8 failed in this partition of 10 targets. - # TODO(John Sirois): Punting here works since we have in practice just 2 partitionings: - # 1. All targets in singleton partitions - # 2. All targets in 1 partition - # If we get to the point where we have multiple partitions with multiple targets, some sort - # of summary for the multi-target partitions will probably be needed. 
- pass - - failed_targets = [target - for _rv in results.values() if not _rv.success - for target in _rv.failed_targets] - if failed_targets: - raise ErrorWhileTesting(failed_targets=failed_targets) - elif failure: - # A low-level test execution failure occurred before tests were run. - raise TaskError() + def iter_partitions(): + for test_targets in targets_by_target_base.values(): + yield tuple(test_targets) - @staticmethod - def _vts_for_partition(invalidation_check): - return VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts) + workdir = self.workdir - def check_artifact_cache_for(self, invalidation_check): - # We generate artifacts, namely junit.xml and coverage reports, that cover the full target set - # whether that is all targets in the context (`--fast`) or each target - # individually (`--no-fast`). - return [self._vts_for_partition(invalidation_check)] + def iter_partitions_with_args(): + for partition in iter_partitions(): + workdirs = _Workdirs.for_partition(workdir, partition) + args = (workdirs,) + yield partition, args + + yield iter_partitions_with_args # TODO(John Sirois): Its probably worth generalizing a means to mark certain options or target # attributes as making results un-cacheable. See: https://github.com/pantsbuild/pants/issues/4748 @@ -577,7 +522,7 @@ class NeverCacheFingerprintStrategy(DefaultFingerprintStrategy): def compute_fingerprint(self, target): return uuid.uuid4() - def _fingerprint_strategy(self): + def fingerprint_strategy(self): if self.get_options().profile: # A profile is machine-specific and we assume anyone wanting a profile wants to run it here # and now and not accept some old result, even if on the same inputs. @@ -585,79 +530,19 @@ def _fingerprint_strategy(self): else: return None # Accept the default fingerprint strategy. - # Some notes on invalidation vs caching as used in `_do_run_tests` below. Here invalidation - # refers to executing task work in `Task.invalidated` blocks against invalid targets. Caching - # refers to storing the results of that work in the artifact cache using - # `VersionedTargetSet.results_dir`. One further bit of terminology is partition, which is the - # name for the set of targets passed to the `Task.invalidated` block: - # - # + Caching results for len(partition) > 1: This is trivial iff we always run all targets in - # the partition, but running just invalid targets in the partition is a nicer experience (you - # can whittle away at failures in a loop of `::`-style runs). Running just invalid though - # requires being able to merge prior results for the partition; ie: knowing the details of - # junit xml, coverage data, or using tools that do, to merge data files. The alternative is - # to always run all targets in a partition if even 1 target is invalid. In this way data files - # corresponding to the full partition are always generated, and so on a green partition, the - # cached data files will always represent the full green run. - # - # The compromise taken here is to only cache when `all_vts == invalid_vts`; ie when the partition - # goes green and the run was against the full partition. A common scenario would then be: - # - # 1. Mary makes changes / adds new code and iterates `./pants test tests/python/stuff::` - # gradually getting greener until finally all test targets in the `tests/python/stuff::` set - # pass. She commits the green change, but there is no cached result for it since green state - # for the partition was approached incrementally. - # 2. 
Jake pulls in Mary's green change and runs `./pants test tests/python/stuff::`. There is a - # cache miss and he does a full local run, but since `tests/python/stuff::` is green, - # `all_vts == invalid_vts` and the result is now cached for others. - # - # In this scenario, Jake will likely be a CI process, in which case human others will see a - # cached result from Mary's commit. It's important to note, that the CI process must run the same - # partition as the end user for that end user to benefit and hit the cache. This is unlikely since - # the only natural partitions under CI are single target ones (`--no-fast` or all targets - # `--fast ::`. Its unlikely an end user in a large repo will want to run `--fast ::` since `::` - # is probably a much wider swath of code than they're working on. As such, although `--fast` - # caching is supported, its unlikely to be effective. Caching is best utilized when CI and users - # run `--no-fast`. - def _do_run_tests(self, partition): - with self.invalidated(partition, - fingerprint_strategy=self._fingerprint_strategy(), - # Re-run tests when the code they test (and depend on) changes. - invalidate_dependents=True) as invalidation_check: - - invalid_tgts = [invalid_tgt - for vts in invalidation_check.invalid_vts - for invalid_tgt in vts.targets] - - # Processing proceeds through: - # 1.) output -> workdir - # 2.) [iff all == invalid] workdir -> cache: We do this manually for now. - # 3.) [iff invalid == 0 and all > 0] cache -> workdir: Done transparently by `invalidated`. - - # 1.) Write all results that will be potentially cached to workdir. - workdirs = _Workdirs.for_partition(self.workdir, partition) - result = self._run_pytest_checked(workdirs, invalid_tgts) - - cache_vts = self._vts_for_partition(invalidation_check) - if invalidation_check.all_vts == invalidation_check.invalid_vts: - # 2.) The full partition was invalid, cache successful test results. - if result.success and self.artifact_cache_writes_enabled(): - self.update_artifact_cache([(cache_vts, workdirs.files())]) - elif not invalidation_check.invalid_vts: - # 3.) The full partition was valid, our results will have been staged for/by caching if not - # already local. - pass - else: - # The partition was partially invalid. + def run_tests(self, fail_fast, test_targets, workdirs): + try: + return self._run_pytest(fail_fast, test_targets, workdirs) + finally: + # Unconditionally pluck any results that an end user might need to interact with from the + # workdir to the locations they expect. + self._expose_results(test_targets, workdirs) - # We don't cache results; so others will need to re-run this partition. - # NB: We will presumably commit this change now though and so others will get this - # partition in a state that executes successfully; so when the 1st of the others - # executes against this partition; they will hit `all_vts == invalid_vts` and - # cache the results. That 1st of others is hopefully CI! 
- cache_vts.force_invalidate() + def result_from_error(self, error): + return PytestResult.from_error(error) - return result + def collect_files(self, workdirs): + return workdirs.files() def _expose_results(self, invalid_tgts, workdirs): external_junit_xml_dir = self.get_options().junit_xml_dir @@ -680,20 +565,11 @@ def _expose_results(self, invalid_tgts, workdirs): target_dir = os.path.join(pants_distdir, 'coverage', relpath) mergetree(workdirs.coverage_path, target_dir) - def _run_pytest_checked(self, workdirs, targets): - result = self._run_pytest(workdirs, targets) - - # Unconditionally pluck any results that an end user might need to interact with from the - # workdir to the locations they expect. - self._expose_results(targets, workdirs) - - return result.checked() - - def _run_pytest(self, workdirs, targets): + def _run_pytest(self, fail_fast, targets, workdirs): if not targets: return PytestResult.rc(0) - if self._run_in_chroot: + if self.run_tests_in_chroot: path_func = lambda rel_src: rel_src else: source_chroot = os.path.relpath(self._source_chroot_path(targets), get_buildroot()) @@ -721,7 +597,7 @@ def _run_pytest(self, workdirs, targets): # from leaking into pants test runs. See: https://github.com/pantsbuild/pants/issues/2726 args = ['--junitxml', junitxml_path, '--confcutdir', get_buildroot(), '--continue-on-collection-errors'] - if self.get_options().fail_fast: + if fail_fast: args.extend(['-x']) if self._debug: args.extend(['-s']) @@ -778,13 +654,9 @@ def _pex_run(self, pex, workunit_name, args, env): process = self._spawn(pex, workunit, args, setsid=False, env=env) return process.wait() - @property - def _run_in_chroot(self): - return self.get_options().chroot - @contextmanager def _maybe_run_in_chroot(self, targets): - if self._run_in_chroot: + if self.run_tests_in_chroot: with pushd(self._source_chroot_path(targets)): yield else: diff --git a/src/python/pants/task/testrunner_task_mixin.py b/src/python/pants/task/testrunner_task_mixin.py index eff8d94e0ce..28f14f9cec0 100644 --- a/src/python/pants/task/testrunner_task_mixin.py +++ b/src/python/pants/task/testrunner_task_mixin.py @@ -12,6 +12,9 @@ from threading import Timer from pants.base.exceptions import ErrorWhileTesting, TaskError +from pants.build_graph.files import Files +from pants.invalidation.cache_manager import VersionedTargetSet +from pants.task.task import Task from pants.util.process_handler import subprocess @@ -377,3 +380,231 @@ def _execute(self, all_targets): :param all_targets: list of the targets whose tests are to be run """ raise NotImplementedError + + +class PartitionedTestRunnerTaskMixin(TestRunnerTaskMixin, Task): + """A mixin for test tasks that support running tests over both individual targets and batches. + + Provides support for partitioning via `--fast` (batches) and `--no-fast` (per target) options and + helps ensure correct caching behavior in either mode. + + It's expected that mixees implement proper chrooting (see `run_tests_in_chroot`) to support + correct successful test result caching. + """ + + @classmethod + def register_options(cls, register): + super(PartitionedTestRunnerTaskMixin, cls).register_options(register) + + # TODO(John Sirois): Implement sanity checks on options wrt caching: + # https://github.com/pantsbuild/pants/issues/5073 + + register('--fast', type=bool, default=True, fingerprint=True, + help='Run all tests in a single pytest invocation. 
If turned off, each test target '
+                  'will run in its own pytest invocation, which will be slower, but isolates '
+                  'tests from process-wide state created by tests in other targets.')
+    register('--chroot', advanced=True, fingerprint=True, type=bool, default=False,
+             help='Run tests in a chroot. Any loose files tests depend on via `{}` dependencies '
+                  'will be copied to the chroot.'
+                  .format(Files.alias()))
+
+  @staticmethod
+  def _vts_for_partition(invalidation_check):
+    return VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
+
+  def check_artifact_cache_for(self, invalidation_check):
+    # Tests generate artifacts, namely junit.xml and coverage reports, that cover the full target
+    # set whether that is all targets in the context (`--fast`) or each target individually
+    # (`--no-fast`).
+    return [self._vts_for_partition(invalidation_check)]
+
+  @property
+  def run_tests_in_chroot(self):
+    """Return `True` if tests should be run in a chroot.
+
+    Chrooted tests are expected to be run with $PWD set to a directory with only files explicitly
+    (transitively) depended on by the test targets present.
+
+    :rtype: bool
+    """
+    return self.get_options().chroot
+
+  def _execute(self, all_targets):
+    test_targets = self._get_test_targets()
+    if not test_targets:
+      return
+
+    self.context.release_lock()
+
+    per_target = not self.get_options().fast
+    fail_fast = self.get_options().fail_fast
+
+    results = {}
+    failure = False
+    with self.partitions(per_target, all_targets, test_targets) as partitions:
+      for (partition, args) in partitions():
+        try:
+          rv = self._run_partition(fail_fast, partition, *args)
+        except ErrorWhileTesting as e:
+          rv = self.result_from_error(e)
+
+        results[partition] = rv
+        if not rv.success:
+          failure = True
+          if fail_fast:
+            break
+
+      for partition in sorted(results):
+        rv = results[partition]
+        if len(partition) == 1 or rv.success:
+          log = self.context.log.info if rv.success else self.context.log.error
+          for target in partition:
+            log('{0:80}.....{1:>10}'.format(target.address.reference(), rv))
+        else:
+          # There is not much useful we can display in summary for a multi-target partition with
+          # failures without parsing those failures to link them to individual targets; ie: targets
+          # 2 and 8 failed in this partition of 10 targets.
+          # TODO(John Sirois): Punting here works for our 2 common partitionings:
+          #   1. All targets in singleton partitions
+          #   2. All targets in 1 partition
+          # PytestRun supports multiple partitions with multiple targets each when there are
+          # multiple python source roots, and so some sort of summary for the multi-target
+          # partitions is needed: https://github.com/pantsbuild/pants/issues/5415
+          pass
+
+      msgs = [str(_rv) for _rv in results.values() if not _rv.success]
+      failed_targets = [target
+                        for _rv in results.values() if not _rv.success
+                        for target in _rv.failed_targets]
+      if len(failed_targets) > 0:
+        raise ErrorWhileTesting('\n'.join(msgs), failed_targets=failed_targets)
+      elif failure:
+        # A low-level test execution failure occurred before tests were run.
+        raise TaskError()
+
+  # Some notes on invalidation vs caching as used in `_run_partition` below. Here invalidation
+  # refers to executing task work in `Task.invalidated` blocks against invalid targets. Caching
+  # refers to storing the results of that work in the artifact cache using
+  # `VersionedTargetSet.results_dir`. One further bit of terminology is partition, which is the
+  # name for the set of targets passed to the `Task.invalidated` block:
+  #
+  # + Caching results for len(partition) > 1: This is trivial iff we always run all targets in
+  #   the partition, but running just invalid targets in the partition is a nicer experience (you
+  #   can whittle away at failures in a loop of `::`-style runs). Running just invalid though
+  #   requires being able to merge prior results for the partition; ie: knowing the details of
+  #   junit xml, coverage data, or using tools that do, to merge data files. The alternative is
+  #   to always run all targets in a partition if even 1 target is invalid. In this way data files
+  #   corresponding to the full partition are always generated, and so on a green partition, the
+  #   cached data files will always represent the full green run.
+  #
+  # The compromise taken here is to only cache when `all_vts == invalid_vts`; ie when the partition
+  # goes green and the run was against the full partition. A common scenario would then be:
+  #
+  # 1. Mary makes changes / adds new code and iterates `./pants test tests/python/stuff::`
+  #    gradually getting greener until finally all test targets in the `tests/python/stuff::` set
+  #    pass. She commits the green change, but there is no cached result for it since green state
+  #    for the partition was approached incrementally.
+  # 2. Jake pulls in Mary's green change and runs `./pants test tests/python/stuff::`. There is a
+  #    cache miss and he does a full local run, but since `tests/python/stuff::` is green,
+  #    `all_vts == invalid_vts` and the result is now cached for others.
+  #
+  # In this scenario, Jake will likely be a CI process, in which case human others will see a
+  # cached result from Mary's commit. It's important to note that the CI process must run the same
+  # partition as the end user for that end user to benefit and hit the cache. This is unlikely since
+  # the only natural partitions under CI are single target ones (`--no-fast`) or all targets
+  # (`--fast ::`). It's unlikely an end user in a large repo will want to run `--fast ::` since `::`
+  # is probably a much wider swath of code than they're working on. As such, although `--fast`
+  # caching is supported, it's unlikely to be effective. Caching is best utilized when CI and users
+  # run `--no-fast`.
+  def _run_partition(self, fail_fast, test_targets, *args):
+    with self.invalidated(targets=test_targets,
+                          fingerprint_strategy=self.fingerprint_strategy(),
+                          # Re-run tests when the code they test (and depend on) changes.
+                          invalidate_dependents=True) as invalidation_check:
+
+      invalid_test_tgts = [invalid_test_tgt
+                           for vts in invalidation_check.invalid_vts
+                           for invalid_test_tgt in vts.targets]
+
+      # Processing proceeds through:
+      # 1.) output -> output_dir
+      # 2.) [iff all == invalid] output_dir -> cache: We do this manually for now.
+      # 3.) [iff invalid == 0 and all > 0] cache -> workdir: Done transparently by `invalidated`.
+
+      # 1.) Write all results that will be potentially cached to output_dir.
+      result = self.run_tests(fail_fast, invalid_test_tgts, *args).checked()
+
+      cache_vts = self._vts_for_partition(invalidation_check)
+      if invalidation_check.all_vts == invalidation_check.invalid_vts:
+        # 2.) All tests in the partition were invalid, cache successful test results.
+        if result.success and self.artifact_cache_writes_enabled():
+          self.update_artifact_cache([(cache_vts, self.collect_files(*args))])
+      elif not invalidation_check.invalid_vts:
+        # 3.) The full partition was valid, our results will have been staged for/by caching
+        # if not already local.
+        pass
+      else:
+        # The partition was partially invalid.
+
+        # We don't cache results; so others will need to re-run this partition.
+        # NB: We will presumably commit this change now though and so others will get this
+        # partition in a state that executes successfully; so when the 1st of the others
+        # executes against this partition; they will hit `all_vts == invalid_vts` and
+        # cache the results. That 1st of others is hopefully CI!
+        cache_vts.force_invalidate()
+
+      return result
+
+  def result_from_error(self, error):
+    """Convert an error into a test result.
+
+    :param error: The error to convert into a test result.
+    :type error: :class:`pants.base.exceptions.TaskError`
+    :returns: An unsuccessful test result.
+    :rtype: :class:`TestResult`
+    """
+    return TestResult.from_error(error)
+
+  def fingerprint_strategy(self):
+    """Return a fingerprint strategy for target fingerprinting.
+
+    :returns: A fingerprint strategy instance; by default, `None`; ie let the invalidation and
+              caching framework use the default target fingerprinter.
+    :rtype: :class:`pants.base.fingerprint_strategy.FingerprintStrategy`
+    """
+    return None
+
+  @abstractmethod
+  def partitions(self, per_target, all_targets, test_targets):
+    """Return a context manager that can be called to iterate over target partitions.
+
+    The iterator should return a 2-tuple with the partition's targets in the first slot and a tuple
+    of extra arguments needed by `run_tests` and `collect_files`.
+
+    :rtype: A context manager that is callable with no arguments; returning an iterator over
+            (partition, tuple(args))
+    """
+
+  @abstractmethod
+  def run_tests(self, fail_fast, test_targets, *args):
+    """Runs tests for the given invalid test targets.
+
+    :param bool fail_fast: `True` if the test run should fail as fast as possible.
+    :param test_targets: The test targets to run tests for.
+    :type test_targets: list of :class:`pants.build_graph.target.Target`s of the type iterated by
+                        `partitions`.
+    :param *args: Extra args associated with the partition of test targets being run as returned by
+                  the `partitions` iterator.
+    :returns: A test result summarizing the result of this test run.
+    :rtype: :class:`TestResult`
+    """
+
+  @abstractmethod
+  def collect_files(self, *args):
+    """Collects output files from a test run that should be cached.
+
+    :param *args: Extra args associated with the partition of test targets being run as returned by
+                  the `partitions` iterator.
+    :returns: A list of paths to files that should be cached.
+    :rtype: list of str
+    """

From 2b2f59d9f0a385e9369e23ae9784e1eaf854ed98 Mon Sep 17 00:00:00 2001
From: Stu Hood
Date: Thu, 1 Feb 2018 18:51:24 -0800
Subject: [PATCH 08/21] Improve python/rust boundary error handling (#5414)

### Problem

For calls from python to rust, we didn't have a convention in place (with the
exception of calls that returned `Node` values) for returning the equivalent
of a rust `Result` type to python.

### Solution

Repurpose an existing Externs struct (`RunnableComplete`) as `PyResult`, and
use it to remove a panicking TODO that was easy to trip on. Additionally,
rename `invoke_runnable` to `call`, and add `eval`.

### Result

Begins to fix #4208 (although we should still close it), and avoids a
too-common panic.
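With this in place, every boundary call can funnel through one small check on
the python side. A sketch of that pattern (the `raise_or_return` helper is
hypothetical; `is_throw` and `value` are the real `PyResult` fields from the
diff below):

```python
def raise_or_return(py_result, from_value):
  """Convert a native PyResult into a python value, raising on is_throw.

  `from_value` converts a native Value handle back into a python object, as
  the scheduler's `_from_value` does in the diff below.
  """
  if py_result.is_throw:
    # The rust side wrapped an Err(String) via create_exception, or captured
    # a python exception raised inside extern_call.
    raise from_value(py_result.value)
  return from_value(py_result.value)
```
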
--- src/python/pants/engine/native.py | 52 +++++++----- src/python/pants/engine/scheduler.py | 10 ++- src/rust/engine/src/context.rs | 2 +- src/rust/engine/src/externs.rs | 82 ++++++++++++------- src/rust/engine/src/lib.rs | 20 +++-- src/rust/engine/src/nodes.rs | 18 ++-- src/rust/engine/src/rule_graph.rs | 14 +--- src/rust/engine/src/scheduler.rs | 33 ++++---- tests/python/pants_test/engine/test_engine.py | 21 ++++- .../pants_test/engine/test_scheduler.py | 4 +- 10 files changed, 146 insertions(+), 110 deletions(-) diff --git a/src/python/pants/engine/native.py b/src/python/pants/engine/native.py index eb9a5da2a4c..6955475deab 100644 --- a/src/python/pants/engine/native.py +++ b/src/python/pants/engine/native.py @@ -79,10 +79,9 @@ } BufferBuffer; typedef struct { - Value value; _Bool is_throw; - Buffer traceback; -} RunnableComplete; + Value value; +} PyResult; typedef void ExternContext; @@ -103,7 +102,8 @@ typedef ValueBuffer (*extern_ptr_project_multi)(ExternContext*, Value*, uint8_t*, uint64_t); typedef Value (*extern_ptr_project_ignoring_type)(ExternContext*, Value*, uint8_t*, uint64_t); typedef Value (*extern_ptr_create_exception)(ExternContext*, uint8_t*, uint64_t); -typedef RunnableComplete (*extern_ptr_invoke_runnable)(ExternContext*, Value*, Value*, uint64_t, _Bool); +typedef PyResult (*extern_ptr_call)(ExternContext*, Value*, Value*, uint64_t); +typedef PyResult (*extern_ptr_eval)(ExternContext*, uint8_t*, uint64_t); typedef void Tasks; typedef void Scheduler; @@ -127,6 +127,8 @@ CFFI_HEADERS = ''' void externs_set(ExternContext*, extern_ptr_log, + extern_ptr_call, + extern_ptr_eval, extern_ptr_key_for, extern_ptr_val_for, extern_ptr_clone_val, @@ -142,7 +144,6 @@ extern_ptr_project_ignoring_type, extern_ptr_project_multi, extern_ptr_create_exception, - extern_ptr_invoke_runnable, TypeId); Tasks* tasks_create(void); @@ -196,7 +197,7 @@ void graph_visualize(Scheduler*, ExecutionRequest*, char*); void graph_trace(Scheduler*, ExecutionRequest*, char*); -void execution_add_root_select(Scheduler*, ExecutionRequest*, Key, TypeConstraint); +PyResult execution_add_root_select(Scheduler*, ExecutionRequest*, Key, TypeConstraint); RawNodes* execution_execute(Scheduler*, ExecutionRequest*); Value validator_run(Scheduler*); @@ -216,6 +217,8 @@ CFFI_EXTERNS = ''' extern "Python" { void extern_log(ExternContext*, uint8_t, uint8_t*, uint64_t); + PyResult extern_call(ExternContext*, Value*, Value*, uint64_t); + PyResult extern_eval(ExternContext*, uint8_t*, uint64_t); Key extern_key_for(ExternContext*, Value*); Value extern_val_for(ExternContext*, Key*); Value extern_clone_val(ExternContext*, Value*); @@ -231,7 +234,6 @@ Value extern_project_ignoring_type(ExternContext*, Value*, uint8_t*, uint64_t); ValueBuffer extern_project_multi(ExternContext*, Value*, uint8_t*, uint64_t); Value extern_create_exception(ExternContext*, uint8_t*, uint64_t); - RunnableComplete extern_invoke_runnable(ExternContext*, Value*, Value*, uint64_t, _Bool); } ''' @@ -283,6 +285,17 @@ def _initialize_externs(ffi): def to_py_str(msg_ptr, msg_len): return bytes(ffi.buffer(msg_ptr, msg_len)).decode('utf-8') + def call(c, func, args): + try: + val = func(*args) + is_throw = False + except Exception as e: + val = e + is_throw = True + e._formatted_exc = traceback.format_exc() + + return PyResult(is_throw, c.to_value(val)) + @ffi.def_extern() def extern_log(context_handle, level, msg_ptr, msg_len): """Given a log level and utf8 message string, log it.""" @@ -416,22 +429,18 @@ def extern_create_exception(context_handle, 
msg_ptr, msg_len): return c.to_value(Exception(msg)) @ffi.def_extern() - def extern_invoke_runnable(context_handle, func, args_ptr, args_len, cacheable): - """Given a destructured rawRunnable, run it.""" + def extern_call(context_handle, func, args_ptr, args_len): + """Given a callable, call it.""" c = ffi.from_handle(context_handle) runnable = c.from_value(func) args = tuple(c.from_value(arg) for arg in ffi.unpack(args_ptr, args_len)) + return call(c, runnable, args) - try: - val = runnable(*args) - is_throw = False - traceback_str = '' - except Exception as e: - val = e - is_throw = True - traceback_str = traceback.format_exc() - - return RunnableComplete(c.to_value(val), is_throw, c.utf8_buf(traceback_str)) + @ffi.def_extern() + def extern_eval(context_handle, python_code_str_ptr, python_code_str_len): + """Given an evalable string, eval it and return a Value for its result.""" + c = ffi.from_handle(context_handle) + return call(c, eval, [to_py_str(python_code_str_ptr, python_code_str_len)]) class Value(datatype('Value', ['handle'])): @@ -454,7 +463,7 @@ class TypeId(datatype('TypeId', ['id_'])): """Corresponds to the native object of the same name.""" -class RunnableComplete(datatype('RunnableComplete', ['value', 'is_throw', 'traceback'])): +class PyResult(datatype('PyResult', ['is_throw', 'value'])): """Corresponds to the native object of the same name.""" @@ -632,6 +641,8 @@ def init_externs(): context = ExternContext(self.ffi) self.lib.externs_set(context.handle, self.ffi_lib.extern_log, + self.ffi_lib.extern_call, + self.ffi_lib.extern_eval, self.ffi_lib.extern_key_for, self.ffi_lib.extern_val_for, self.ffi_lib.extern_clone_val, @@ -647,7 +658,6 @@ def init_externs(): self.ffi_lib.extern_project_ignoring_type, self.ffi_lib.extern_project_multi, self.ffi_lib.extern_create_exception, - self.ffi_lib.extern_invoke_runnable, TypeId(context.to_id(str))) return context diff --git a/src/python/pants/engine/scheduler.py b/src/python/pants/engine/scheduler.py index f63e5b93b51..692fd56c62c 100644 --- a/src/python/pants/engine/scheduler.py +++ b/src/python/pants/engine/scheduler.py @@ -265,10 +265,12 @@ def exec_reset(self): self._native.lib.execution_reset(self._scheduler) def add_root_selection(self, execution_request, subject, product): - self._native.lib.execution_add_root_select(self._scheduler, - execution_request, - self._to_key(subject), - self._to_constraint(product)) + res = self._native.lib.execution_add_root_select(self._scheduler, + execution_request, + self._to_key(subject), + self._to_constraint(product)) + if res.is_throw: + raise self._from_value(res.value) def visualize_to_dir(self): return self._native.visualize_to_dir diff --git a/src/rust/engine/src/context.rs b/src/rust/engine/src/context.rs index e4b36c03d34..76db784e182 100644 --- a/src/rust/engine/src/context.rs +++ b/src/rust/engine/src/context.rs @@ -65,7 +65,7 @@ impl Core { panic!("Could not initialize Snapshot directory: {:?}", e); }); tasks.singleton_replace( - externs::invoke_unsafe( + externs::unsafe_call( &types.construct_snapshots, &vec![ externs::store_bytes(snapshots.snapshot_path().as_os_str().as_bytes()), diff --git a/src/rust/engine/src/externs.rs b/src/rust/engine/src/externs.rs index 94c4e6cefc3..974fc752949 100644 --- a/src/rust/engine/src/externs.rs +++ b/src/rust/engine/src/externs.rs @@ -19,6 +19,12 @@ pub fn log(level: LogLevel, msg: &str) { }) } +pub fn eval(python: &str) -> Result { + with_externs(|e| { + (e.eval)(e.context, python.as_ptr(), python.len() as u64) + }).into() +} + pub fn 
key_for(val: &Value) -> Key { with_externs(|e| (e.key_for)(e.context, val)) } @@ -162,30 +168,22 @@ pub fn create_exception(msg: &str) -> Value { }) } -pub fn invoke_runnable(func: &Value, args: &[Value], cacheable: bool) -> Result { - let result = with_externs(|e| { - (e.invoke_runnable)(e.context, func, args.as_ptr(), args.len() as u64, cacheable) - }); - if result.is_throw { - let traceback = result.traceback.to_string().unwrap_or_else(|e| { - format!( - "", - result.traceback, - e - ) - }); - Err(Failure::Throw(result.value, traceback)) - } else { - Ok(result.value) - } +pub fn call_method(value: &Value, method: &str, args: &[Value]) -> Result { + call(&project_ignoring_type(&value, method), args) +} + +pub fn call(func: &Value, args: &[Value]) -> Result { + with_externs(|e| { + (e.call)(e.context, func, args.as_ptr(), args.len() as u64) + }).into() } /// /// NB: Panics on failure. Only recommended for use with built-in functions, such as /// those configured in types::Types. /// -pub fn invoke_unsafe(func: &Function, args: &Vec) -> Value { - invoke_runnable(&val_for_id(func.0), args, false).unwrap_or_else(|e| { +pub fn unsafe_call(func: &Function, args: &Vec) -> Value { + call(&val_for_id(func.0), args).unwrap_or_else(|e| { panic!("Core function `{}` failed: {:?}", id_to_str(func.0), e); }) } @@ -224,6 +222,8 @@ pub type ExternContext = raw::c_void; pub struct Externs { context: *const ExternContext, log: LogExtern, + call: CallExtern, + eval: EvalExtern, key_for: KeyForExtern, val_for: ValForExtern, clone_val: CloneValExtern, @@ -240,7 +240,6 @@ pub struct Externs { id_to_str: IdToStrExtern, val_to_str: ValToStrExtern, create_exception: CreateExceptionExtern, - invoke_runnable: InvokeRunnable, // TODO: This type is also declared on `types::Types`. py_str_type: TypeId, } @@ -253,6 +252,8 @@ impl Externs { pub fn new( ext_context: *const ExternContext, log: LogExtern, + call: CallExtern, + eval: EvalExtern, key_for: KeyForExtern, val_for: ValForExtern, clone_val: CloneValExtern, @@ -268,12 +269,13 @@ impl Externs { project_ignoring_type: ProjectIgnoringTypeExtern, project_multi: ProjectMultiExtern, create_exception: CreateExceptionExtern, - invoke_runnable: InvokeRunnable, py_str_type: TypeId, ) -> Externs { Externs { context: ext_context, log: log, + call: call, + eval: eval, key_for: key_for, val_for: val_for, clone_val: clone_val, @@ -290,7 +292,6 @@ impl Externs { id_to_str: id_to_str, val_to_str: val_to_str, create_exception: create_exception, - invoke_runnable: invoke_runnable, py_str_type: py_str_type, } } @@ -342,10 +343,32 @@ pub enum LogLevel { #[repr(C)] #[derive(Debug)] -pub struct RunnableComplete { - value: Value, +pub struct PyResult { is_throw: bool, - traceback: Buffer, + value: Value, +} + +impl From for Result { + fn from(result: PyResult) -> Self { + if result.is_throw { + let traceback = project_str(&result.value, "_formatted_exc"); + Err(Failure::Throw(result.value, traceback)) + } else { + Ok(result.value) + } + } +} + +impl From> for PyResult { + fn from(res: Result<(), String>) -> Self { + match res { + Ok(()) => PyResult { is_throw: false, value: eval("None").unwrap() }, + Err(msg) => PyResult { + is_throw: true, + value: create_exception(&msg), + }, + } + } } // Points to an array containing a series of values allocated by Python. 
@@ -460,12 +483,11 @@ pub type CreateExceptionExtern = extern "C" fn(*const ExternContext, str_len: u64) -> Value; -pub type InvokeRunnable = extern "C" fn(*const ExternContext, - *const Value, - *const Value, - u64, - bool) - -> RunnableComplete; +pub type CallExtern = extern "C" fn(*const ExternContext, *const Value, *const Value, u64) + -> PyResult; + +pub type EvalExtern = extern "C" fn(*const ExternContext, python_ptr: *const u8, python_len: u64) + -> PyResult; pub fn with_vec(c_ptr: *mut C, c_len: usize, f: F) -> T where diff --git a/src/rust/engine/src/lib.rs b/src/rust/engine/src/lib.rs index f6102e48231..b489adb8259 100644 --- a/src/rust/engine/src/lib.rs +++ b/src/rust/engine/src/lib.rs @@ -36,10 +36,10 @@ use std::path::{Path, PathBuf}; use context::Core; use core::{Failure, Function, Key, TypeConstraint, TypeId, Value}; use externs::{Buffer, BufferBuffer, CloneValExtern, DropHandlesExtern, CreateExceptionExtern, - ExternContext, Externs, IdToStrExtern, InvokeRunnable, LogExtern, KeyForExtern, - ProjectExtern, ProjectMultiExtern, ProjectIgnoringTypeExtern, SatisfiedByExtern, - StoreI32Extern, SatisfiedByTypeExtern, StoreListExtern, StoreBytesExtern, - TypeIdBuffer, ValForExtern, ValToStrExtern}; + ExternContext, Externs, IdToStrExtern, CallExtern, EvalExtern, LogExtern, + KeyForExtern, ProjectExtern, ProjectMultiExtern, ProjectIgnoringTypeExtern, + PyResult, SatisfiedByExtern, StoreI32Extern, SatisfiedByTypeExtern, StoreListExtern, + StoreBytesExtern, TypeIdBuffer, ValForExtern, ValToStrExtern}; use rule_graph::{GraphMaker, RuleGraph}; use scheduler::{ExecutionRequest, RootResult, Scheduler}; use tasks::Tasks; @@ -119,6 +119,8 @@ impl RawNodes { pub extern "C" fn externs_set( ext_context: *const ExternContext, log: LogExtern, + call: CallExtern, + eval: EvalExtern, key_for: KeyForExtern, val_for: ValForExtern, clone_val: CloneValExtern, @@ -134,12 +136,13 @@ pub extern "C" fn externs_set( project_ignoring_type: ProjectIgnoringTypeExtern, project_multi: ProjectMultiExtern, create_exception: CreateExceptionExtern, - invoke_runnable: InvokeRunnable, py_str_type: TypeId, ) { externs::set_externs(Externs::new( ext_context, log, + call, + eval, key_for, val_for, clone_val, @@ -155,7 +158,6 @@ pub extern "C" fn externs_set( project_ignoring_type, project_multi, create_exception, - invoke_runnable, py_str_type, )); } @@ -255,10 +257,12 @@ pub extern "C" fn execution_add_root_select( execution_request_ptr: *mut ExecutionRequest, subject: Key, product: TypeConstraint, -) { +) -> PyResult { with_scheduler(scheduler_ptr, |scheduler| { with_execution_request(execution_request_ptr, |execution_request| { - scheduler.add_root_select(execution_request, subject, product); + scheduler + .add_root_select(execution_request, subject, product) + .into() }) }) } diff --git a/src/rust/engine/src/nodes.rs b/src/rust/engine/src/nodes.rs index 2bcd9dde97e..e3cc075630a 100644 --- a/src/rust/engine/src/nodes.rs +++ b/src/rust/engine/src/nodes.rs @@ -310,7 +310,7 @@ impl Select { // TODO: request the Node that invokes the process, rather than invoke directly let result = process_executor::local::run_command_locally(request).unwrap(); vec![ - future::ok(externs::invoke_unsafe( + future::ok(externs::unsafe_call( &context.core.types.construct_process_result, &vec![ externs::store_bytes(&result.stdout), @@ -972,7 +972,7 @@ impl Snapshot { .iter() .map(|ps| Self::store_path_stat(context, ps)) .collect(); - externs::invoke_unsafe( + externs::unsafe_call( &context.core.types.construct_snapshot, &vec![ 
externs::store_bytes(&item.fingerprint.0), @@ -987,12 +987,12 @@ impl Snapshot { fn store_dir(context: &Context, item: &Dir) -> Value { let args = vec![Self::store_path(item.0.as_path())]; - externs::invoke_unsafe(&context.core.types.construct_dir, &args) + externs::unsafe_call(&context.core.types.construct_dir, &args) } fn store_file(context: &Context, item: &File) -> Value { let args = vec![Self::store_path(item.path.as_path())]; - externs::invoke_unsafe(&context.core.types.construct_file, &args) + externs::unsafe_call(&context.core.types.construct_file, &args) } fn store_path_stat(context: &Context, item: &PathStat) -> Value { @@ -1004,11 +1004,11 @@ impl Snapshot { vec![Self::store_path(path), Self::store_file(context, stat)] } }; - externs::invoke_unsafe(&context.core.types.construct_path_stat, &args) + externs::unsafe_call(&context.core.types.construct_path_stat, &args) } fn store_file_content(context: &Context, item: &FileContent) -> Value { - externs::invoke_unsafe( + externs::unsafe_call( &context.core.types.construct_file_content, &vec![ Self::store_path(&item.path), @@ -1022,7 +1022,7 @@ impl Snapshot { .iter() .map(|e| Self::store_file_content(context, e)) .collect(); - externs::invoke_unsafe( + externs::unsafe_call( &context.core.types.construct_files_content, &vec![externs::store_list(entries.iter().collect(), false)], ) @@ -1117,9 +1117,7 @@ impl Node for Task { let task = self.task.clone(); deps .then(move |deps_result| match deps_result { - Ok(deps) => { - externs::invoke_runnable(&externs::val_for_id(task.func.0), &deps, task.cacheable) - } + Ok(deps) => externs::call(&externs::val_for_id(task.func.0), &deps), Err(err) => Err(err), }) .to_boxed() diff --git a/src/rust/engine/src/rule_graph.rs b/src/rust/engine/src/rule_graph.rs index 6d792d82b27..7512da8d631 100644 --- a/src/rust/engine/src/rule_graph.rs +++ b/src/rust/engine/src/rule_graph.rs @@ -648,8 +648,9 @@ pub struct RuleGraph { } fn type_constraint_str(type_constraint: TypeConstraint) -> String { - let val = to_val(type_constraint); - call_on_val(&val, "graph_str") + let str_val = externs::call_method(&to_val(type_constraint), "graph_str", &[]) + .expect("string from calling repr"); + externs::val_to_str(&str_val) } fn to_val(type_constraint: TypeConstraint) -> Value { @@ -664,15 +665,6 @@ fn to_val_from_id(id: Id) -> Value { externs::val_for_id(id) } -fn call_on_val(value: &Value, method: &str) -> String { - let rpr_val = externs::project_ignoring_type(&value, method); - - let invoke_result = - externs::invoke_runnable(&rpr_val, &[], false).expect("string from calling repr"); - externs::val_to_str(&invoke_result) -} - - fn function_str(func: &Function) -> String { let as_val = to_val_from_func(func); val_name(&as_val) diff --git a/src/rust/engine/src/scheduler.rs b/src/rust/engine/src/scheduler.rs index d7e0fa2a6fd..6477c18a047 100644 --- a/src/rust/engine/src/scheduler.rs +++ b/src/rust/engine/src/scheduler.rs @@ -63,43 +63,38 @@ impl Scheduler { request: &mut ExecutionRequest, subject: Key, product: TypeConstraint, - ) { + ) -> Result<(), String> { let edges = self.find_root_edges_or_update_rule_graph( subject.type_id().clone(), - selectors::Selector::Select(selectors::Select::without_variant(product)), - ); + selectors::Selector::Select( + selectors::Select::without_variant(product), + ), + )?; request.roots.push(Select::new( product, subject, Default::default(), &edges, )); + Ok(()) } fn find_root_edges_or_update_rule_graph( &self, subject_type: TypeId, selector: selectors::Selector, - ) -> 
rule_graph::RuleEdges {
-    // TODO what to do if there isn't a match, ie if there is a root type that hasn't been specified
-    // TODO up front.
-    // TODO Handle the case where the requested root is not in the list of roots that the graph was
-    // created with.
-    //
-    // Options
-    //  1. Toss the graph and make a subgraph specific graph, blowing up if that fails.
-    //     I can do this with minimal changes.
-    //  2. Update the graph & check result,
-
+  ) -> Result<rule_graph::RuleEdges, String> {
     self
       .core
       .rule_graph
       .find_root_edges(subject_type.clone(), selector.clone())
-      .expect(&format!(
-        "Edges to have been found TODO handle this selector: {:?}, subject {:?}",
-        rule_graph::selector_str(&selector),
-        subject_type
-      ))
+      .ok_or_else(|| {
+        format!(
+          "No installed rules can satisfy {} for a root subject of type {}.",
+          rule_graph::selector_str(&selector),
+          rule_graph::type_str(subject_type)
+        )
+      })
   }
 
   ///
diff --git a/tests/python/pants_test/engine/test_engine.py b/tests/python/pants_test/engine/test_engine.py
index 63c4843a808..d03ca891b29 100644
--- a/tests/python/pants_test/engine/test_engine.py
+++ b/tests/python/pants_test/engine/test_engine.py
@@ -125,8 +125,8 @@ def test_include_trace_error_raises_error_with_trace(self):
       Computing Task(, , =A)
         Throw(An exception for B)
           Traceback (most recent call last):
-            File LOCATION-INFO, in extern_invoke_runnable
-              val = runnable(*args)
+            File LOCATION-INFO, in call
+              val = func(*args)
             File LOCATION-INFO, in nested_raise
               fn_raises(x)
             File LOCATION-INFO, in fn_raises
@@ -156,8 +156,8 @@ def test_trace_does_not_include_cancellations(self):
       Computing Task(, , =C)
         Throw(An exception for B)
           Traceback (most recent call last):
-            File LOCATION-INFO, in extern_invoke_runnable
-              val = runnable(*args)
+            File LOCATION-INFO, in call
+              val = func(*args)
             File LOCATION-INFO, in nested_raise
               fn_raises(x)
             File LOCATION-INFO, in fn_raises
@@ -165,3 +165,16 @@
         Exception: An exception for B
     ''').lstrip()+'\n',
       remove_locations_from_traceback(str(cm.exception)))
+
+  def test_illegal_root_selection(self):
+    rules = [
+      RootRule(B),
+    ]
+
+    scheduler = self.scheduler(rules, include_trace_on_error=False)
+
+    # No rules are available to compute A.
+    with self.assertRaises(Exception) as cm:
+      list(scheduler.product_request(A, subjects=[(B())]))
+
+    self.assert_equal_with_printing('No installed rules can satisfy Select(A) for a root subject of type B.', str(cm.exception))
diff --git a/tests/python/pants_test/engine/test_scheduler.py b/tests/python/pants_test/engine/test_scheduler.py
index 60c485ec836..6e7334c4cef 100644
--- a/tests/python/pants_test/engine/test_scheduler.py
+++ b/tests/python/pants_test/engine/test_scheduler.py
@@ -269,8 +269,8 @@ def test_trace_includes_rule_exception_traceback(self):
       Computing Task(, , =A)
         Throw(An exception for B)
           Traceback (most recent call last):
-            File LOCATION-INFO, in extern_invoke_runnable
-              val = runnable(*args)
+            File LOCATION-INFO, in call
+              val = func(*args)
             File LOCATION-INFO, in nested_raise
               fn_raises(x)
             File LOCATION-INFO, in fn_raises

From 865005f55ab91ca49b92a3a08d42b246aa1a3d11 Mon Sep 17 00:00:00 2001
From: Stu Hood
Date: Fri, 2 Feb 2018 09:53:33 -0800
Subject: [PATCH 09/21] Fix tasks2 deprecations to each have their own module. (#5421)

### Problem

The `tasks2` deprecations from #5363 did not each have their own modules, and thus were not importable ([see](https://github.com/pantsbuild/pants/pull/5363/files#r164906101)).

### Solution

Move the deprecations into their own modules/files.
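
For example (an illustrative usage, not part of the diff below), a legacy deep import such as the following should now resolve again and emit the module deprecation warning, instead of failing with an ImportError:

```python
# The tasks2 package now holds one shim module per task, so the old
# deep import path works again and warns at import time.
from pants.backend.python.tasks2.pytest_run import PytestRun
```
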
--- src/python/pants/backend/python/BUILD | 11 +---------- src/python/pants/backend/python/tasks2/BUILD | 9 +++++++++ src/python/pants/backend/python/tasks2/__init__.py | 0 .../pants/backend/python/tasks2/gather_sources.py | 14 ++++++++++++++ .../pants/backend/python/tasks2/pytest_prep.py | 14 ++++++++++++++ .../pants/backend/python/tasks2/pytest_run.py | 14 ++++++++++++++ .../backend/python/tasks2/python_binary_create.py | 14 ++++++++++++++ .../pants/backend/python/tasks2/python_repl.py | 14 ++++++++++++++ .../pants/backend/python/tasks2/python_run.py | 14 ++++++++++++++ .../{tasks2.py => tasks2/resolve_requirements.py} | 12 +----------- .../backend/python/tasks2/select_interpreter.py | 14 ++++++++++++++ src/python/pants/backend/python/tasks2/setup_py.py | 14 ++++++++++++++ 12 files changed, 123 insertions(+), 21 deletions(-) create mode 100644 src/python/pants/backend/python/tasks2/BUILD create mode 100644 src/python/pants/backend/python/tasks2/__init__.py create mode 100644 src/python/pants/backend/python/tasks2/gather_sources.py create mode 100644 src/python/pants/backend/python/tasks2/pytest_prep.py create mode 100644 src/python/pants/backend/python/tasks2/pytest_run.py create mode 100644 src/python/pants/backend/python/tasks2/python_binary_create.py create mode 100644 src/python/pants/backend/python/tasks2/python_repl.py create mode 100644 src/python/pants/backend/python/tasks2/python_run.py rename src/python/pants/backend/python/{tasks2.py => tasks2/resolve_requirements.py} (50%) create mode 100644 src/python/pants/backend/python/tasks2/select_interpreter.py create mode 100644 src/python/pants/backend/python/tasks2/setup_py.py diff --git a/src/python/pants/backend/python/BUILD b/src/python/pants/backend/python/BUILD index 65eb888634d..79bbbb292e5 100644 --- a/src/python/pants/backend/python/BUILD +++ b/src/python/pants/backend/python/BUILD @@ -11,7 +11,7 @@ python_library( ':python_requirements', 'src/python/pants/backend/python/targets:python', 'src/python/pants/backend/python/tasks', - 'src/python/pants/backend/python:tasks2', + 'src/python/pants/backend/python/tasks2', 'src/python/pants/build_graph', 'src/python/pants/goal:task_registrar', ] @@ -133,15 +133,6 @@ python_library( ] ) -python_library( - name = 'tasks2', - sources = ['tasks2.py'], - dependencies = [ - 'src/python/pants/backend/python/tasks', - 'src/python/pants/base:deprecated', - ] -) - python_library( name = 'thrift_builder', sources = ['thrift_builder.py'], diff --git a/src/python/pants/backend/python/tasks2/BUILD b/src/python/pants/backend/python/tasks2/BUILD new file mode 100644 index 00000000000..796622d8372 --- /dev/null +++ b/src/python/pants/backend/python/tasks2/BUILD @@ -0,0 +1,9 @@ +# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). + +python_library( + dependencies=[ + 'src/python/pants/backend/python/tasks', + 'src/python/pants/base:deprecated', + ], +) diff --git a/src/python/pants/backend/python/tasks2/__init__.py b/src/python/pants/backend/python/tasks2/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/python/pants/backend/python/tasks2/gather_sources.py b/src/python/pants/backend/python/tasks2/gather_sources.py new file mode 100644 index 00000000000..37f2c3347f8 --- /dev/null +++ b/src/python/pants/backend/python/tasks2/gather_sources.py @@ -0,0 +1,14 @@ +# coding=utf-8 +# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). 
+# Licensed under the Apache License, Version 2.0 (see LICENSE). + +from __future__ import (absolute_import, division, generators, nested_scopes, print_function, + unicode_literals, with_statement) + +from pants.backend.python.tasks import GatherSources +from pants.base.deprecated import deprecated_module + + +deprecated_module('1.7.0.dev0', 'Use pants.backend.python.tasks instead') + +GatherSources = GatherSources diff --git a/src/python/pants/backend/python/tasks2/pytest_prep.py b/src/python/pants/backend/python/tasks2/pytest_prep.py new file mode 100644 index 00000000000..757878484e4 --- /dev/null +++ b/src/python/pants/backend/python/tasks2/pytest_prep.py @@ -0,0 +1,14 @@ +# coding=utf-8 +# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). + +from __future__ import (absolute_import, division, generators, nested_scopes, print_function, + unicode_literals, with_statement) + +from pants.backend.python.tasks import PytestPrep +from pants.base.deprecated import deprecated_module + + +deprecated_module('1.7.0.dev0', 'Use pants.backend.python.tasks instead') + +PytestPrep = PytestPrep diff --git a/src/python/pants/backend/python/tasks2/pytest_run.py b/src/python/pants/backend/python/tasks2/pytest_run.py new file mode 100644 index 00000000000..1f3867af8ce --- /dev/null +++ b/src/python/pants/backend/python/tasks2/pytest_run.py @@ -0,0 +1,14 @@ +# coding=utf-8 +# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). + +from __future__ import (absolute_import, division, generators, nested_scopes, print_function, + unicode_literals, with_statement) + +from pants.backend.python.tasks import PytestRun +from pants.base.deprecated import deprecated_module + + +deprecated_module('1.7.0.dev0', 'Use pants.backend.python.tasks instead') + +PytestRun = PytestRun diff --git a/src/python/pants/backend/python/tasks2/python_binary_create.py b/src/python/pants/backend/python/tasks2/python_binary_create.py new file mode 100644 index 00000000000..7153a914a76 --- /dev/null +++ b/src/python/pants/backend/python/tasks2/python_binary_create.py @@ -0,0 +1,14 @@ +# coding=utf-8 +# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). + +from __future__ import (absolute_import, division, generators, nested_scopes, print_function, + unicode_literals, with_statement) + +from pants.backend.python.tasks import PythonBinaryCreate +from pants.base.deprecated import deprecated_module + + +deprecated_module('1.7.0.dev0', 'Use pants.backend.python.tasks instead') + +PythonBinaryCreate = PythonBinaryCreate diff --git a/src/python/pants/backend/python/tasks2/python_repl.py b/src/python/pants/backend/python/tasks2/python_repl.py new file mode 100644 index 00000000000..cdae4f98e27 --- /dev/null +++ b/src/python/pants/backend/python/tasks2/python_repl.py @@ -0,0 +1,14 @@ +# coding=utf-8 +# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). 
+ +from __future__ import (absolute_import, division, generators, nested_scopes, print_function, + unicode_literals, with_statement) + +from pants.backend.python.tasks import PythonRepl +from pants.base.deprecated import deprecated_module + + +deprecated_module('1.7.0.dev0', 'Use pants.backend.python.tasks instead') + +PythonRepl = PythonRepl diff --git a/src/python/pants/backend/python/tasks2/python_run.py b/src/python/pants/backend/python/tasks2/python_run.py new file mode 100644 index 00000000000..009b9474957 --- /dev/null +++ b/src/python/pants/backend/python/tasks2/python_run.py @@ -0,0 +1,14 @@ +# coding=utf-8 +# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). + +from __future__ import (absolute_import, division, generators, nested_scopes, print_function, + unicode_literals, with_statement) + +from pants.backend.python.tasks import PythonRun +from pants.base.deprecated import deprecated_module + + +deprecated_module('1.7.0.dev0', 'Use pants.backend.python.tasks instead') + +PythonRun = PythonRun diff --git a/src/python/pants/backend/python/tasks2.py b/src/python/pants/backend/python/tasks2/resolve_requirements.py similarity index 50% rename from src/python/pants/backend/python/tasks2.py rename to src/python/pants/backend/python/tasks2/resolve_requirements.py index ad292e6c29b..4140c5e47b2 100644 --- a/src/python/pants/backend/python/tasks2.py +++ b/src/python/pants/backend/python/tasks2/resolve_requirements.py @@ -5,20 +5,10 @@ from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) -from pants.backend.python.tasks import (GatherSources, PytestPrep, PytestRun, PythonBinaryCreate, - PythonRepl, PythonRun, ResolveRequirements, - SelectInterpreter, SetupPy) +from pants.backend.python.tasks import ResolveRequirements from pants.base.deprecated import deprecated_module deprecated_module('1.7.0.dev0', 'Use pants.backend.python.tasks instead') -SelectInterpreter = SelectInterpreter ResolveRequirements = ResolveRequirements -GatherSources = GatherSources -PythonRun = PythonRun -PytestPrep = PytestPrep -PytestRun = PytestRun -PythonRepl = PythonRepl -SetupPy = SetupPy -PythonBinaryCreate = PythonBinaryCreate diff --git a/src/python/pants/backend/python/tasks2/select_interpreter.py b/src/python/pants/backend/python/tasks2/select_interpreter.py new file mode 100644 index 00000000000..f70a459630f --- /dev/null +++ b/src/python/pants/backend/python/tasks2/select_interpreter.py @@ -0,0 +1,14 @@ +# coding=utf-8 +# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). + +from __future__ import (absolute_import, division, generators, nested_scopes, print_function, + unicode_literals, with_statement) + +from pants.backend.python.tasks import SelectInterpreter +from pants.base.deprecated import deprecated_module + + +deprecated_module('1.7.0.dev0', 'Use pants.backend.python.tasks instead') + +SelectInterpreter = SelectInterpreter diff --git a/src/python/pants/backend/python/tasks2/setup_py.py b/src/python/pants/backend/python/tasks2/setup_py.py new file mode 100644 index 00000000000..76b00423824 --- /dev/null +++ b/src/python/pants/backend/python/tasks2/setup_py.py @@ -0,0 +1,14 @@ +# coding=utf-8 +# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). 
+ +from __future__ import (absolute_import, division, generators, nested_scopes, print_function, + unicode_literals, with_statement) + +from pants.backend.python.tasks import SetupPy +from pants.base.deprecated import deprecated_module + + +deprecated_module('1.7.0.dev0', 'Use pants.backend.python.tasks instead') + +SetupPy = SetupPy From 28af149db8c504f18227e42ab82cc89aec14a5da Mon Sep 17 00:00:00 2001 From: Stu Hood Date: Fri, 2 Feb 2018 10:35:15 -0800 Subject: [PATCH 10/21] Enable workdir-max-build-entries by default. (#5423) ### Problem There is a setting to enable some automatic cleanup of the workdir, but it is not enabled by default. Twitter has used it for a while with no issues. ### Solution Enable `--workdir-max-build-entries=8` by default. --- src/python/pants/option/global_options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/pants/option/global_options.py b/src/python/pants/option/global_options.py index 89657d622fc..143e220a162 100644 --- a/src/python/pants/option/global_options.py +++ b/src/python/pants/option/global_options.py @@ -237,7 +237,7 @@ def register_options(cls, register): 'to process the non-erroneous subset of the input.') register('--cache-key-gen-version', advanced=True, default='200', recursive=True, help='The cache key generation. Bump this to invalidate every artifact for a scope.') - register('--workdir-max-build-entries', advanced=True, type=int, default=None, + register('--workdir-max-build-entries', advanced=True, type=int, default=8, help='Maximum number of previous builds to keep per task target pair in workdir. ' 'If set, minimum 2 will always be kept to support incremental compilation.') register('--max-subprocess-args', advanced=True, type=int, default=100, recursive=True, From c9feaa83ae04012d1ee090de89703a2ad6c73134 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Fri, 2 Feb 2018 20:04:40 -0800 Subject: [PATCH 11/21] Get version from version file not by running pants (#5428) This avoids needing to compile the whole rust engine and stuff, when just bundling up a pex. --- build-support/bin/release.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-support/bin/release.sh b/build-support/bin/release.sh index 11742a098e3..df01e7aaec3 100755 --- a/build-support/bin/release.sh +++ b/build-support/bin/release.sh @@ -16,7 +16,7 @@ function run_local_pants() { # NB: Pants core does not have the ability to change its own version, so we compute the # suffix here and mutate the VERSION_FILE to affect the current version. 
readonly HEAD_SHA=$(git rev-parse --verify HEAD) -readonly PANTS_STABLE_VERSION="$(run_local_pants --version 2>/dev/null)" +readonly PANTS_STABLE_VERSION="$(cat "${ROOT}/src/python/pants/VERSION")" readonly PANTS_UNSTABLE_VERSION="${PANTS_STABLE_VERSION}+${HEAD_SHA:0:8}" readonly DEPLOY_DIR="${ROOT}/dist/deploy" From e0aa9eb2b7f7315d738aab6c2dcb1a41b55ef831 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Fri, 2 Feb 2018 22:19:37 -0800 Subject: [PATCH 12/21] Release script allows wheel listing (#5431) --- build-support/bin/release.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/build-support/bin/release.sh b/build-support/bin/release.sh index df01e7aaec3..d6732932584 100755 --- a/build-support/bin/release.sh +++ b/build-support/bin/release.sh @@ -764,7 +764,7 @@ function usage() { fi } -while getopts "hdntcloep" opt; do +while getopts "hdntcloepw" opt; do case ${opt} in h) usage ;; d) debug="true" ;; @@ -774,6 +774,7 @@ while getopts "hdntcloep" opt; do o) list_owners ; exit $? ;; e) fetch_and_check_prebuilt_wheels ; exit $? ;; p) build_pex ; exit $? ;; + w) list_prebuilt_wheels ; exit $? ;; *) usage "Invalid option: -${OPTARG}" ;; esac done From de2add4efcad3ed8060a7bc6390bf08f425abbaa Mon Sep 17 00:00:00 2001 From: Benjy Weinberger Date: Sun, 4 Feb 2018 12:28:23 +0200 Subject: [PATCH 13/21] Deprecate IDE project generation tasks. (#5432) They aren't used or updated, yet they impose a significant maintenance burden because they have tentacles into so many parts of the codebase. Also removes all their integration tests, as those are very heavy, and not worth the trouble for code that is about to be deleted. --- pants.ini | 8 - .../pants/backend/project_info/register.py | 8 - .../pants/backend/project_info/tasks/BUILD | 1 + .../backend/project_info/tasks/ide_gen.py | 6 + .../backend/project_info/tasks/BUILD | 33 - .../tasks/test_eclipse_integration.py | 68 -- .../tasks/test_ensime_integration.py | 36 - .../tasks/test_idea_integration.py | 621 ------------------ 8 files changed, 7 insertions(+), 774 deletions(-) delete mode 100644 tests/python/pants_test/backend/project_info/tasks/test_eclipse_integration.py delete mode 100644 tests/python/pants_test/backend/project_info/tasks/test_ensime_integration.py delete mode 100644 tests/python/pants_test/backend/project_info/tasks/test_idea_integration.py diff --git a/pants.ini b/pants.ini index 8b2fe4cace3..a37c14af344 100644 --- a/pants.ini +++ b/pants.ini @@ -272,14 +272,6 @@ platforms: { } -[idea] -python_source_paths: ["src/python"] -python_test_paths: ["tests/python"] -python_lib_paths: ["3rdparty/python"] -scala_maximum_heap_size_mb: 1024 -java_maximum_heap_size_mb: 1024 - - [pants-releases] branch_notes: { 'master': 'src/python/pants/notes/master.rst', diff --git a/src/python/pants/backend/project_info/register.py b/src/python/pants/backend/project_info/register.py index 4b393ef67b0..1cc13c86a52 100644 --- a/src/python/pants/backend/project_info/register.py +++ b/src/python/pants/backend/project_info/register.py @@ -7,11 +7,8 @@ from pants.backend.project_info.tasks.dependencies import Dependencies from pants.backend.project_info.tasks.depmap import Depmap -from pants.backend.project_info.tasks.eclipse_gen import EclipseGen -from pants.backend.project_info.tasks.ensime_gen import EnsimeGen from pants.backend.project_info.tasks.export import Export from pants.backend.project_info.tasks.filedeps import FileDeps -from pants.backend.project_info.tasks.idea_gen import IdeaGen from 
pants.backend.project_info.tasks.idea_plugin_gen import IdeaPluginGen from pants.goal.task_registrar import TaskRegistrar as task @@ -20,13 +17,8 @@ def build_file_aliases(): pass -# TODO https://github.com/pantsbuild/pants/issues/604 register_goals def register_goals(): - # IDE support. - task(name='idea', action=IdeaGen).install() task(name='idea-plugin', action=IdeaPluginGen).install() - task(name='eclipse', action=EclipseGen).install() - task(name='ensime', action=EnsimeGen).install() task(name='export', action=Export).install() task(name='depmap', action=Depmap).install() diff --git a/src/python/pants/backend/project_info/tasks/BUILD b/src/python/pants/backend/project_info/tasks/BUILD index 03aa7de8c3b..adf3e71660f 100644 --- a/src/python/pants/backend/project_info/tasks/BUILD +++ b/src/python/pants/backend/project_info/tasks/BUILD @@ -126,6 +126,7 @@ python_library( 'src/python/pants/backend/python/targets', 'src/python/pants/base:build_environment', 'src/python/pants/base:build_file', + 'src/python/pants/base:deprecated', 'src/python/pants/base:exceptions', 'src/python/pants/build_graph', 'src/python/pants/util:desktop', diff --git a/src/python/pants/backend/project_info/tasks/ide_gen.py b/src/python/pants/backend/project_info/tasks/ide_gen.py index 4a916d0b18e..8a7dcb8727f 100644 --- a/src/python/pants/backend/project_info/tasks/ide_gen.py +++ b/src/python/pants/backend/project_info/tasks/ide_gen.py @@ -27,6 +27,7 @@ from pants.backend.python.targets.python_tests import PythonTests from pants.base.build_environment import get_buildroot from pants.base.build_file import BuildFile +from pants.base.deprecated import deprecated_module from pants.base.exceptions import TaskError from pants.base.project_tree_factory import get_project_tree from pants.build_graph.address import BuildFileAddress @@ -35,6 +36,11 @@ from pants.util.dirutil import safe_mkdir, safe_walk +deprecated_module('1.7.0.dev0', + 'IDE project generation functionality will be removed entirely. 
Use the '
+                  'Pants IntelliJ plugin, or discuss alternative solutions with the Pants team.')
+
+
 logger = logging.getLogger(__name__)
diff --git a/tests/python/pants_test/backend/project_info/tasks/BUILD b/tests/python/pants_test/backend/project_info/tasks/BUILD
index 0eb1f3a4d49..bca24001ae7 100644
--- a/tests/python/pants_test/backend/project_info/tasks/BUILD
+++ b/tests/python/pants_test/backend/project_info/tasks/BUILD
@@ -37,26 +37,6 @@ python_tests(
   ]
 )
 
-python_tests(
-  name = 'eclipse_integration',
-  sources = ['test_eclipse_integration.py'],
-  dependencies = [
-    'src/python/pants/util:contextutil',
-    'tests/python/pants_test:int-test',
-  ],
-  tags = {'integration'},
-)
-
-python_tests(
-  name = 'ensime_integration',
-  sources = ['test_ensime_integration.py'],
-  dependencies = [
-    'src/python/pants/util:contextutil',
-    'tests/python/pants_test:int-test',
-  ],
-  tags = {'integration'},
-)
-
 python_tests(
   name = 'export',
   sources = ['test_export.py'],
@@ -126,19 +106,6 @@ python_tests(
   ]
 )
 
-python_tests(
-  name = 'idea_integration',
-  sources = ['test_idea_integration.py'],
-  dependencies = [
-    ':resolve_jars_test_mixin',
-    '3rdparty/python/twitter/commons:twitter.common.collections',
-    'src/python/pants/util:contextutil',
-    'tests/python/pants_test:int-test',
-  ],
-  tags = {'integration'},
-  timeout = 300,
-)
-
 python_tests(
   name = 'idea_plugin_integration',
   sources = ['test_idea_plugin_integration.py'],
diff --git a/tests/python/pants_test/backend/project_info/tasks/test_eclipse_integration.py b/tests/python/pants_test/backend/project_info/tasks/test_eclipse_integration.py
deleted file mode 100644
index c944f050163..00000000000
--- a/tests/python/pants_test/backend/project_info/tasks/test_eclipse_integration.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# coding=utf-8
-# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
-# Licensed under the Apache License, Version 2.0 (see LICENSE).
- -from __future__ import (absolute_import, division, generators, nested_scopes, print_function, - unicode_literals, with_statement) - -import os - -from pants.util.contextutil import temporary_dir -from pants_test.pants_run_integration_test import PantsRunIntegrationTest - - -class EclipseIntegrationTest(PantsRunIntegrationTest): - - def _eclipse_test(self, specs, project_dir=os.path.join('.pants.d', 'tmp', 'test-eclipse'), - project_name='project'): - """Helper method that tests eclipse generation on the input spec list.""" - - if not os.path.exists(project_dir): - os.makedirs(project_dir) - with temporary_dir(root_dir=project_dir) as path: - pants_run = self.run_pants(['eclipse', '--project-dir={dir}'.format(dir=path)] + specs) - self.assert_success(pants_run) - - expected_files = ('.classpath', '.project',) - workdir = os.path.join(path, project_name) - self.assertTrue(os.path.exists(workdir), - 'Failed to find project_dir at {dir}.'.format(dir=workdir)) - self.assertTrue(all(os.path.exists(os.path.join(workdir, name)) - for name in expected_files)) - # return contents of .classpath so we can verify it - with open(os.path.join(workdir, '.classpath')) as classpath_f: - classpath = classpath_f.read() - # should be at least one input; if not we may have the wrong target path - self.assertIn(' entries in the project file""" - return self._get_new_module_root_manager(dom).getElementsByTagName('sourceFolder') - - def _get_excludeFolders(self, dom): - """Navigate the dom to return the list of all entries in the project file""" - return self._get_new_module_root_manager(dom).getElementsByTagName('excludeFolder') - - def _get_external_libraries(self, dom, type='CLASSES'): - """Return 'root' elements under , , or for """ - module = dom.getElementsByTagName('module')[0] - components = module.getElementsByTagName('component') - for component in components: - if component.getAttribute('name') == 'NewModuleRootManager': - for orderEntry in component.getElementsByTagName('orderEntry'): - for library in orderEntry.getElementsByTagName('library'): - if library.getAttribute('name') == 'external': - for library_type in library.getElementsByTagName(type): - return library_type.getElementsByTagName('root') - return None - - def _get_compiler_configuration(self, dom): - project = dom.getElementsByTagName('project')[0] - for component in project.getElementsByTagName('component'): - if component.getAttribute('name') == 'CompilerConfiguration': - return component - return None - - # Testing IDEA integration on lots of different targets which require different functionalities to - # make sure that everything that needs to happen for idea gen does happen. - # TODO(Garrett Malmquist): Actually validate the contents of the project files, rather than just - # checking if they exist. - def test_idea_on_alternate_project_dir(self): - alt_dir = os.path.join('.pants.d', 'tmp', 'some', 'random', 'directory', 'for', 'idea', 'stuff') - self._idea_test(['examples/src/java/org/pantsbuild/example/hello::'], project_dir=alt_dir) - - def test_idea_alternate_name(self): - alt_name = "alt-name" - self._idea_test(['examples/src/java/org/pantsbuild/example/hello::'], project_name=alt_name) - - def test_idea_on_protobuf(self): - self._idea_test(['examples/src/java/org/pantsbuild/example/protobuf::']) - - def test_idea_on_jaxb(self): # Make sure it works without ::, pulling deps as necessary. 
- self._idea_test(['examples/src/java/org/pantsbuild/example/jaxb/main']) - - def test_idea_on_unicode(self): - self._idea_test(['testprojects/src/java/org/pantsbuild/testproject/unicode::']) - - def test_idea_on_hello(self): - def do_check(path): - """Check to see that the project contains the expected source folders.""" - found_source_content = False - iml_file = os.path.join(path, 'project.iml') - self.assertTrue(os.path.exists(iml_file)) - dom = minidom.parse(iml_file) - expected_paths = ["file://" + os.path.join(get_buildroot(), _path) for _path in [ - 'examples/src/java/org/pantsbuild/example/hello', - 'examples/src/java/org/pantsbuild/example/hello/greet', - 'examples/src/java/org/pantsbuild/example/hello/main', - 'examples/src/java/org/pantsbuild/example/hello/simple', - 'examples/src/resources/org/pantsbuild/example/hello', - ]] - expected_java_resource = ["file://" + os.path.join(get_buildroot(), _path) for _path in [ - 'examples/src/resources/org/pantsbuild/example/hello', - ]] - remaining = set(expected_paths) - for sourceFolder in self._get_sourceFolders(dom): - found_source_content = True - self.assertEquals("False", sourceFolder.getAttribute('isTestSource')) - url = sourceFolder.getAttribute('url') - # Check is resource attribute is set correctly - if url in expected_java_resource: - self.assertEquals(sourceFolder.getAttribute('type'), IdeaIntegrationTest.RESOURCE, - msg="Type {c_type} does not match expected type {a_type} " - "for {url}".format(c_type=IdeaIntegrationTest.RESOURCE, url=url, - a_type=sourceFolder.getAttribute('type'))) - self.assertIn(url, remaining, - msg="Couldn't find url={url} in {expected}".format(url=url, - expected=expected_paths)) - remaining.remove(url) - self.assertTrue(found_source_content) - - self._idea_test(['examples/src/java/org/pantsbuild/example/hello::'], check_func=do_check) - - def test_idea_on_annotations(self): - self._idea_test(['examples/src/java/org/pantsbuild/example/annotation::']) - - def test_idea_on_all_examples(self): - self._idea_test(['examples/src/java/org/pantsbuild/example::']) - - def _check_javadoc_and_sources(self, path, library_name, with_sources=True, with_javadoc=True): - """ - :param path: path to the idea project directory - :param library_name: name of the library to check for (e.g. 
guava) - """ - def _get_module_library_orderEntry(dom): - module = dom.getElementsByTagName('module')[0] - components = module.getElementsByTagName('component') - for component in components: - if component.getAttribute('name') == 'NewModuleRootManager': - for orderEntry in component.getElementsByTagName('orderEntry'): - if orderEntry.getAttribute('type') == 'module-library': - for library in orderEntry.getElementsByTagName('library'): - if library.getAttribute('name') == 'external': - return library - return None - - iml_file = os.path.join(path, 'project.iml') - self.assertTrue(os.path.exists(iml_file)) - dom = minidom.parse(iml_file) - libraryElement = _get_module_library_orderEntry(dom) - sources = libraryElement.getElementsByTagName('SOURCES')[0] - sources_found = False - roots = sources.getElementsByTagName('root') - for root in roots: - url = root.getAttribute('url') - if re.match(r'.*\bexternal-libsources\b.*{library_name}\b.*-sources\.jar\b.*$' - .format(library_name=library_name), url): - sources_found = True - break - if with_sources: - self.assertTrue(sources_found) - else: - self.assertFalse(sources_found) - - javadoc = libraryElement.getElementsByTagName('JAVADOC')[0] - javadoc_found = False - for root in javadoc.getElementsByTagName('root'): - url = root.getAttribute('url') - if re.match(r'.*\bexternal-libjavadoc\b.*{library_name}\b.*-javadoc\.jar\b.*$' - .format(library_name=library_name), url): - javadoc_found = True - break - if with_javadoc: - self.assertTrue(javadoc_found) - else: - self.assertFalse(javadoc_found) - - # NOTE(Garrett Malmquist): The test below assumes that the annotation example's dependency on - # guava will never be removed. If it ever is, these tests will need to be changed to check for a - # different 3rdparty jar library. - # Testing for: - # - # - # ... - # - # - # - # - # - # - # - # - def test_idea_external_javadoc_and_sources(self): - def do_check(path): - self._check_javadoc_and_sources(path, 'guava') - - def do_check_no_sources(path): - self._check_javadoc_and_sources(path, 'guava', with_sources=False) - - def do_check_no_javadoc(path): - self._check_javadoc_and_sources(path, 'guava', with_javadoc=False) - - self._idea_test(['examples/src/java/org/pantsbuild/example/annotation::'], - check_func=do_check) - self._idea_test(['examples/src/java/org/pantsbuild/example/annotation::', '--idea-no-source-jars'], - check_func=do_check_no_sources) - self._idea_test(['examples/src/java/org/pantsbuild/example/annotation::', '--idea-no-javadoc-jars'], - check_func=do_check_no_javadoc) - - def test_idea_on_java_sources(self): - self._idea_test(['testprojects/src/scala/org/pantsbuild/testproject/javasources::']) - - def test_idea_missing_sources(self): - """Test what happens if we try to fetch sources from a jar that doesn't have any.""" - self._idea_test(['testprojects/src/java/org/pantsbuild/testproject/missing_sources']) - - def test_idea_on_thriftdeptest(self): - self._idea_test(['testprojects/src/java/org/pantsbuild/testproject/thriftdeptest::']) - - def test_idea_on_scaladepsonboth(self): - self._idea_test(['testprojects/src/scala/org/pantsbuild/testproject/scaladepsonboth::']) - - def test_idea_on_maven_layout(self): - def do_check(path): - """ - The contents of the .iml file should have sourceFolder entries that all look like: - - - - - ... 
- """ - found_source_content = False - iml_file = os.path.join(path, 'project.iml') - self.assertTrue(os.path.exists(iml_file)) - dom = minidom.parse(iml_file) - for sourceFolder in self._get_sourceFolders(dom): - found_source_content = True - url = sourceFolder.getAttribute('url') - is_test_source = sourceFolder.getAttribute('isTestSource') - if url.endswith("src/main/java") or url.endswith("src/main/resources"): - self.assertEquals("False", is_test_source, - msg="wrong test flag: url={url} isTestSource={is_test_source}" - .format(url=url, is_test_source=is_test_source)) - elif url.endswith("src/test/java") or url.endswith("src/test/resources"): - self.assertEquals("True", is_test_source, - msg="wrong test flag: url={url} isTestSource={is_test_source}" - .format(url=url, is_test_source=is_test_source)) - else: - self.fail("Unexpected sourceContent tag: url={url} isTestSource={is_test_source}" - .format(url=url, is_test_source=is_test_source)) - self.assertTrue(found_source_content) - - self._idea_test(['testprojects/maven_layout/resource_collision::', '--idea-use-source-root'], - check_func=do_check) - - def _test_idea_language_level(self, targets, expected): - def do_check(path): - iml_file = os.path.join(path, 'project.iml') - dom = minidom.parse(iml_file) - module = dom.getElementsByTagName('module')[0] - for component in module.getElementsByTagName('component'): - if component.getAttribute('name') == 'NewModuleRootManager': - received = component.getAttribute('LANGUAGE_LEVEL') - self.assertEquals(expected, received, - 'Language level was {0} instead of {1}.'.format(received, expected)) - break - self._idea_test(targets, check_func=do_check) - - def test_idea_language_level_homogenous(self): - self._test_idea_language_level( - targets=['testprojects/src/java/org/pantsbuild/testproject/targetlevels/java7'], - expected='JDK_1_7' - ) - - def test_idea_language_level_heterogenous(self): - self._test_idea_language_level( - targets=['testprojects/src/java/org/pantsbuild/testproject/targetlevels/java7', - 'testprojects/src/java/org/pantsbuild/testproject/targetlevels/java8'], - expected='JDK_1_8' - ) - - def test_idea_exclude_maven_targets(self): - def do_check(path): - """Expect to see at least these two excludeFolder entries: - - - - - And this source entry: - - """ - found_source_content = False - iml_file = os.path.join(path, 'project.iml') - self.assertTrue(os.path.exists(iml_file)) - dom = minidom.parse(iml_file) - for sourceFolder in self._get_sourceFolders(dom): - found_source_content = True - url = sourceFolder.getAttribute('url') - self.assertTrue(url.endswith("testprojects/maven_layout/maven_and_pants/src/main/java"), - msg="Unexpected url={url}".format(url=url)) - self.assertEquals("False", sourceFolder.getAttribute('isTestSource')) - self.assertTrue(found_source_content) - - expected = ["testprojects/maven_layout/protolib-test/target", - "testprojects/maven_layout/maven_and_pants/target"] - found_exclude_folders = [excludeFolder.getAttribute('url') - for excludeFolder in self._get_excludeFolders(dom)] - for suffix in expected: - found = False - for url in found_exclude_folders: - if url.endswith(suffix): - found = True - break - self.assertTrue(found, msg="suffix {suffix} not found in {foundExcludeFolders}" - .format(suffix=suffix, foundExcludeFolders=found_exclude_folders)) - # Test together with --idea-use-source-root because that makes sense in a Maven environment - self._idea_test(['testprojects/maven_layout/maven_and_pants::', '--idea-exclude-maven-target', - 
'--idea-use-source-root'], - check_func=do_check) - - def test_idea_excludeFolders(self): - def assertExpectedInExcludeFolders(path, expected): - iml_file = os.path.join(path, 'project.iml') - self.assertTrue(os.path.exists(iml_file)) - dom = minidom.parse(iml_file) - found_exclude_folders = [excludeFolder.getAttribute('url') - for excludeFolder in self._get_excludeFolders(dom)] - for suffix in expected: - found = False - for url in found_exclude_folders: - if url.endswith(suffix): - found = True - break - self.assertTrue(found, msg="suffix {suffix} not found in {foundExcludeFolders}" - .format(suffix=suffix, foundExcludeFolders=found_exclude_folders)) - - def do_check_default(path): - assertExpectedInExcludeFolders(path, ["/compile", "/ivy", "/python", "/resources"]) - - def do_check_override(path): - assertExpectedInExcludeFolders(path, ["exclude-folder-sentinel"]) - - self._idea_test(['examples/src/java/org/pantsbuild/example/hello::'], check_func=do_check_default) - self._idea_test(['examples/src/java/org/pantsbuild/example/hello::'], check_func=do_check_override, - config={ - 'idea': {'exclude_folders': ['exclude-folder-sentinel']} - }) - - def test_all_targets(self): - self._idea_test([ - 'src::', 'tests::', 'examples::', 'testprojects::', - '--gen-protoc-import-from-root', - # IdeaGen resolves source jars by default, which causes a collision with these targets because - # they explicitly include jersey.jersey's source jar. - '--exclude-target-regexp=testprojects/3rdparty/managed:jersey.jersey.sources', - '--exclude-target-regexp=testprojects/3rdparty/managed:example-dependee', - # This target fails by design. - '--exclude-target-regexp=testprojects/src/antlr/python/test:antlr_failure', - '--jvm-platform-default-platform=java7' - ]) - - def test_ivy_classifiers(self): - def do_check(path): - """Check to see that the project contains the expected jar files.""" - def check_jars(jars, expected_jars): - # Make sure the .jar files are present on disk - for jar in expected_jars: - self.assertTrue(os.path.exists(jar), '{} does not exist.'.format(jar)) - to_find = set('jar://{}!/'.format(jar) for jar in expected_jars) - for external_library in jars: - to_find.discard(external_library.getAttribute('url')) - self.assertEqual(set(), to_find) - - iml_file = os.path.join(path, 'project.iml') - self.assertTrue(os.path.exists(iml_file)) - dom = minidom.parse(iml_file) - base = os.path.join(get_buildroot(), path) - check_jars(self._get_external_libraries(dom, type='CLASSES'), - [os.path.join(base, 'external-libs', jar_path) - for jar_path in ['avro-1.7.7.jar', 'avro-1.7.7-tests.jar']]) - check_jars(self._get_external_libraries(dom, type='SOURCES'), - [os.path.join(base, 'external-libsources', 'avro-1.7.7-sources.jar')]) - check_jars(self._get_external_libraries(dom, type='JAVADOC'), - [os.path.join(base, 'external-libjavadoc', 'avro-1.7.7-javadoc.jar')]) - - self._idea_test(['testprojects/tests/java/org/pantsbuild/testproject/ivyclassifier::'], - check_func=do_check) - - def test_resource_only_content_type(self): - """Folders containing just resources() targets should be marked as Resources in IntelliJ""" - - def do_check(path): - """The contents of the .iml file should certain sourceFolder entries: - - - - - ... 
- """ - found = set() - iml_file = os.path.join(path, 'project.iml') - self.assertTrue(os.path.exists(iml_file)) - dom = minidom.parse(iml_file) - for sourceFolder in self._get_sourceFolders(dom): - url = sourceFolder.getAttribute('url') - is_test_source = sourceFolder.getAttribute('isTestSource') - type_attr = sourceFolder.getAttribute('type') - url = re.sub(r'^.*/testprojects/', 'testprojects/', url) - found.add(url) - if url == 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/code': - self.assertEquals('', type_attr) - self.assertEquals('False', is_test_source) - if url == 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly/code': - self.assertEquals('', type_attr) - self.assertEquals('True', is_test_source) - if url == 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/resources_only': - self.assertEquals('java-resource', type_attr) - self.assertEquals('False', is_test_source) - # TODO(Eric Ayers) tests/resources/.../idearesourcesonly : this directory has no - # junit_tests depending on a target, so it is assumed to be plain resources. - # Since this is under .../tests, humans know this is supposed to be a test only - # resource. Right now we don't have a good way of communicating - # that to the idea goal other than inferring from the presence of junit_tests in - # source_root, which may not be a reliable indicator. - if url == 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly/resources_only': - self.assertEquals('java-resource', type_attr) - self.assertEquals('False', is_test_source) - if url == 'testprojects/src/resources/org/pantsbuild/testproject/idearesourcesonly': - self.assertEquals('java-resource', type_attr) - self.assertEquals('False', is_test_source) - if url == 'testprojects/tests/resources/org/pantsbuild/testproject/idearesourcesonly': - self.assertEquals('java-test-resource', type_attr) - self.assertEquals('True', is_test_source) - - self.assertEquals(set([ - 'testprojects/src/resources/org/pantsbuild/testproject/idearesourcesonly', - 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/code', - 'testprojects/tests/resources/org/pantsbuild/testproject/idearesourcesonly', - 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly/code', - 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/resources_only', - 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly/resources_only' - ]), found) - - self._idea_test([ - 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly::', - 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly::' - ], check_func=do_check) - - def test_resource_and_code_content_type(self): - """Folders containing just resources() targets should be marked as Resources in IntelliJ. - - This test introduces more cases, like code co-mingled with resources targets, and test targets - that rely on non-test resources folders - """ - - def do_check(path): - """The contents of the .iml file should certain sourceFolder entries: - - - - - ... 
- """ - found = set() - iml_file = os.path.join(path, 'project.iml') - self.assertTrue(os.path.exists(iml_file)) - dom = minidom.parse(iml_file) - for sourceFolder in self._get_sourceFolders(dom): - url = sourceFolder.getAttribute('url') - is_test_source = sourceFolder.getAttribute('isTestSource') - type_attr = sourceFolder.getAttribute('type') - url = re.sub(r'^.*/testprojects/', 'testprojects/', url) - found.add(url) - if url == 'testprojects/src/resources/org/pantsbuild/testproject/ideacodeandresources': - self.assertEquals('java-resource', type_attr) - self.assertEquals('False', is_test_source) - if url == 'testprojects/tests/resources/org/pantsbuild/testproject/ideacodeandresources': - self.assertEquals('java-test-resource', type_attr) - self.assertEquals('True', is_test_source) - if url == 'testprojects/src/java/org/pantsbuild/testproject/ideacodeandresources': - self.assertEquals('', type_attr) - self.assertEquals('False', is_test_source) - if url == 'testprojects/tests/java/org/pantsbuild/testproject/ideacodeandresources': - self.assertEquals('', type_attr) - self.assertEquals('True', is_test_source) - - self.assertEquals(set([ - 'testprojects/src/resources/org/pantsbuild/testproject/ideacodeandresources', - 'testprojects/src/java/org/pantsbuild/testproject/ideacodeandresources', - 'testprojects/tests/resources/org/pantsbuild/testproject/ideacodeandresources', - 'testprojects/tests/java/org/pantsbuild/testproject/ideacodeandresources', - ]), found) - - self._idea_test([ - 'testprojects/src/java/org/pantsbuild/testproject/ideacodeandresources::', - 'testprojects/tests/java/org/pantsbuild/testproject/ideacodeandresources::' - ], check_func=do_check) - - def test_library_and_test_code(self): - """Folders containing both junit_tests() and java_library() targets should be Test folders.""" - - def do_check(path): - """The contents of the .iml file should certain sourceFolder entries: - - - """ - found = set() - iml_file = os.path.join(path, 'project.iml') - self.assertTrue(os.path.exists(iml_file)) - dom = minidom.parse(iml_file) - for sourceFolder in self._get_sourceFolders(dom): - url = sourceFolder.getAttribute('url') - is_test_source = sourceFolder.getAttribute('isTestSource') - type_attr = sourceFolder.getAttribute('type') - url = re.sub(r'^.*/testprojects/', 'testprojects/', url) - found.add(url) - if url == 'testprojects/tests/java/org/pantsbuild/testproject/ideatestsandlib': - self.assertEquals('', type_attr) - self.assertEquals('True', is_test_source) - - self.assertEquals(set([ - 'testprojects/tests/java/org/pantsbuild/testproject/ideatestsandlib', - ]), found) - - self._idea_test([ - 'testprojects/tests/java/org/pantsbuild/testproject/ideatestsandlib::' - ], check_func=do_check) - - def test_annotation_processor(self): - """Turn on annotation processor support for AutoValue in IntellliJ""" - - def do_check(path): - iml_file = os.path.join(path, 'project.iml') - iml_dom = minidom.parse(iml_file) - found_paths = set() - for sourceFolder in self._get_sourceFolders(iml_dom): - url = sourceFolder.getAttribute('url') - source_path = re.sub(r'^.*/generated', 'generated', url) - found_paths.add(source_path) - self.assertIn("generated", found_paths) - self.assertIn("generated_tests", found_paths) - - ipr_file = os.path.join(path, 'project.ipr') - ipr_dom = minidom.parse(ipr_file) - annotation_processing = self._get_compiler_configuration(ipr_dom).getElementsByTagName( - 'annotationProcessing')[0] - profile = annotation_processing.getElementsByTagName('profile')[0] - 
self.assertEquals('True', profile.getAttribute('enabled')) - self.assertEquals('true', profile.getAttribute('default')) - self.assertEquals('Default', profile.getAttribute('name')) - processor_path = profile.getElementsByTagName('processorPath')[0] - self.assertEquals('true', processor_path.getAttribute('useClasspath')) - source_output_dir = profile.getElementsByTagName('sourceOutputDir')[0] - self.assertEquals('../../../generated', source_output_dir.getAttribute('name')) - source_test_output_dir = profile.getElementsByTagName('sourceTestOutputDir')[0] - self.assertEquals('../../../generated_tests', source_test_output_dir.getAttribute('name')) - found_processors = set() - for processor in profile.getElementsByTagName('processor'): - found_processors.add(processor.getAttribute('name')) - self.assertEquals({'com.google.auto.value.processor.AutoAnnotationProcessor', - 'com.google.auto.value.processor.AutoValueBuilderProcessor', - 'com.google.auto.value.processor.AutoValueProcessor'}, - found_processors) - - self._idea_test([ - 'examples/src/java/org/pantsbuild/example/autovalue::', - '--idea-annotation-processing-enabled', - '--idea-annotation-processor=com.google.auto.value.processor.AutoAnnotationProcessor', - '--idea-annotation-processor=com.google.auto.value.processor.AutoValueBuilderProcessor', - '--idea-annotation-processor=com.google.auto.value.processor.AutoValueProcessor', - ], check_func=do_check) From fa90a182161d411f71ddd3a20725b44eb65475fe Mon Sep 17 00:00:00 2001 From: Benjy Weinberger Date: Mon, 5 Feb 2018 16:24:07 +0200 Subject: [PATCH 14/21] Make the Kythe Java indexer emit JVM nodes. (#5435) A recent Kythe change optionally emits "generates" edges from class/method/field definitions to nodes representing the named JVM entities they define. This is a step towards being able to model JVM linkage between different JVM languages. --- BUILD.tools | 4 ++-- build-support/ivy/ivysettings.xml | 4 ++-- .../src/python/pants/contrib/codeanalysis/tasks/index_java.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/BUILD.tools b/BUILD.tools index 3a71bce03b9..5d5aca91dd7 100644 --- a/BUILD.tools +++ b/BUILD.tools @@ -41,12 +41,12 @@ jar_library(name = 'scrooge-linter', # for more info. jar_library( name='kythe-extractor', - jars = [jar(org='kythe', name='extractors/javac_extractor', rev='v0.0.26-snowchain3')] + jars = [jar(org='kythe', name='javac_extractor', rev='v0.0.26.5-snowchain037-82964297aef')] ) jar_library( name='kythe-indexer', - jars = [jar(org='kythe', name='indexers/java_indexer', rev='v0.0.26-snowchain3')] + jars = [jar(org='kythe', name='java_indexer', rev='v0.0.26.5-snowchain037-82964297aef')] ) # Runtime dependencies for the Avro Java generated code. diff --git a/build-support/ivy/ivysettings.xml b/build-support/ivy/ivysettings.xml index b5420cdd25e..4000ba3778a 100644 --- a/build-support/ivy/ivysettings.xml +++ b/build-support/ivy/ivysettings.xml @@ -12,7 +12,7 @@ Licensed under the Apache License, Version 2.0 (see LICENSE). - + @@ -70,7 +70,7 @@ Licensed under the Apache License, Version 2.0 (see LICENSE). 
-
+

diff --git a/contrib/codeanalysis/src/python/pants/contrib/codeanalysis/tasks/index_java.py b/contrib/codeanalysis/src/python/pants/contrib/codeanalysis/tasks/index_java.py
index 0b52a79f690..bc660e77c7f 100644
--- a/contrib/codeanalysis/src/python/pants/contrib/codeanalysis/tasks/index_java.py
+++ b/contrib/codeanalysis/src/python/pants/contrib/codeanalysis/tasks/index_java.py
@@ -26,7 +26,7 @@ def subsystem_dependencies(cls):
 
   @classmethod
   def implementation_version(cls):
     # Bump this version to invalidate all past artifacts generated by this task.
-    return super(IndexJava, cls).implementation_version() + [('IndexJava', 7), ]
+    return super(IndexJava, cls).implementation_version() + [('IndexJava', 8), ]
 
   @classmethod
   def product_types(cls):
@@ -72,7 +72,7 @@ def _index(self, vt, indexer_cp, jvm_options):
     kindex_file = self.context.products.get_data('kindex_files').get(vt.target)
     if not kindex_file:
       raise TaskError('No .kindex file found for {}'.format(vt.target.address.spec))
-    args = [kindex_file, '--out', self._entries_file(vt)]
+    args = [kindex_file, '--emit_jvm', 'semantic', '--out', self._entries_file(vt)]
     result = self.runjava(classpath=indexer_cp, main=self._KYTHE_INDEXER_MAIN,
                           jvm_options=jvm_options, args=args, workunit_name='kythe-index',

From 01f1064eccacb6730879cb163ffc264f613f191c Mon Sep 17 00:00:00 2001
From: Nick Howard
Date: Mon, 5 Feb 2018 09:16:16 -0800
Subject: [PATCH 15/21] [strict-deps][build-graph] add new predicate to build
 graph traversal; Update Target.strict_deps to use it (#5150)

### Problem

This patch fixes #4952, the case where compiles using strict_deps ignore scope constraints on dependencies. The fundamental issue is that strict_deps performs its own build graph traversal instead of using the regular one. Folding it into the regular traversal requires a different, more general predicate in the build graph traversal methods; the existing one, `leveled_predicate`, is too specific.

### Solution

I did the work described in #5149, then converted `Target.strict_dependencies` to take advantage of it. The work was to add a new, more general predicate, called `dep_predicate`, which is applied as each dependency edge is expanded. That generality makes it possible to construct a predicate that implements strict deps while going through the existing traversal code. `leveled_predicate` still works, but it is deprecated and no longer used anywhere in the pants repo.

### Result

Scopes are handled properly by strict_deps.
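
To make the edge-level filtering concrete, here is a minimal sketch of a BFS closure walk driven by a `dep_predicate` (illustrative only, not the actual `BuildGraph` code; `deps_of` stands in for the graph's adjacency lookup):

```python
from collections import deque

def closure_bfs(roots, deps_of, dep_predicate):
  """Yield the transitive closure of `roots`, expanding an edge (target, dep)
  only when dep_predicate(target, dep) allows it."""
  roots = list(roots)
  seen = set(roots)
  to_walk = deque(roots)
  while to_walk:
    target = to_walk.popleft()
    yield target
    for dep in deps_of(target):
      # Unlike a depth-based predicate, dep_predicate sees the edge's origin,
      # so an intransitive dependency can be kept when `target` is a root
      # target but pruned everywhere else.
      if dep not in seen and dep_predicate(target, dep):
        seen.add(dep)
        to_walk.append(dep)
```

A strict-deps or scope-aware predicate then only needs to inspect the two endpoints of each edge (for example, `dep.scope` relative to `target`), rather than re-implementing the traversal.
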
--- src/python/pants/build_graph/build_graph.py | 110 ++++++++++--- src/python/pants/build_graph/target.py | 147 ++++++++---------- src/python/pants/task/simple_codegen_task.py | 2 +- .../build_graph/test_build_graph.py | 55 +++++++ .../pants_test/build_graph/test_scopes.py | 2 +- .../pants_test/build_graph/test_target.py | 11 +- .../projects/test_testprojects_integration.py | 1 + 7 files changed, 222 insertions(+), 106 deletions(-) diff --git a/src/python/pants/build_graph/build_graph.py b/src/python/pants/build_graph/build_graph.py index cb1010aadc4..aceca2ebef3 100644 --- a/src/python/pants/build_graph/build_graph.py +++ b/src/python/pants/build_graph/build_graph.py @@ -12,6 +12,7 @@ from twitter.common.collections import OrderedSet +from pants.base.deprecated import deprecated_conditional from pants.build_graph.address import Address from pants.build_graph.address_lookup_error import AddressLookupError from pants.build_graph.target import Target @@ -49,8 +50,9 @@ def __init__(self, addr): super(BuildGraph.ManualSyntheticTargetError, self).__init__( 'Found a manually-defined target at synthetic address {}'.format(addr.spec)) - class DepthAgnosticWalk(object): - """This is a utility class to aid in graph traversals that don't care about the depth.""" + + class NoDepPredicateWalk(object): + """This is a utility class to aid in graph traversals that don't have predicates on dependency edges.""" def __init__(self): self._worked = set() @@ -74,12 +76,28 @@ def expand_once(self, vertex, _): self._expanded.add(vertex) return True - class DepthAwareWalk(DepthAgnosticWalk): + def dep_predicate(self, target, dep, level): + return True + + + class DepthAgnosticWalk(NoDepPredicateWalk): + """This is a utility class to aid in graph traversals that don't care about the depth.""" + + def __init__(self, dep_predicate): + super(BuildGraph.DepthAgnosticWalk, self).__init__() + self._dep_predicate = dep_predicate + + def dep_predicate(self, target, dep, level): + return self._dep_predicate(target, dep) + + + class DepthAwareWalk(NoDepPredicateWalk): """This is a utility class to aid in graph traversals that care about the depth.""" - def __init__(self): + def __init__(self, leveled_predicate): super(BuildGraph.DepthAwareWalk, self).__init__() self._expanded = defaultdict(set) + self._leveled_predicate = leveled_predicate def expand_once(self, vertex, level): """Returns True if this (vertex, level) pair has never been expanded, and False otherwise. @@ -92,6 +110,9 @@ def expand_once(self, vertex, level): self._expanded[vertex].add(level) return True + def dep_predicate(self, target, dep, level): + return self._leveled_predicate(dep, level) + @staticmethod def closure(*vargs, **kwargs): """See `Target.closure_for_targets` for arguments. @@ -329,8 +350,38 @@ def sorted_targets(self): """ return sort_targets(self.targets()) - def walk_transitive_dependency_graph(self, addresses, work, predicate=None, postorder=False, - leveled_predicate=None): + def _walk_factory(self, dep_predicate, leveled_predicate): + """Construct the right context object for managing state during a transitive walk.""" + deprecated_conditional( + lambda: leveled_predicate is not None, + '1.7.0.dev0', + 'leveled_predicate', + ''' + Deprecated property leveled_predicate used. Please migrate to using dep_predicate. 
+ ''', + stacklevel=5 + ) + if leveled_predicate and dep_predicate: + raise ValueError('Cannot specify both leveled_predicate and dep_predicate') + # Use the DepthAgnosticWalk if we can, because DepthAwareWalk does a bit of extra work that can + # slow things down by few millis. + + walk = None + if leveled_predicate: + walk = self.DepthAwareWalk(leveled_predicate) + elif dep_predicate: + walk = self.DepthAgnosticWalk(dep_predicate) + else: + walk = self.NoDepPredicateWalk() + return walk + + def walk_transitive_dependency_graph(self, + addresses, + work, + predicate=None, + postorder=False, + leveled_predicate=None, + dep_predicate=None): """Given a work function, walks the transitive dependency closure of `addresses` using DFS. :API: public @@ -344,14 +395,16 @@ def walk_transitive_dependency_graph(self, addresses, work, predicate=None, post out of the closure. If it is given, any Target which fails the predicate will not be walked, nor will its dependencies. Thus predicate effectively trims out any subgraph that would only be reachable through Targets that fail the predicate. - :param function leveled_predicate: Behaves identically to predicate, but takes the depth of the + :param function dep_predicate: Takes two parameters, the current target and the dependency of + the current target. If this parameter is not given, no dependencies will be filtered + when traversing the closure. If it is given, when the predicate fails, the edge to the dependency + will not be expanded. + :param function leveled_predicate: Deprecated. Behaves identically to predicate, but takes the depth of the target in the search tree as a second parameter, and it is checked just before a dependency is expanded. """ - # Use the DepthAgnosticWalk if we can, because DepthAwareWalk does a bit of extra work that can - # slow things down by few millis. - walker = self.DepthAwareWalk if leveled_predicate else self.DepthAgnosticWalk - walk = walker() + walk = self._walk_factory(dep_predicate, leveled_predicate) + def _walk_rec(addr, level=0): # If we've followed an edge to this address, stop recursing. if not walk.expand_once(addr, level): @@ -368,8 +421,7 @@ def _walk_rec(addr, level=0): for dep_address in self._target_dependencies_by_address[addr]: if walk.expanded_or_worked(dep_address): continue - if not leveled_predicate \ - or leveled_predicate(self._target_by_address[dep_address], level): + if walk.dep_predicate(target, self._target_by_address[dep_address], level): _walk_rec(dep_address, level + 1) if postorder and walk.do_work_once(addr): @@ -438,7 +490,11 @@ def transitive_subgraph_of_addresses(self, addresses, *vargs, **kwargs): out of the closure. If it is given, any Target which fails the predicate will not be walked, nor will its dependencies. Thus predicate effectively trims out any subgraph that would only be reachable through Targets that fail the predicate. - :param function leveled_predicate: Behaves identically to predicate, but takes the depth of the + :param function dep_predicate: Takes two parameters, the current target and the dependency of + the current target. If this parameter is not given, no dependencies will be filtered + when traversing the closure. If it is given, when the predicate fails, the edge to the dependency + will not be expanded. + :param function leveled_predicate: Deprecated. Behaves identically to predicate, but takes the depth of the target in the search tree as a second parameter, and it is checked just before a dependency is expanded. 
""" @@ -448,7 +504,11 @@ def transitive_subgraph_of_addresses(self, addresses, *vargs, **kwargs): **kwargs) return ret - def transitive_subgraph_of_addresses_bfs(self, addresses, predicate=None, leveled_predicate=None): + def transitive_subgraph_of_addresses_bfs(self, + addresses, + predicate=None, + leveled_predicate=None, + dep_predicate=None): """Returns the transitive dependency closure of `addresses` using BFS. :API: public @@ -458,15 +518,17 @@ def transitive_subgraph_of_addresses_bfs(self, addresses, predicate=None, levele out of the closure. If it is given, any Target which fails the predicate will not be walked, nor will its dependencies. Thus predicate effectively trims out any subgraph that would only be reachable through Targets that fail the predicate. - :param function leveled_predicate: Behaves identically to predicate, but takes the depth of the + :param function dep_predicate: Takes two parameters, the current target and the dependency of + the current target. If this parameter is not given, no dependencies will be filtered + when traversing the closure. If it is given, when the predicate fails, the edge to the dependency + will not be expanded. + :param function leveled_predicate: Deprecated. Behaves identically to predicate, but takes the depth of the target in the search tree as a second parameter, and it is checked just before a dependency is expanded. """ + walk = self._walk_factory(dep_predicate, leveled_predicate) + ordered_closure = OrderedSet() - # Use the DepthAgnosticWalk if we can, because DepthAwareWalk does a bit of extra work that can - # slow things down by few millis. - walker = self.DepthAwareWalk if leveled_predicate else self.DepthAgnosticWalk - walk = walker() to_walk = deque((0, addr) for addr in addresses) while len(to_walk) > 0: level, address = to_walk.popleft() @@ -479,11 +541,11 @@ def transitive_subgraph_of_addresses_bfs(self, addresses, predicate=None, levele continue if walk.do_work_once(address): ordered_closure.add(target) - for addr in self._target_dependencies_by_address[address]: - if walk.expanded_or_worked(addr): + for dep_address in self._target_dependencies_by_address[address]: + if walk.expanded_or_worked(dep_address): continue - if not leveled_predicate or leveled_predicate(self._target_by_address[addr], level): - to_walk.append((level + 1, addr)) + if walk.dep_predicate(target, self._target_by_address[dep_address], level): + to_walk.append((level + 1, dep_address)) return ordered_closure @abstractmethod diff --git a/src/python/pants/build_graph/target.py b/src/python/pants/build_graph/target.py index 5c5d5e77f95..df6e8e697c9 100644 --- a/src/python/pants/build_graph/target.py +++ b/src/python/pants/build_graph/target.py @@ -33,49 +33,6 @@ logger = logging.getLogger(__name__) -class SyntheticTargetNotFound(Exception): - pass - - -def _get_synthetic_target(target, codegen_dep): - """Find a codegen_dep's corresponding synthetic target in the dependencies of the given target. - - TODO: This lookup represents a workaround to avoid including logic about exports at codegen time. - We should likely make SimpleCodegenTask aware of exports, so that it can clone exports to - generated targets while updating the exports with relevant synthetic target specs. 
- see https://github.com/pantsbuild/pants/issues/4936 - """ - for dep in target.dependencies: - if dep != codegen_dep and dep.is_synthetic and dep.derived_from == codegen_dep: - return dep - raise SyntheticTargetNotFound('No synthetic target is found for thrift target: {}'.format(codegen_dep)) - - -def _resolve_strict_dependencies(target, dep_context): - for declared in target.dependencies: - if type(declared) in dep_context.alias_types: - # Is an alias. Recurse to expand. - for r in declared.strict_dependencies(dep_context): - yield r - else: - yield declared - - for export in _resolve_exports(declared, dep_context): - yield export - - -def _resolve_exports(target, dep_context): - for export in target.exports(dep_context): - if type(export) in dep_context.alias_types: - # If exported target is an alias, expand its dependencies. - for dep in export.strict_dependencies(dep_context): - yield dep - else: - yield export - for exp in _resolve_exports(export, dep_context): - yield exp - - class AbstractTarget(object): @classmethod @@ -267,13 +224,16 @@ def maybe_readable_combine_ids(cls, ids): return ids[0] if len(ids) == 1 else cls.combine_ids(ids) @classmethod - def _closure_predicate(cls, include_scopes=None, exclude_scopes=None, respect_intransitive=False): + def _closure_dep_predicate(cls, roots, include_scopes=None, exclude_scopes=None, respect_intransitive=False): if not respect_intransitive and include_scopes is None and exclude_scopes is None: return None - def predicate(target, level): - if not target.scope.in_scope(include_scopes=include_scopes, exclude_scopes=exclude_scopes): + + root_lookup = set(roots) + def predicate(target, dep_target): + if not dep_target.scope.in_scope(include_scopes=include_scopes, exclude_scopes=exclude_scopes): return False - if respect_intransitive and not target.transitive and level > 0: + # dep_target.transitive == False means that dep_target is only included if target is a root target. + if respect_intransitive and not dep_target.transitive and target not in root_lookup: return False return True return predicate @@ -302,7 +262,8 @@ def closure_for_targets(cls, target_roots, exclude_scopes=None, include_scopes=N build_graph = target_roots[0]._build_graph addresses = [target.address for target in target_roots] - leveled_predicate = cls._closure_predicate(include_scopes=include_scopes, + dep_predicate = cls._closure_dep_predicate(target_roots, + include_scopes=include_scopes, exclude_scopes=exclude_scopes, respect_intransitive=respect_intransitive) closure = OrderedSet() @@ -312,12 +273,12 @@ def closure_for_targets(cls, target_roots, exclude_scopes=None, include_scopes=N addresses=addresses, work=closure.add, postorder=postorder, - leveled_predicate=leveled_predicate, + dep_predicate=dep_predicate, ) else: closure.update(build_graph.transitive_subgraph_of_addresses_bfs( addresses=addresses, - leveled_predicate=leveled_predicate, + dep_predicate=dep_predicate, )) # Make sure all the roots made it into the closure. 
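The key property of the new `dep_predicate` contract is that it filters edges rather than nodes: a dependency pruned along one path can still enter the closure through another, approved edge. A minimal self-contained sketch of the BFS behavior used by `transitive_subgraph_of_addresses_bfs` above (illustrative only; `walk_closure` and `deps` are invented names, not pants code):

```python
from collections import deque

def walk_closure(roots, deps, dep_predicate):
  """BFS over a dependency graph, expanding an edge only when dep_predicate approves it.

  roots: ordered iterable of starting nodes.
  deps: dict mapping node -> list of direct dependencies.
  dep_predicate: callable (current_node, dependency) -> bool.
  """
  closure = []
  seen = set(roots)
  to_walk = deque(roots)
  while to_walk:
    node = to_walk.popleft()
    closure.append(node)
    for dep in deps.get(node, []):
      # Pruned edges are simply never enqueued; the dep may still be
      # reached through some other, non-pruned edge.
      if dep not in seen and dep_predicate(node, dep):
        seen.add(dep)
        to_walk.append(dep)
  return closure

# Expand only edges leaving the roots, mirroring the 'only_direct_deps' test predicate below.
deps = {'c': ['b'], 'b': ['a'], 'd': ['a']}
roots = ['c', 'd']
print(walk_closure(roots, deps, lambda src, dep: src in roots))
# ['c', 'd', 'b', 'a'] -- 'a' enters via root 'd'; the non-root edge b->a is pruned.
```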
@@ -374,7 +335,7 @@ def __init__(self, name, address, build_graph, type_alias=None, payload=None, ta self._cached_all_transitive_fingerprint_map = {} self._cached_direct_transitive_fingerprint_map = {} self._cached_strict_dependencies_map = {} - self._cached_exports_map = {} + self._cached_exports_addresses = None self._no_cache = no_cache if kwargs: self.Arguments.check(self, kwargs) @@ -458,7 +419,7 @@ def mark_invalidation_hash_dirty(self): self._cached_all_transitive_fingerprint_map = {} self._cached_direct_transitive_fingerprint_map = {} self._cached_strict_dependencies_map = {} - self._cached_exports_map = {} + self._cached_exports_addresses = None self.mark_extra_invalidation_hash_dirty() self.payload.mark_dirty() @@ -687,31 +648,27 @@ def dependencies(self): return [self._build_graph.get_target(dep_address) for dep_address in self._build_graph.dependencies_of(self.address)] - def exports(self, dep_context): - """ - :param dep_context: A DependencyContext with configuration for the request. - - :return: targets that this target directly exports. Note that this list is not transitive, - but that exports are transitively expanded during the computation of strict_dependencies. - :rtype: list of Target - """ - exports = self._cached_exports_map.get(dep_context, None) + @property + def export_addresses(self): + exports = self._cached_exports_addresses if exports is None: + exports = [] - for export in getattr(self, 'export_specs', []): - if not isinstance(export, Target): - export_spec = export - export_addr = Address.parse(export_spec, relative_to=self.address.spec_path) - export = self._build_graph.get_target(export_addr) - if export not in self.dependencies: - # A target can only export its dependencies. - raise TargetDefinitionException( - self, - 'Invalid export: "{}" must also be a dependency.'.format(export_spec)) - if isinstance(export, dep_context.codegen_types): - export = _get_synthetic_target(self, export) - exports.append(export) - self._cached_exports_map[dep_context] = exports + for export_spec in getattr(self, 'export_specs', tuple()): + if isinstance(export_spec, Target): + exports.append(export_spec.address) + else: + exports.append(Address.parse(export_spec, relative_to=self.address.spec_path)) + exports = tuple(exports) + + dep_addresses = {d.address for d in self.dependencies} + invalid_export_specs = [a.spec for a in exports if a not in dep_addresses] + if len(invalid_export_specs) > 0: + raise TargetDefinitionException( + self, + 'Invalid exports: these exports must also be dependencies\n {}'.format('\n '.join(invalid_export_specs))) + + self._cached_exports_addresses = exports return exports def strict_dependencies(self, dep_context): @@ -726,16 +683,48 @@ def strict_dependencies(self, dep_context): """ strict_deps = self._cached_strict_dependencies_map.get(dep_context, None) if strict_deps is None: + default_predicate = self._closure_dep_predicate({self}, + **dep_context.target_closure_kwargs) + + def dep_predicate(source, dependency): + if not default_predicate(source, dependency): + return False + + # Always expand aliases. + if type(source) in dep_context.alias_types: + return True + + # Traverse other dependencies if they are exported. 
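+          # Because the walk keeps expanding approved edges, exports compose:
+          # an export of an exported dependency is traversed as well.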
+ if source._dep_is_exported(dependency): + return True + return False + + dep_addresses = [d.address for d in self.dependencies + if default_predicate(self, d) + ] + result = self._build_graph.transitive_subgraph_of_addresses_bfs( + addresses=dep_addresses, + dep_predicate=dep_predicate + ) + strict_deps = OrderedSet() - for declared in _resolve_strict_dependencies(self, dep_context): + for declared in result: + if type(declared) in dep_context.alias_types: + continue if isinstance(declared, dep_context.compiler_plugin_types): - strict_deps.update(declared.closure(bfs=True, **dep_context.target_closure_kwargs)) - else: - strict_deps.add(declared) + strict_deps.update(declared.closure( + bfs=True, + **dep_context.target_closure_kwargs)) + strict_deps.add(declared) + strict_deps = list(strict_deps) self._cached_strict_dependencies_map[dep_context] = strict_deps return strict_deps + def _dep_is_exported(self, dependency): + return dependency.address in self.export_addresses or \ + dependency.is_synthetic and (dependency.concrete_derived_from.address in self.export_addresses) + @property def dependents(self): """ diff --git a/src/python/pants/task/simple_codegen_task.py b/src/python/pants/task/simple_codegen_task.py index 030f331790f..039d095063d 100644 --- a/src/python/pants/task/simple_codegen_task.py +++ b/src/python/pants/task/simple_codegen_task.py @@ -340,7 +340,7 @@ def _supports_exports(self, target_type): return hasattr(target_type, 'export_specs') def _original_export_specs(self, target): - return [t.address.spec for t in target.exports(EmptyDepContext())] + return [t.spec for t in target.export_addresses] def resolve_deps(self, unresolved_deps): """ diff --git a/tests/python/pants_test/build_graph/test_build_graph.py b/tests/python/pants_test/build_graph/test_build_graph.py index 42a5da77385..cc781aae3e4 100644 --- a/tests/python/pants_test/build_graph/test_build_graph.py +++ b/tests/python/pants_test/build_graph/test_build_graph.py @@ -324,6 +324,7 @@ def test_raise_on_duplicate_dependencies(self): self.inject_address_closure('//:a') def test_leveled_predicate(self): + # Keeping the test of this until it is removed to ensure it continues to function. 
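+    # (leveled_predicate is deprecated; test_dep_predicate below covers the replacement API.)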
a = self.make_target(spec='a') b = self.make_target(spec='b', dependencies=[a]) c = self.make_target(spec='c', dependencies=[b]) @@ -369,6 +370,60 @@ def only_indirect_a(target, depth): check_funcs({a, b, c, d}, {c, d}, leveled_predicate=only_indirect_a) check_funcs({a, b, c}, {c}, leveled_predicate=only_indirect_a) + def test_dep_predicate(self): + a = self.make_target(spec='a') + b = self.make_target(spec='b', dependencies=[a]) + c = self.make_target(spec='c', dependencies=[b]) + d = self.make_target(spec='d', dependencies=[a]) + + subgraph_funcs = (self.build_graph.transitive_subgraph_of_addresses, + self.build_graph.transitive_subgraph_of_addresses_bfs) + + def check_funcs(expected, roots, dep_predicate_factory=None, **kwargs): + if dep_predicate_factory: + kwargs['dep_predicate'] = dep_predicate_factory(roots) + for func in subgraph_funcs: + seen_targets = defaultdict(lambda: 0) + def predicate_sees(target): + seen_targets[target] += 1 + return True + + result = func([t.address for t in roots], predicate=predicate_sees, **kwargs) + self.assertEquals(set(expected), set(result)) + if any(ct > 1 for ct in seen_targets.values()): + self.fail('func {} visited {} more than once.'.format( + func, + ', '.join(t.address for t, ct in seen_targets.items() if ct > 1)) + ) + + def only_roots_factory(roots): + def only_roots(_, __): + # This is a silly constraint, because it effectively turns the transitive subgraph functions + # into the identity function. + return False + return only_roots + + def only_direct_deps_factory(roots): + def only_direct_deps(source, dep): + return source in roots + return only_direct_deps + + def only_indirect_a_factory(roots): + def only_indirect_a(source, dep): + # This is a really weird constraint just to demonstrate functionality. 
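+        # It expands an edge unless it points at `a` directly from a root, so `a` can only
+        # enter the closure indirectly.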
+ return dep != a or source not in roots + return only_indirect_a + + check_funcs({a, b, d}, {b, d}, dep_predicate_factory=None) + check_funcs({b, d}, {b, d}, dep_predicate_factory=only_roots_factory) + check_funcs({a, b, d}, {b, d}, dep_predicate_factory=only_direct_deps_factory) + check_funcs({d, a}, {d}, dep_predicate_factory=only_direct_deps_factory) + check_funcs({b, c}, {c}, dep_predicate_factory=only_direct_deps_factory) + check_funcs({b, c, d, a}, {c, d}, dep_predicate_factory=only_direct_deps_factory) + check_funcs({b, d}, {b, d}, dep_predicate_factory=only_indirect_a_factory) + check_funcs({a, b, c, d}, {c, d}, dep_predicate_factory=only_indirect_a_factory) + check_funcs({a, b, c}, {c}, dep_predicate_factory=only_indirect_a_factory) + def test_derivation(self): a = self.make_target('a') a_addr = a.address diff --git a/tests/python/pants_test/build_graph/test_scopes.py b/tests/python/pants_test/build_graph/test_scopes.py index 30fc6054adc..d95e6bf8675 100644 --- a/tests/python/pants_test/build_graph/test_scopes.py +++ b/tests/python/pants_test/build_graph/test_scopes.py @@ -120,7 +120,7 @@ def test_intransitive(self): self.assert_closure({b_intransitive, a}, {b_intransitive}) self.assert_closure({e, d, a, c}, {e, d}) self.assert_closure_dfs([d, c, b_intransitive, a], [d, c], ordered=True) - self.assert_closure_dfs([c, d, a, b_intransitive], [d, c], ordered=True, postorder=True) + self.assert_closure_dfs([a, b_intransitive, c, d], [d, c], ordered=True, postorder=True) self.assert_closure_bfs([d, c, b_intransitive, a], [d, c], ordered=True) self.assert_closure({a, b_intransitive, d, c}, {d}, respect_intransitive=False) diff --git a/tests/python/pants_test/build_graph/test_target.py b/tests/python/pants_test/build_graph/test_target.py index 2f2790c59d8..208131f8a33 100644 --- a/tests/python/pants_test/build_graph/test_target.py +++ b/tests/python/pants_test/build_graph/test_target.py @@ -16,6 +16,7 @@ from pants.base.payload_field import SetOfPrimitivesField from pants.build_graph.address import Address from pants.build_graph.target import Target +from pants.build_graph.target_scopes import Scopes from pants.source.wrapped_globs import Globs from pants_test.base_test import BaseTest from pants_test.subsystem.subsystem_util import init_subsystem @@ -283,11 +284,18 @@ def _generate_strict_dependencies(self): exports=[':C_alias'], ) + self.lib_f = self.make_target( + 'com/foo:F', + target_type=SourcesTarget, + sources=['com/foo/E.scala'], + scope=Scopes.RUNTIME + ) + self.lib_e = self.make_target( 'com/foo:E', target_type=SourcesTarget, sources=['com/foo/E.scala'], - dependencies=[self.lib_d], + dependencies=[self.lib_d, self.lib_f], ) def test_strict_dependencies(self): @@ -296,6 +304,7 @@ def test_strict_dependencies(self): dep_context.compiler_plugin_types = () dep_context.codegen_types = () dep_context.alias_types = (Target,) + dep_context.target_closure_kwargs = {'include_scopes': Scopes.JVM_COMPILE_SCOPES} self.assertEqual(set(self.lib_b.strict_dependencies(dep_context)), {self.lib_a, self.lib_aa}) self.assertEqual(set(self.lib_c.strict_dependencies(dep_context)), {self.lib_b, self.lib_a}) self.assertEqual(set(self.lib_c_alias.strict_dependencies(dep_context)), {self.lib_c, self.lib_b, self.lib_a}) diff --git a/tests/python/pants_test/projects/test_testprojects_integration.py b/tests/python/pants_test/projects/test_testprojects_integration.py index eaa062cb05d..8d2327ed349 100644 --- a/tests/python/pants_test/projects/test_testprojects_integration.py +++ 
b/tests/python/pants_test/projects/test_testprojects_integration.py @@ -49,6 +49,7 @@ def targets(self): 'testprojects/src/java/org/pantsbuild/testproject/missingdepswhitelist.*', 'testprojects/src/java/org/pantsbuild/testproject/missingdirectdepswhitelist:missingdirectdepswhitelist', 'testprojects/src/java/org/pantsbuild/testproject/missingjardepswhitelist:missingjardepswhitelist', + 'testprojects/src/java/org/pantsbuild/testproject/runtime:compile-fail', 'testprojects/src/scala/org/pantsbuild/testproject/compilation_failure', 'testprojects/src/scala/org/pantsbuild/testproject/compilation_warnings:fatal', 'testprojects/src/thrift/org/pantsbuild/thrift_exports:C-without-exports', From bd01191d03f217d84ac5dbd75439ee87fbb930bd Mon Sep 17 00:00:00 2001 From: Stu Hood Date: Mon, 5 Feb 2018 10:12:32 -0800 Subject: [PATCH 16/21] Add pantsd client profiling (#5434) ### Problem As discovered in #5433, our current profiling does not cover an important case: the client in a `pantsd` session. ### Solution Add another profiling entrypoint to cover an entire client session with `pantsd` enabled, and document the options. --- src/docs/invoking.md | 20 ++++++++++++++++++++ src/python/pants/bin/pants_exe.py | 12 ++++++++---- 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/src/docs/invoking.md b/src/docs/invoking.md index af5d67dca11..0c9a5de310f 100644 --- a/src/docs/invoking.md +++ b/src/docs/invoking.md @@ -142,3 +142,23 @@ To enable the daemon, see the example in `pants.daemon.ini` in the root of the p The daemon will be in beta until the caveat mentioned above is addressed, but we hope to enable the daemon by default for the [`1.5.0` release of pants](https://github.com/pantsbuild/pants/milestone/12). + +Profiling Pants +--------------- + +There are three environment variables that profile various parts of a pants run. + +* `PANTS_PROFILE` - Covers the entire run when pantsd is disabled, or the post-fork portion + of a pantsd run. +* `PANTSC_PROFILE` - Covers the client in a pantsd run, which connects to pantsd and then + communicates on the socket until the run completes. +* `PANTSD_PROFILE` - Covers the graph warming operations pre-fork in a pantsd run. + +To enable profiling, set the relevant environment variable to a path to write a profile to, and +then run pants: + + :::bash + PANTS_PROFILE=myprofile.prof ./pants .. + +Once you have a profile file, you can use any visualizer that supports python profiles, such as +`snakeviz` or `gprof2dot`. diff --git a/src/python/pants/bin/pants_exe.py b/src/python/pants/bin/pants_exe.py index 795ff904e99..806eec7b1c1 100644 --- a/src/python/pants/bin/pants_exe.py +++ b/src/python/pants/bin/pants_exe.py @@ -5,8 +5,11 @@ from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) +import os + from pants.base.exiter import Exiter from pants.bin.pants_runner import PantsRunner +from pants.util.contextutil import maybe_profiled TEST_STR = 'T E S T' @@ -28,7 +31,8 @@ def main(): exiter = Exiter() exiter.set_except_hook() - try: - PantsRunner(exiter).run() - except KeyboardInterrupt: - exiter.exit_and_fail(b'Interrupted by user.') + with maybe_profiled(os.environ.get('PANTSC_PROFILE')): + try: + PantsRunner(exiter).run() + except KeyboardInterrupt: + exiter.exit_and_fail(b'Interrupted by user.') From afaf19876e08165f7e3af66b45a8f6a0d33aa347 Mon Sep 17 00:00:00 2001 From: Mateo Rodriguez Date: Mon, 5 Feb 2018 17:04:24 -0500 Subject: [PATCH 17/21] Prepare the 1.5.0dev2 release. 
(#5439) * Prepare the 1.5.0dev2 release. --- src/python/pants/VERSION | 2 +- src/python/pants/notes/master.rst | 90 +++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 1 deletion(-) diff --git a/src/python/pants/VERSION b/src/python/pants/VERSION index c831dd27769..4db3f644be6 100644 --- a/src/python/pants/VERSION +++ b/src/python/pants/VERSION @@ -1 +1 @@ -1.5.0.dev1 +1.5.0.dev2 diff --git a/src/python/pants/notes/master.rst b/src/python/pants/notes/master.rst index e0822f7c43a..27c7a71e906 100644 --- a/src/python/pants/notes/master.rst +++ b/src/python/pants/notes/master.rst @@ -4,6 +4,96 @@ Master Pre-Releases This document describes ``dev`` releases which occur weekly from master, and which do not undergo the vetting associated with ``stable`` releases. + +1.5.0.dev2 (02/05/2018) +----------------------- + +New Features +~~~~~~~~~~~~ +* Allow intransitive unpacking of jars. (#5398) + `PR #5398 `_ + +API Changes +~~~~~~~~~~~ +* [strict-deps][build-graph] add new predicate to build graph traversal; Update Target.strict_deps to use it (#5150) + `PR #5150 `_ + +* Deprecate IDE project generation tasks. (#5432) + `PR #5432 `_ + +* Enable workdir-max-build-entries by default. (#5423) + `PR #5423 `_ + +* Fix tasks2 deprecations to each have their own module. (#5421) + `PR #5421 `_ + +* Console tasks can output nothing without erroring (#5412) + `PR #5412 `_ + +* Remove a remaining old-python-pipeline task from contrib/python. (#5411) + `PR #5411 `_ + +* Make the thrift linter use the standard linter mixin. (#5394) + `PR #5394 `_ + +Bugfixes +~~~~~~~~ +* Fix `PytestRun` to handle multiple source roots. (#5400) + `PR #5400 `_ + +* Fix a bug in task logging in tests. (#5404) + `PR #5404 `_ + +* [pantsd] Repair console interactivity in pantsd runs. (#5352) + `PR #5352 `_ + +Documentation Updates +~~~~~~~~~~~~~~~~~~~~~ +* Document release reset of master. (#5397) + `PR #5397 `_ + +Refactoring, Improvements, and Tooling +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +* Make the Kythe Java indexer emit JVM nodes. (#5435) + `PR #5435 `_ + +* Release script allows wheel listing (#5431) + `PR #5431 `_ + +* Get version from version file not by running pants (#5428) + `PR #5428 `_ + +* Improve python/rust boundary error handling (#5414) + `PR #5414 `_ + +* Factor up shared test partitioning code. (#5416) + `PR #5416 `_ + +* Set the log level when capturing logs in tests. (#5418) + `PR #5418 `_ + +* Simplify `JUnitRun` internals. (#5410) + `PR #5410 `_ + +* [v2-engine errors] Sort suggestions for typo'd targets, unique them when trace is disabled (#5413) + `PR #5413 `_ + +* No-op ivy resolve is ~100ms cheaper (#5389) + `PR #5389 `_ + +* Process executor does not require env flag to be set (#5409) + `PR #5409 `_ + +* [pantsd] Don't invalidate on surface name changes to config/rc files. (#5408) + `PR #5408 `_ + +* [pantsd] Break out DPR._nailgunned_stdio() into multiple methods. (#5405) + `PR #5405 `_ + +* Sort the indexable targets consistently. (#5403) + `PR #5403 `_ + + 1.5.0.dev1 (01/26/2018) ----------------------- From af91bd810f915519cc00fbe7fae5cee92e36df4b Mon Sep 17 00:00:00 2001 From: Chris Livingston Date: Wed, 7 Feb 2018 13:01:27 -0800 Subject: [PATCH 18/21] Python distribution task for user-defined setup.py + integration with ./pants {run/binary/test} (#5141) Problem There is no way to consume a python_library with c_extensions. Relates to #4740. 
Solution

Create a new target, python_distribution, that specifies only a distribution name that maps to the directory the BUILD file lives in (a distribution's project directory). Pants will treat the directory that the python_distribution target is defined in as a distribution project directory (containing a setup.py and sources) and run the pex command on that directory to build an inline wheel using the setup.py. The ./pants binary and ./pants run tasks will dump this distribution into the binary pex for consumption (or requirements pex for the ./pants run case). The setup.py takes care of linking the package sources and c/cpp code together as setuptools Extensions. We've run a similar example by our customer to validate that this meets their needs.

Result

Running ./pants binary or ./pants run on a python_binary target that depends on a python distribution will package the distribution as a wheel and include it in the created pex binary or requirements pex (for ./pants run) for consumption. This example has simple C code to demonstrate the ability to call C extensions from a package built by using this python_distribution target.
---
 .../python_distribution/hello/fasthello/BUILD      | 23 +++++
 .../hello/fasthello/hello_package/__init__.py      |  0
 .../hello/fasthello/hello_package/hello.py         | 12 +++
 .../hello/fasthello/main.py                        | 13 +++
 .../hello/fasthello/setup.py                       | 19 ++++
 .../hello/fasthello/super_greet.c                  | 14 +++
 .../hello/test_fasthello/BUILD                     | 14 +++
 .../hello/test_fasthello/test_fasthello.py         | 14 +++
 src/python/pants/backend/python/register.py        |  5 +
 src/python/pants/backend/python/targets/BUILD      |  1 +
 .../python/targets/python_distribution.py          | 63 +++++++++++
 src/python/pants/backend/python/tasks/BUILD        |  2 +
 .../tasks/build_local_python_distributions.py      | 88 ++++++++++++++
 .../backend/python/tasks/pex_build_util.py         | 48 +++++++-
 .../python/tasks/python_binary_create.py           | 15 ++-
 .../tasks/python_execution_task_base.py            |  4 +-
 .../python/tasks/resolve_requirements.py           |  7 +-
 .../tasks/resolve_requirements_task_base.py        | 26 +++++-
 .../pants/backend/python/tasks/setup_py.py         |  5 +-
 .../fasthello_with_install_requires/BUILD          | 42 +++++++
 .../hello_package/__init__.py                      |  0
 .../hello_package/hello.py                         | 11 +++
 .../fasthello_with_install_requires/main.py        | 14 +++
 .../fasthello_with_install_requires/setup.py       | 20 ++++
 .../super_greet.c                                  | 14 +++
 .../test_build_local_python_distributions.py       | 52 +++++++++
 .../test_python_distribution_integration.py        | 92 +++++++++++++++
 .../projects/test_testprojects_integration.py      |  1 +
 28 files changed, 607 insertions(+), 12 deletions(-)
 create mode 100644 examples/src/python/example/python_distribution/hello/fasthello/BUILD
 create mode 100644 examples/src/python/example/python_distribution/hello/fasthello/hello_package/__init__.py
 create mode 100644 examples/src/python/example/python_distribution/hello/fasthello/hello_package/hello.py
 create mode 100644 examples/src/python/example/python_distribution/hello/fasthello/main.py
 create mode 100644 examples/src/python/example/python_distribution/hello/fasthello/setup.py
 create mode 100644 examples/src/python/example/python_distribution/hello/fasthello/super_greet.c
 create mode 100644 examples/tests/python/example/python_distribution/hello/test_fasthello/BUILD
 create mode 100644 examples/tests/python/example/python_distribution/hello/test_fasthello/test_fasthello.py
 create mode 100644 src/python/pants/backend/python/targets/python_distribution.py
 create mode 100644
src/python/pants/backend/python/tasks/build_local_python_distributions.py create mode 100644 testprojects/src/python/python_distribution/fasthello_with_install_requires/BUILD create mode 100644 testprojects/src/python/python_distribution/fasthello_with_install_requires/hello_package/__init__.py create mode 100644 testprojects/src/python/python_distribution/fasthello_with_install_requires/hello_package/hello.py create mode 100644 testprojects/src/python/python_distribution/fasthello_with_install_requires/main.py create mode 100644 testprojects/src/python/python_distribution/fasthello_with_install_requires/setup.py create mode 100644 testprojects/src/python/python_distribution/fasthello_with_install_requires/super_greet.c create mode 100644 tests/python/pants_test/backend/python/tasks/test_build_local_python_distributions.py create mode 100644 tests/python/pants_test/backend/python/tasks/test_python_distribution_integration.py diff --git a/examples/src/python/example/python_distribution/hello/fasthello/BUILD b/examples/src/python/example/python_distribution/hello/fasthello/BUILD new file mode 100644 index 00000000000..a80cba71451 --- /dev/null +++ b/examples/src/python/example/python_distribution/hello/fasthello/BUILD @@ -0,0 +1,23 @@ +# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). + +# Like Hello world, but built with a python_dist. +# python_dist allows you to use setup.py to depend on C/C++ extensions. + +python_dist( + name='fasthello', + sources=[ + 'super_greet.c', + 'hello_package/hello.py', + 'hello_package/__init__.py', + 'setup.py' + ] +) + +python_binary( + name='main', + source='main.py', + dependencies=[ + ':fasthello', + ] +) diff --git a/examples/src/python/example/python_distribution/hello/fasthello/hello_package/__init__.py b/examples/src/python/example/python_distribution/hello/fasthello/hello_package/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/examples/src/python/example/python_distribution/hello/fasthello/hello_package/hello.py b/examples/src/python/example/python_distribution/hello/fasthello/hello_package/hello.py new file mode 100644 index 00000000000..b58c0454e09 --- /dev/null +++ b/examples/src/python/example/python_distribution/hello/fasthello/hello_package/hello.py @@ -0,0 +1,12 @@ +# coding=utf-8 +# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). + +from __future__ import (absolute_import, division, generators, nested_scopes, print_function, + unicode_literals, with_statement) + +import super_greet + +def hello(): + print(super_greet.super_greet()) + return super_greet.super_greet() diff --git a/examples/src/python/example/python_distribution/hello/fasthello/main.py b/examples/src/python/example/python_distribution/hello/fasthello/main.py new file mode 100644 index 00000000000..badaa78a22e --- /dev/null +++ b/examples/src/python/example/python_distribution/hello/fasthello/main.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). 
+
+from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
+                        unicode_literals, with_statement)
+
+# hello_package is a python module within the fasthello python_distribution
+from hello_package import hello
+
+
+if __name__ == '__main__':
+  hello.hello()
diff --git a/examples/src/python/example/python_distribution/hello/fasthello/setup.py b/examples/src/python/example/python_distribution/hello/fasthello/setup.py
new file mode 100644
index 00000000000..f7d72491658
--- /dev/null
+++ b/examples/src/python/example/python_distribution/hello/fasthello/setup.py
@@ -0,0 +1,19 @@
+# coding=utf-8
+# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
+                        unicode_literals, with_statement)
+
+from setuptools import setup, find_packages
+from distutils.core import Extension
+
+
+c_module = Extension(str('super_greet'), sources=[str('super_greet.c')])
+
+setup(
+  name='fasthello',
+  version='1.0.0',
+  ext_modules=[c_module],
+  packages=find_packages(),
+)
diff --git a/examples/src/python/example/python_distribution/hello/fasthello/super_greet.c b/examples/src/python/example/python_distribution/hello/fasthello/super_greet.c
new file mode 100644
index 00000000000..1db125e8de4
--- /dev/null
+++ b/examples/src/python/example/python_distribution/hello/fasthello/super_greet.c
@@ -0,0 +1,14 @@
+#include <Python.h>
+
+static PyObject * super_greet(PyObject *self, PyObject *args) {
+  return Py_BuildValue("s", "Super hello");
+}
+
+static PyMethodDef Methods[] = {
+  {"super_greet", super_greet, METH_VARARGS, "A super greeting"},
+  {NULL, NULL, 0, NULL}
+};
+
+PyMODINIT_FUNC initsuper_greet(void) {
+  (void) Py_InitModule("super_greet", Methods);
+}
diff --git a/examples/tests/python/example/python_distribution/hello/test_fasthello/BUILD b/examples/tests/python/example/python_distribution/hello/test_fasthello/BUILD
new file mode 100644
index 00000000000..0b7a6ba9fb9
--- /dev/null
+++ b/examples/tests/python/example/python_distribution/hello/test_fasthello/BUILD
@@ -0,0 +1,14 @@
+# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+# Example of defining a test target that depends on a python_dist target.
+
+python_tests(
+  name='fasthello',
+  sources=[
+    'test_fasthello.py'
+  ],
+  dependencies=[
+    'examples/src/python/example/python_distribution/hello/fasthello:fasthello'
+  ]
+)
diff --git a/examples/tests/python/example/python_distribution/hello/test_fasthello/test_fasthello.py b/examples/tests/python/example/python_distribution/hello/test_fasthello/test_fasthello.py
new file mode 100644
index 00000000000..31ec7fc4925
--- /dev/null
+++ b/examples/tests/python/example/python_distribution/hello/test_fasthello/test_fasthello.py
@@ -0,0 +1,14 @@
+# coding=utf-8
+# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
+                        unicode_literals, with_statement)
+
+# hello_package is a python module within the fasthello python_distribution.
+from hello_package import hello
+
+
+# Example of writing a test that depends on a python_dist target.
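+# The `super_greet` C extension import succeeds here because pants builds the python_dist
+# into a wheel and makes it available on the test's PEX path.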
+def test_fasthello(): + assert hello.hello() == "Super hello" diff --git a/src/python/pants/backend/python/register.py b/src/python/pants/backend/python/register.py index b174436cedd..3a84d143392 100644 --- a/src/python/pants/backend/python/register.py +++ b/src/python/pants/backend/python/register.py @@ -10,9 +10,12 @@ from pants.backend.python.python_requirement import PythonRequirement from pants.backend.python.python_requirements import PythonRequirements from pants.backend.python.targets.python_binary import PythonBinary +from pants.backend.python.targets.python_distribution import PythonDistribution from pants.backend.python.targets.python_library import PythonLibrary from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary from pants.backend.python.targets.python_tests import PythonTests +from pants.backend.python.tasks.build_local_python_distributions import \ + BuildLocalPythonDistributions from pants.backend.python.tasks.gather_sources import GatherSources from pants.backend.python.tasks.pytest_prep import PytestPrep from pants.backend.python.tasks.pytest_run import PytestRun @@ -34,6 +37,7 @@ def build_file_aliases(): PythonBinary.alias(): PythonBinary, PythonLibrary.alias(): PythonLibrary, PythonTests.alias(): PythonTests, + PythonDistribution.alias(): PythonDistribution, 'python_requirement_library': PythonRequirementLibrary, Resources.alias(): Resources, }, @@ -51,6 +55,7 @@ def build_file_aliases(): def register_goals(): task(name='interpreter', action=SelectInterpreter).install('pyprep') + task(name='build-local-dists', action=BuildLocalPythonDistributions).install('pyprep') task(name='requirements', action=ResolveRequirements).install('pyprep') task(name='sources', action=GatherSources).install('pyprep') task(name='py', action=PythonRun).install('run') diff --git a/src/python/pants/backend/python/targets/BUILD b/src/python/pants/backend/python/targets/BUILD index 6529abf0d76..58da7e7240c 100644 --- a/src/python/pants/backend/python/targets/BUILD +++ b/src/python/pants/backend/python/targets/BUILD @@ -4,6 +4,7 @@ python_library( sources = [ 'python_binary.py', + 'python_distribution.py', 'python_library.py', 'python_requirement_library.py', 'python_target.py', diff --git a/src/python/pants/backend/python/targets/python_distribution.py b/src/python/pants/backend/python/targets/python_distribution.py new file mode 100644 index 00000000000..14f52753444 --- /dev/null +++ b/src/python/pants/backend/python/targets/python_distribution.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). + +from __future__ import (absolute_import, division, generators, nested_scopes, print_function, + unicode_literals, with_statement) + +from pex.interpreter import PythonIdentity +from twitter.common.collections import maybe_list + +from pants.base.exceptions import TargetDefinitionException +from pants.base.payload import Payload +from pants.base.payload_field import PrimitiveField +from pants.build_graph.target import Target + + +class PythonDistribution(Target): + """A Python distribution target that accepts a user-defined setup.py.""" + + default_sources_globs = '*.py' + + @classmethod + def alias(cls): + return 'python_dist' + + def __init__(self, + address=None, + payload=None, + sources=None, + compatibility=None, + **kwargs): + """ + :param address: The Address that maps to this Target in the BuildGraph. 
+ :type address: :class:`pants.build_graph.address.Address` + :param payload: The configuration encapsulated by this target. Also in charge of most + fingerprinting details. + :type payload: :class:`pants.base.payload.Payload` + :param sources: Files to "include". Paths are relative to the + BUILD file's directory. + :type sources: ``Fileset`` or list of strings. Must include setup.py. + :param compatibility: either a string or list of strings that represents + interpreter compatibility for this target, using the Requirement-style + format, e.g. ``'CPython>=3', or just ['>=2.7','<3']`` for requirements + agnostic to interpreter class. + """ + payload = payload or Payload() + payload.add_fields({ + 'sources': self.create_sources_field(sources, address.spec_path, key_arg='sources'), + 'compatibility': PrimitiveField(maybe_list(compatibility or ())) + }) + super(PythonDistribution, self).__init__(address=address, payload=payload, **kwargs) + + if not 'setup.py' in sources: + raise TargetDefinitionException( + self, 'A setup.py in the top-level directory relative to the target definition is required.' + ) + + # Check that the compatibility requirements are well-formed. + for req in self.payload.compatibility: + try: + PythonIdentity.parse_requirement(req) + except ValueError as e: + raise TargetDefinitionException(self, str(e)) diff --git a/src/python/pants/backend/python/tasks/BUILD b/src/python/pants/backend/python/tasks/BUILD index 4816ee904cc..ae2ced4ea59 100644 --- a/src/python/pants/backend/python/tasks/BUILD +++ b/src/python/pants/backend/python/tasks/BUILD @@ -6,6 +6,8 @@ python_library( '3rdparty/python:pex', '3rdparty/python/twitter/commons:twitter.common.collections', '3rdparty/python/twitter/commons:twitter.common.dirutil', + 'src/python/pants/backend/python:python_requirement', + 'src/python/pants/backend/python:python_requirements', 'src/python/pants/backend/python:interpreter_cache', 'src/python/pants/backend/python:python_chroot', 'src/python/pants/backend/python/subsystems', diff --git a/src/python/pants/backend/python/tasks/build_local_python_distributions.py b/src/python/pants/backend/python/tasks/build_local_python_distributions.py new file mode 100644 index 00000000000..cdbb7ecad2f --- /dev/null +++ b/src/python/pants/backend/python/tasks/build_local_python_distributions.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). 
+ +from __future__ import (absolute_import, division, generators, nested_scopes, print_function, + unicode_literals, with_statement) + +import glob +import os +import shutil + +from pex.interpreter import PythonInterpreter + +from pants.backend.python.tasks.pex_build_util import is_local_python_dist +from pants.backend.python.tasks.setup_py import SetupPyRunner +from pants.base.build_environment import get_buildroot +from pants.base.exceptions import TargetDefinitionException, TaskError +from pants.base.fingerprint_strategy import DefaultFingerprintStrategy +from pants.task.task import Task +from pants.util.dirutil import safe_mkdir + + +class BuildLocalPythonDistributions(Task): + """Create python distributions (.whl) from python_dist targets.""" + + options_scope = 'python-create-distributions' + PYTHON_DISTS = 'user_defined_python_dists' + + @classmethod + def product_types(cls): + return [cls.PYTHON_DISTS] + + @classmethod + def prepare(cls, options, round_manager): + round_manager.require_data(PythonInterpreter) + + @property + def cache_target_dirs(self): + return True + + def execute(self): + dist_targets = self.context.targets(is_local_python_dist) + built_dists = set() + + if dist_targets: + with self.invalidated(dist_targets, + fingerprint_strategy=DefaultFingerprintStrategy(), + invalidate_dependents=True) as invalidation_check: + for vt in invalidation_check.all_vts: + if vt.valid: + built_dists.add(self._get_whl_from_dir(os.path.join(vt.results_dir, 'dist'))) + else: + if vt.target.dependencies: + raise TargetDefinitionException( + vt.target, 'The `dependencies` field is disallowed on `python_dist` targets. List any 3rd ' + 'party requirements in the install_requirements argument of your setup function.' + ) + built_dists.add(self._create_dist(vt.target, vt.results_dir)) + + self.context.products.register_data(self.PYTHON_DISTS, built_dists) + + def _create_dist(self, dist_tgt, dist_target_dir): + """Create a .whl file for the specified python_distribution target.""" + interpreter = self.context.products.get_data(PythonInterpreter) + + # Copy sources and setup.py over to vt results directory for packaging. + # NB: The directory structure of the destination directory needs to match 1:1 + # with the directory structure that setup.py expects. + for src_relative_to_target_base in dist_tgt.sources_relative_to_target_base(): + src_rel_to_results_dir = os.path.join(dist_target_dir, src_relative_to_target_base) + safe_mkdir(os.path.dirname(src_rel_to_results_dir)) + abs_src_path = os.path.join(get_buildroot(), + dist_tgt.address.spec_path, + src_relative_to_target_base) + shutil.copyfile(abs_src_path, src_rel_to_results_dir) + # Build a whl using SetupPyRunner and return its absolute path. 
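+    # ('bdist_wheel' writes the wheel into a 'dist/' subdirectory of dist_target_dir.)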
+    setup_runner = SetupPyRunner(dist_target_dir, 'bdist_wheel', interpreter=interpreter)
+    setup_runner.run()
+    return self._get_whl_from_dir(os.path.join(dist_target_dir, 'dist'))
+
+  def _get_whl_from_dir(self, install_dir):
+    """Return the absolute path of the whl in a setup.py install directory."""
+    dists = glob.glob(os.path.join(install_dir, '*.whl'))
+    if len(dists) == 0:
+      raise TaskError('No distributions were produced by python_create_distribution task.')
+    if len(dists) > 1:
+      raise TaskError('Ambiguous local python distributions found: {}'.format(dists))
+    return dists[0]
diff --git a/src/python/pants/backend/python/tasks/pex_build_util.py b/src/python/pants/backend/python/tasks/pex_build_util.py
index bb3b0f2e514..97244b05689 100644
--- a/src/python/pants/backend/python/tasks/pex_build_util.py
+++ b/src/python/pants/backend/python/tasks/pex_build_util.py
@@ -12,13 +12,16 @@
 from pex.resolver import resolve
 from twitter.common.collections import OrderedSet

+from pants.backend.python.python_requirement import PythonRequirement
 from pants.backend.python.subsystems.python_setup import PythonSetup
 from pants.backend.python.targets.python_binary import PythonBinary
+from pants.backend.python.targets.python_distribution import PythonDistribution
 from pants.backend.python.targets.python_library import PythonLibrary
 from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
 from pants.backend.python.targets.python_tests import PythonTests
 from pants.base.build_environment import get_buildroot
 from pants.base.exceptions import TaskError
+from pants.build_graph.address import Address
 from pants.build_graph.files import Files
 from pants.python.python_repos import PythonRepos

@@ -34,6 +37,10 @@ def has_python_sources(tgt):
   return is_python_target(tgt) and tgt.has_sources()


+def is_local_python_dist(tgt):
+  return isinstance(tgt, PythonDistribution)
+
+
 def has_resources(tgt):
   return isinstance(tgt, Files) and tgt.has_sources()

@@ -113,7 +120,6 @@ def dump_requirements(builder, interpreter, req_libs, log, platforms=None):

   # Resolve the requirements into distributions.
   distributions = _resolve_multi(interpreter, reqs_to_build, platforms, find_links)
-
   locations = set()
   for platform, dists in distributions.items():
     for dist in dists:
@@ -158,3 +164,43 @@ def _resolve_multi(interpreter, requirements, platforms, find_links):
                                     allow_prereleases=python_setup.resolver_allow_prereleases)

   return distributions
+
+
+def inject_synthetic_dist_requirements(build_graph, local_built_dists, synthetic_address, binary_tgt=None):
+  """Inject a synthetic requirements library from a local wheel.
+
+  :param build_graph: The build graph needed for injecting synthetic targets.
+  :param local_built_dists: A list of paths to locally built wheels to package into
+    requirements libraries.
+  :param synthetic_address: A generative address for addressing synthetic targets.
+  :param binary_tgt: An optional parameter to be passed only when called by the `python_binary_create`
+    task. This is needed to ensure that only python_dist targets in a binary target's closure are included
+    in the binary for the case where a user specifies multiple binary targets in a single invocation of
+    `./pants binary`.
+  :return: a :class:`PythonRequirementLibrary` whose requirements map to the locally-built wheels.
+  """
+  def should_create_req(bin_tgt, loc):
+    if not bin_tgt:
+      return True
+    # Ensure that a target is in a binary target's closure. See docstring for more detail.
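+    # This relies on each built wheel living under its originating target's results_dir,
+    # whose path embeds the target id.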
+ return any([tgt.id in loc for tgt in bin_tgt.closure()]) + + def python_requirement_from_wheel(path): + base = os.path.basename(path) + whl_dir = os.path.dirname(path) + whl_metadata = base.split('-') + req_name = '=='.join([whl_metadata[0], whl_metadata[1]]) + return PythonRequirement(req_name, repository=whl_dir) + + local_whl_reqs = [ + python_requirement_from_wheel(whl_location) + for whl_location in local_built_dists + if should_create_req(binary_tgt, whl_location) + ] + + if not local_whl_reqs: + return [] + + addr = Address.parse(synthetic_address) + build_graph.inject_synthetic_target(addr, PythonRequirementLibrary, requirements=local_whl_reqs) + return [build_graph.get_target(addr)] diff --git a/src/python/pants/backend/python/tasks/python_binary_create.py b/src/python/pants/backend/python/tasks/python_binary_create.py index 65977a10edb..b9ce3d8ad7b 100644 --- a/src/python/pants/backend/python/tasks/python_binary_create.py +++ b/src/python/pants/backend/python/tasks/python_binary_create.py @@ -12,9 +12,12 @@ from pex.pex_info import PexInfo from pants.backend.python.targets.python_binary import PythonBinary +from pants.backend.python.tasks.build_local_python_distributions import \ + BuildLocalPythonDistributions from pants.backend.python.tasks.pex_build_util import (dump_requirements, dump_sources, has_python_requirements, has_python_sources, - has_resources) + has_resources, + inject_synthetic_dist_requirements) from pants.base.build_environment import get_buildroot from pants.base.exceptions import TaskError from pants.build_graph.target_scopes import Scopes @@ -43,6 +46,7 @@ def cache_target_dirs(self): def prepare(cls, options, round_manager): round_manager.require_data(PythonInterpreter) round_manager.require_data('python') # For codegen. + round_manager.require_data(BuildLocalPythonDistributions.PYTHON_DISTS) @staticmethod def is_binary(target): @@ -126,6 +130,15 @@ def _create_binary(self, binary_tgt, results_dir): # Dump everything into the builder's chroot. for tgt in source_tgts: dump_sources(builder, tgt, self.context.log) + + # Handle locally-built python distribution dependencies. + built_dists = self.context.products.get_data(BuildLocalPythonDistributions.PYTHON_DISTS) + if built_dists: + req_tgts = inject_synthetic_dist_requirements(self.context.build_graph, + built_dists, + ':'.join(2 * [binary_tgt.invalidation_hash()]), + binary_tgt) + req_tgts + dump_requirements(builder, interpreter, req_tgts, self.context.log, binary_tgt.platforms) # Build the .pex file. 
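The `python_requirement_from_wheel` helper above leans on the wheel filename convention from PEP 427 (`{name}-{version}-{python tag}-{abi tag}-{platform tag}.whl`), under which the first two dash-separated fields are the distribution name and version (hyphens in names are escaped to underscores, so the split is safe). A standalone sketch of just that parsing step; `wheel_path_to_requirement` is an invented name used for illustration, not pants code:

```python
import os

def wheel_path_to_requirement(path):
  """Derive a 'name==version' requirement string and a local repo dir from a wheel's path."""
  fields = os.path.basename(path).split('-')
  name, version = fields[0], fields[1]
  # The containing directory doubles as a local 'repository' from which to resolve the wheel.
  return '{}=={}'.format(name, version), os.path.dirname(path)

req, repo = wheel_path_to_requirement('/tmp/dist/fasthello-1.0.0-py2-none-any.whl')
print(req)   # fasthello==1.0.0
print(repo)  # /tmp/dist
```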
diff --git a/src/python/pants/backend/python/tasks/python_execution_task_base.py b/src/python/pants/backend/python/tasks/python_execution_task_base.py index feff3b475cc..5874f18b895 100644 --- a/src/python/pants/backend/python/tasks/python_execution_task_base.py +++ b/src/python/pants/backend/python/tasks/python_execution_task_base.py @@ -13,6 +13,7 @@ from pex.pex_builder import PEXBuilder from pants.backend.python.python_requirement import PythonRequirement +from pants.backend.python.targets.python_distribution import PythonDistribution from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary from pants.backend.python.targets.python_target import PythonTarget from pants.backend.python.tasks.gather_sources import GatherSources @@ -102,7 +103,8 @@ def extra_requirements(self): def create_pex(self, pex_info=None): """Returns a wrapped pex that "merges" the other pexes via PEX_PATH.""" relevant_targets = self.context.targets( - lambda tgt: isinstance(tgt, (PythonRequirementLibrary, PythonTarget, Files))) + lambda tgt: isinstance(tgt, ( + PythonDistribution, PythonRequirementLibrary, PythonTarget, Files))) with self.invalidated(relevant_targets) as invalidation_check: # If there are no relevant targets, we still go through the motions of resolving diff --git a/src/python/pants/backend/python/tasks/resolve_requirements.py b/src/python/pants/backend/python/tasks/resolve_requirements.py index 07657b08905..2a4436f749f 100644 --- a/src/python/pants/backend/python/tasks/resolve_requirements.py +++ b/src/python/pants/backend/python/tasks/resolve_requirements.py @@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) -from pants.backend.python.tasks.pex_build_util import has_python_requirements +from pants.backend.python.tasks.pex_build_util import has_python_requirements, is_local_python_dist from pants.backend.python.tasks.resolve_requirements_task_base import ResolveRequirementsTaskBase @@ -19,6 +19,7 @@ def product_types(cls): def execute(self): req_libs = self.context.targets(has_python_requirements) - if req_libs: - pex = self.resolve_requirements(req_libs) + dist_tgts = self.context.targets(is_local_python_dist) + if req_libs or dist_tgts: + pex = self.resolve_requirements(req_libs, dist_tgts) self.context.products.register_data(self.REQUIREMENTS_PEX, pex) diff --git a/src/python/pants/backend/python/tasks/resolve_requirements_task_base.py b/src/python/pants/backend/python/tasks/resolve_requirements_task_base.py index cccb7fd454d..4dc3d8d039c 100644 --- a/src/python/pants/backend/python/tasks/resolve_requirements_task_base.py +++ b/src/python/pants/backend/python/tasks/resolve_requirements_task_base.py @@ -11,7 +11,10 @@ from pex.pex import PEX from pex.pex_builder import PEXBuilder -from pants.backend.python.tasks.pex_build_util import dump_requirements +from pants.backend.python.tasks.build_local_python_distributions import \ + BuildLocalPythonDistributions +from pants.backend.python.tasks.pex_build_util import (dump_requirements, + inject_synthetic_dist_requirements) from pants.invalidation.cache_manager import VersionedTargetSet from pants.task.task import Task from pants.util.dirutil import safe_concurrent_creation @@ -28,9 +31,19 @@ class ResolveRequirementsTaskBase(Task): @classmethod def prepare(cls, options, round_manager): round_manager.require_data(PythonInterpreter) + round_manager.require_data(BuildLocalPythonDistributions.PYTHON_DISTS) - def 
resolve_requirements(self, req_libs): - with self.invalidated(req_libs) as invalidation_check: + def resolve_requirements(self, req_libs, local_dist_targets=None): + """Requirements resolution for PEX files. + + :param req_libs: A list of :class:`PythonRequirementLibrary` targets to resolve. + :param local_dist_targets: A list of :class:`PythonDistribution` targets to resolve. + :returns: a PEX containing target requirements and any specified python dist targets. + """ + tgts = req_libs + if local_dist_targets: + tgts = req_libs + local_dist_targets + with self.invalidated(tgts) as invalidation_check: # If there are no relevant targets, we still go through the motions of resolving # an empty set of requirements, to prevent downstream tasks from having to check # for this special case. @@ -42,11 +55,16 @@ def resolve_requirements(self, req_libs): interpreter = self.context.products.get_data(PythonInterpreter) path = os.path.realpath(os.path.join(self.workdir, str(interpreter.identity), target_set_id)) - # Note that we check for the existence of the directory, instead of for invalid_vts, # to cover the empty case. if not os.path.isdir(path): with safe_concurrent_creation(path) as safe_path: + # Handle locally-built python distribution dependencies. + built_dists = self.context.products.get_data(BuildLocalPythonDistributions.PYTHON_DISTS) + if built_dists: + req_libs = inject_synthetic_dist_requirements(self.context.build_graph, + built_dists, + ':'.join(2 * [target_set_id])) + req_libs self._build_requirements_pex(interpreter, safe_path, req_libs) return PEX(path, interpreter=interpreter) diff --git a/src/python/pants/backend/python/tasks/setup_py.py b/src/python/pants/backend/python/tasks/setup_py.py index ae0f0317f1b..3f173669c02 100644 --- a/src/python/pants/backend/python/tasks/setup_py.py +++ b/src/python/pants/backend/python/tasks/setup_py.py @@ -22,6 +22,7 @@ from pants.backend.python.targets.python_binary import PythonBinary from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary from pants.backend.python.targets.python_target import PythonTarget +from pants.backend.python.tasks.pex_build_util import is_local_python_dist from pants.base.build_environment import get_buildroot from pants.base.exceptions import TargetDefinitionException, TaskError from pants.base.specs import SiblingAddresses @@ -588,9 +589,9 @@ def create_setup_py(self, target, dist_dir): def execute(self): # We drive creation of setup.py distributions from the original target graph, grabbing codegen'd - # sources when needed. + # sources when needed. We ignore PythonDistribution targets. def is_exported_python_target(t): - return t.is_original and self.has_provides(t) + return t.is_original and self.has_provides(t) and not is_local_python_dist(t) exported_python_targets = OrderedSet(t for t in self.context.target_roots if is_exported_python_target(t)) diff --git a/testprojects/src/python/python_distribution/fasthello_with_install_requires/BUILD b/testprojects/src/python/python_distribution/fasthello_with_install_requires/BUILD new file mode 100644 index 00000000000..22a62c69800 --- /dev/null +++ b/testprojects/src/python/python_distribution/fasthello_with_install_requires/BUILD @@ -0,0 +1,42 @@ +# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). 
+ + +python_dist( + name='fasthello', + sources=[ + 'super_greet.c', + 'hello_package/hello.py', + 'hello_package/__init__.py', + 'setup.py' + ] +) + +python_binary( + name='main_with_no_conflict', + source='main.py', + dependencies=[ + ':fasthello', + ] +) + +python_binary( + name='main_with_conflicting_dep', + source='main.py', + dependencies=[ + ':fasthello', + ':pycountry' + ] +) + +python_requirement_library( + name='pycountry', + requirements=[ + python_requirement('pycountry==17.9.23'), + ] +) + +python_binary( + name='main_with_no_pycountry', + source='main.py' +) diff --git a/testprojects/src/python/python_distribution/fasthello_with_install_requires/hello_package/__init__.py b/testprojects/src/python/python_distribution/fasthello_with_install_requires/hello_package/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/testprojects/src/python/python_distribution/fasthello_with_install_requires/hello_package/hello.py b/testprojects/src/python/python_distribution/fasthello_with_install_requires/hello_package/hello.py new file mode 100644 index 00000000000..fa29f9a2bbd --- /dev/null +++ b/testprojects/src/python/python_distribution/fasthello_with_install_requires/hello_package/hello.py @@ -0,0 +1,11 @@ +# coding=utf-8 +# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). + +from __future__ import (absolute_import, division, generators, nested_scopes, print_function, + unicode_literals, with_statement) + +import super_greet + +def hello(): + print(super_greet.super_greet()) diff --git a/testprojects/src/python/python_distribution/fasthello_with_install_requires/main.py b/testprojects/src/python/python_distribution/fasthello_with_install_requires/main.py new file mode 100644 index 00000000000..24790bd3db0 --- /dev/null +++ b/testprojects/src/python/python_distribution/fasthello_with_install_requires/main.py @@ -0,0 +1,14 @@ +# coding=utf-8 +# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). + +from __future__ import (absolute_import, division, generators, nested_scopes, print_function, + unicode_literals, with_statement) + +import pycountry +from hello_package import hello + + +if __name__ == '__main__': + hello.hello() + print(pycountry.countries.get(alpha_2='US').name) diff --git a/testprojects/src/python/python_distribution/fasthello_with_install_requires/setup.py b/testprojects/src/python/python_distribution/fasthello_with_install_requires/setup.py new file mode 100644 index 00000000000..98fc6590c83 --- /dev/null +++ b/testprojects/src/python/python_distribution/fasthello_with_install_requires/setup.py @@ -0,0 +1,20 @@ +# coding=utf-8 +# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). 
diff --git a/testprojects/src/python/python_distribution/fasthello_with_install_requires/setup.py b/testprojects/src/python/python_distribution/fasthello_with_install_requires/setup.py
new file mode 100644
index 00000000000..98fc6590c83
--- /dev/null
+++ b/testprojects/src/python/python_distribution/fasthello_with_install_requires/setup.py
@@ -0,0 +1,20 @@
+# coding=utf-8
+# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
+                        unicode_literals, with_statement)
+
+from setuptools import setup, find_packages
+from distutils.core import Extension
+
+
+c_module = Extension(str('super_greet'), sources=[str('super_greet.c')])
+
+setup(
+  name='fasthello_test',
+  version='1.0.0',
+  ext_modules=[c_module],
+  packages=find_packages(),
+  install_requires=['pycountry==17.1.2']
+)
diff --git a/testprojects/src/python/python_distribution/fasthello_with_install_requires/super_greet.c b/testprojects/src/python/python_distribution/fasthello_with_install_requires/super_greet.c
new file mode 100644
index 00000000000..1db125e8de4
--- /dev/null
+++ b/testprojects/src/python/python_distribution/fasthello_with_install_requires/super_greet.c
@@ -0,0 +1,14 @@
+#include <Python.h>
+
+static PyObject * super_greet(PyObject *self, PyObject *args) {
+  return Py_BuildValue("s", "Super hello");
+}
+
+static PyMethodDef Methods[] = {
+  {"super_greet", super_greet, METH_VARARGS, "A super greeting"},
+  {NULL, NULL, 0, NULL}
+};
+
+PyMODINIT_FUNC initsuper_greet(void) {
+  (void) Py_InitModule("super_greet", Methods);
+}
diff --git a/tests/python/pants_test/backend/python/tasks/test_build_local_python_distributions.py b/tests/python/pants_test/backend/python/tasks/test_build_local_python_distributions.py
new file mode 100644
index 00000000000..bc509eab684
--- /dev/null
+++ b/tests/python/pants_test/backend/python/tasks/test_build_local_python_distributions.py
@@ -0,0 +1,52 @@
+# coding=utf-8
+# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
+                        unicode_literals, with_statement)
+
+from textwrap import dedent
+
+from pants.backend.python.targets.python_distribution import PythonDistribution
+from pants.backend.python.tasks.build_local_python_distributions import \
+  BuildLocalPythonDistributions
+from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
+
+
+class TestBuildLocalPythonDistributions(PythonTaskTestBase):
+  @classmethod
+  def task_type(cls):
+    return BuildLocalPythonDistributions
+
+  def setUp(self):
+    super(TestBuildLocalPythonDistributions, self).setUp()
+
+    # Setup simple python_dist target
+    sources = ['foo.py', 'bar.py', '__init__.py', 'setup.py']
+    self.filemap = {
+      'src/python/dist/__init__.py': '',
+      'src/python/dist/foo.py': 'print("foo")',
+      'src/python/dist/bar.py': 'print("bar")',
+      'src/python/dist/setup.py': dedent("""
+        from setuptools import setup, find_packages
+        setup(
+          name='my_dist',
+          version='0.0.0',
+          packages=find_packages()
+        )
+      """)
+    }
+    for rel_path, content in self.filemap.items():
+      self.create_file(rel_path, content)
+
+    self.python_dist_tgt = self.make_target(spec='src/python/dist:my_dist',
+                                            target_type=PythonDistribution,
+                                            sources=sources)
+
+  def test_python_create_distributions(self):
+    context = self.context(target_roots=[self.python_dist_tgt], for_task_types=[BuildLocalPythonDistributions])
+    python_create_distributions_task = self.create_task(context)
+    python_create_distributions_task.execute()
+    built_dists = context.products.get_data(BuildLocalPythonDistributions.PYTHON_DISTS)
+    self.assertGreater(len(built_dists), 0)
+    self.assertTrue(any(['my_dist-0.0.0-py2-none-any.whl' in dist for dist in list(built_dists)]))
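The unit test's final assertion keys off the built wheel's filename, `my_dist-0.0.0-py2-none-any.whl`, which follows the standard wheel naming scheme from PEP 427. A small sketch of the fields that name encodes (ignoring optional build tags, and noting that dashes in distribution names are escaped to underscores before naming):

```python
# PEP 427 wheel names: {distribution}-{version}-{python tag}-{abi tag}-{platform tag}.whl
def parse_wheel_name(filename):
  stem = filename[:-len('.whl')]
  name, version, python_tag, abi_tag, platform_tag = stem.split('-')
  return {'name': name, 'version': version, 'python': python_tag,
          'abi': abi_tag, 'platform': platform_tag}

# parse_wheel_name('my_dist-0.0.0-py2-none-any.whl')
# -> {'name': 'my_dist', 'version': '0.0.0', 'python': 'py2',
#     'abi': 'none', 'platform': 'any'}
```

The `py2-none-any` tags indicate the test expects a pure-Python wheel layout under the Python 2 interpreter the test harness selects; a platform-specific build would carry different abi and platform tags.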
diff --git a/tests/python/pants_test/backend/python/tasks/test_python_distribution_integration.py b/tests/python/pants_test/backend/python/tasks/test_python_distribution_integration.py
new file mode 100644
index 00000000000..e6040445bf5
--- /dev/null
+++ b/tests/python/pants_test/backend/python/tasks/test_python_distribution_integration.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
+                        unicode_literals, with_statement)
+
+import os
+
+from pants.base.build_environment import get_buildroot
+from pants.util.process_handler import subprocess
+from pants_test.pants_run_integration_test import PantsRunIntegrationTest
+
+
+class PythonDistributionIntegrationTest(PantsRunIntegrationTest):
+  # The paths to both a project containing a simple C extension (to be packaged into a
+  # whl by setup.py) and an associated test to be consumed by the pants goals tested below.
+  fasthello_project = 'examples/src/python/example/python_distribution/hello/fasthello'
+  fasthello_tests = 'examples/tests/python/example/python_distribution/hello/test_fasthello'
+  fasthello_install_requires = 'testprojects/src/python/python_distribution/fasthello_with_install_requires'
+
+  def test_pants_binary(self):
+    command=['binary', '{}:main'.format(self.fasthello_project)]
+    pants_run = self.run_pants(command=command)
+    self.assert_success(pants_run)
+    # Check that the pex was built.
+    pex = os.path.join(get_buildroot(), 'dist', 'main.pex')
+    self.assertTrue(os.path.isfile(pex))
+    # Check that the pex runs.
+    output = subprocess.check_output(pex)
+    self.assertIn('Super hello', output)
+    # Cleanup
+    os.remove(pex)
+
+  def test_pants_run(self):
+    command=['run', '{}:main'.format(self.fasthello_project)]
+    pants_run = self.run_pants(command=command)
+    self.assert_success(pants_run)
+    # Check that text was properly printed to stdout.
+    self.assertIn('Super hello', pants_run.stdout_data)
+
+  def test_pants_test(self):
+    command=['test', '{}:fasthello'.format(self.fasthello_tests)]
+    pants_run = self.run_pants(command=command)
+    self.assert_success(pants_run)
+
+  def test_with_install_requires(self):
+    command=['run', '{}:main_with_no_conflict'.format(self.fasthello_install_requires)]
+    pants_run = self.run_pants(command=command)
+    self.assert_success(pants_run)
+    self.assertIn('United States', pants_run.stdout_data)
+    command=['binary', '{}:main_with_no_conflict'.format(self.fasthello_install_requires)]
+    pants_run = self.run_pants(command=command)
+    self.assert_success(pants_run)
+    pex = os.path.join(get_buildroot(), 'dist', 'main_with_no_conflict.pex')
+    output = subprocess.check_output(pex)
+    self.assertIn('United States', output)
+    os.remove(pex)
+
+  def test_with_conflicting_transitive_deps(self):
+    command=['run', '{}:main_with_conflicting_dep'.format(self.fasthello_install_requires)]
+    pants_run = self.run_pants(command=command)
+    self.assert_failure(pants_run)
+    self.assertIn('pycountry', pants_run.stderr_data)
+    self.assertIn('fasthello', pants_run.stderr_data)
+    command=['binary', '{}:main_with_conflicting_dep'.format(self.fasthello_install_requires)]
+    pants_run = self.run_pants(command=command)
+    self.assert_failure(pants_run)
+    self.assertIn('pycountry', pants_run.stderr_data)
+    self.assertIn('fasthello', pants_run.stderr_data)
+
+  def test_pants_binary_dep_isolation_with_multiple_targets(self):
+    command=['binary', '{}:main_with_no_conflict'.format(self.fasthello_install_requires),
+             '{}:main_with_no_pycountry'.format(self.fasthello_install_requires)]
+    pants_run = self.run_pants(command=command)
+    self.assert_success(pants_run)
+    # Check that the pex was built.
+    pex1 = os.path.join(get_buildroot(), 'dist', 'main_with_no_conflict.pex')
+    self.assertTrue(os.path.isfile(pex1))
+    pex2 = os.path.join(get_buildroot(), 'dist', 'main_with_no_pycountry.pex')
+    self.assertTrue(os.path.isfile(pex2))
+    # Check that the pex 1 runs.
+    output = subprocess.check_output(pex1)
+    self.assertIn('Super hello', output)
+    # Check that the pex 2 fails due to no python_dists leaked into it.
+    try:
+      output = subprocess.check_output(pex2)
+    except subprocess.CalledProcessError as e:
+      self.assertNotEquals(0, e.returncode)
+    # Cleanup
+    os.remove(pex1)
+    os.remove(pex2)
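The last test above is the key isolation check: building two binaries in a single `./pants binary` invocation must not let `:fasthello`'s `install_requires` leak into `main_with_no_pycountry.pex`, whose target declares no dependencies at all. A rough standalone version of that check, assuming only that a failed `import pycountry` in `main.py` yields a nonzero exit code and a traceback on stderr:

```python
# Sketch of the isolation check: the pex that never depended on :fasthello
# should die on main.py's `import pycountry` rather than run to completion.
import subprocess


def pex_is_isolated(pex_path):
  proc = subprocess.Popen([pex_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  _, stderr = proc.communicate()
  # A leaked pycountry requirement would make the pex exit 0; isolation
  # means a nonzero exit with an import failure in the traceback.
  return proc.returncode != 0 and b'ImportError' in stderr
```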
diff --git a/tests/python/pants_test/projects/test_testprojects_integration.py b/tests/python/pants_test/projects/test_testprojects_integration.py
index 8d2327ed349..3901777a4ea 100644
--- a/tests/python/pants_test/projects/test_testprojects_integration.py
+++ b/tests/python/pants_test/projects/test_testprojects_integration.py
@@ -66,6 +66,7 @@ def targets(self):
       'testprojects/tests/java/org/pantsbuild/testproject/depman:old-tests',
       'testprojects/tests/java/org/pantsbuild/testproject/htmlreport:htmlreport',
       'testprojects/tests/java/org/pantsbuild/testproject/parallel.*',
+      'testprojects/src/python/python_distribution/fasthello_with_install_requires.*'
     ]
     # May not succeed without java8 installed
From 476f37a0301210197d73f5e775d5623bfeebbbf6 Mon Sep 17 00:00:00 2001
From: Benjy Weinberger
Date: Fri, 9 Feb 2018 00:54:45 -0500
Subject: [PATCH 19/21] Bundle all kythe entries, regardless of origin.
 (#5450)

---
 .../pants/contrib/codeanalysis/tasks/bundle_entries.py | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/contrib/codeanalysis/src/python/pants/contrib/codeanalysis/tasks/bundle_entries.py b/contrib/codeanalysis/src/python/pants/contrib/codeanalysis/tasks/bundle_entries.py
index 2e88d1f63b7..f4db6ea7fbe 100644
--- a/contrib/codeanalysis/src/python/pants/contrib/codeanalysis/tasks/bundle_entries.py
+++ b/contrib/codeanalysis/src/python/pants/contrib/codeanalysis/tasks/bundle_entries.py
@@ -11,8 +11,6 @@
 from pants.backend.jvm.tasks.nailgun_task import NailgunTask
 from pants.util.dirutil import safe_mkdir
 
-from pants.contrib.codeanalysis.tasks.indexable_java_targets import IndexableJavaTargets
-
 
 class BundleEntries(NailgunTask):
   @classmethod
@@ -33,9 +31,7 @@ def execute(self):
     if archive == 'none':
       return
 
-    indexable_targets = IndexableJavaTargets.global_instance().get(self.context)
-    for tgt in indexable_targets:
-      entries = self.context.products.get_data('kythe_entries_files', dict)[tgt]
+    for tgt, entries in self.context.products.get_data('kythe_entries_files', dict).items():
       kythe_distdir = os.path.join(self.get_options().pants_distdir, 'kythe')
       safe_mkdir(kythe_distdir)
       uncompressed_kythe_distpath = os.path.join(
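With `IndexableJavaTargets` gone, the loop above archives whatever upstream indexing tasks registered under the `kythe_entries_files` product, regardless of which task (or language backend) produced each entry. A toy sketch of the iteration shape this assumes; the dict contents here are hypothetical stand-ins, where in the task they come from `self.context.products.get_data('kythe_entries_files', dict)`:

```python
# Stand-in for the product dict: keys are targets, values are entries files.
# The paths and target specs below are made up, purely to show the shape.
kythe_entries_files = {
  'src/java/lib:lib': '/tmp/kythe/lib.entries',
  'src/scala/app:app': '/tmp/kythe/app.entries',
}
for tgt, entries in kythe_entries_files.items():
  # The real task archives `entries` into dist/kythe; here we just report it.
  print('bundling {} for {}'.format(entries, tgt))
```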
From 01a5b5c9313cae02c8fd866c4d2524fe46952a85 Mon Sep 17 00:00:00 2001
From: Benjy Weinberger
Date: Sat, 10 Feb 2018 17:19:54 -0800
Subject: [PATCH 20/21] Prepare the 1.5.0dev3 release. (#5454)

---
 src/python/pants/VERSION          |  2 +-
 src/python/pants/notes/master.rst | 14 ++++++++++++++
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/src/python/pants/VERSION b/src/python/pants/VERSION
index 4db3f644be6..ab9b284c7d7 100644
--- a/src/python/pants/VERSION
+++ b/src/python/pants/VERSION
@@ -1 +1 @@
-1.5.0.dev2
+1.5.0.dev3
diff --git a/src/python/pants/notes/master.rst b/src/python/pants/notes/master.rst
index 27c7a71e906..51451b52d23 100644
--- a/src/python/pants/notes/master.rst
+++ b/src/python/pants/notes/master.rst
@@ -5,6 +5,20 @@ This document describes ``dev`` releases which occur weekly from master, and which do
 not undergo the vetting associated with ``stable`` releases.
 
 
+1.5.0.dev3 (02/10/2018)
+-----------------------
+
+New Features
+~~~~~~~~~~~~
+* Python distribution task for user-defined setup.py + integration with ./pants {run/binary/test} (#5141)
+  `PR #5141 <https://github.com/pantsbuild/pants/pull/5141>`_
+
+Refactoring, Improvements, and Tooling
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+* Bundle all kythe entries, regardless of origin. (#5450)
+  `PR #5450 <https://github.com/pantsbuild/pants/pull/5450>`_
+
+
 1.5.0.dev2 (02/05/2018)
 -----------------------

From 569f14c2682444729340101929fee07edfd959fa Mon Sep 17 00:00:00 2001
From: Danny McClanahan <1305167+cosmicexplorer@users.noreply.github.com>
Date: Mon, 12 Feb 2018 07:17:11 -0800
Subject: [PATCH 21/21] fix/tests: subsystems can't declare dependencies on
 non-globally-scoped subsystems (#5456)

---
 .gitignore                                          |  1 +
 src/python/pants/subsystem/subsystem.py             |  4 ++--
 tests/python/pants_test/subsystem/test_subsystem.py | 12 ++++++++++++
 3 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/.gitignore b/.gitignore
index 0b82bf30d82..b4e02377338 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,6 +28,7 @@
 .factorypath
 .python
 .pydevproject
+.pytest_cache/
 build/
 codegen/classes/
 htmlcov/
diff --git a/src/python/pants/subsystem/subsystem.py b/src/python/pants/subsystem/subsystem.py
index 9c12850315a..ecffba314ed 100644
--- a/src/python/pants/subsystem/subsystem.py
+++ b/src/python/pants/subsystem/subsystem.py
@@ -104,8 +104,8 @@ def collect_subsystems(subsystem):
       path.add(subsystem)
       if subsystem not in known_subsystem_types:
         known_subsystem_types.add(subsystem)
-        for dependency in subsystem.subsystem_dependencies():
-          collect_subsystems(dependency)
+        for dependency in subsystem.subsystem_dependencies_iter():
+          collect_subsystems(dependency.subsystem_cls)
       path.remove(subsystem)
 
     for subsystem_type in subsystem_types:
diff --git a/tests/python/pants_test/subsystem/test_subsystem.py b/tests/python/pants_test/subsystem/test_subsystem.py
index 91c85abb8db..3dc87e25612 100644
--- a/tests/python/pants_test/subsystem/test_subsystem.py
+++ b/tests/python/pants_test/subsystem/test_subsystem.py
@@ -28,6 +28,16 @@ class UninitializedSubsystem(Subsystem):
   options_scope = 'uninitialized-scope'
 
 
+class ScopedDependentSubsystem(Subsystem):
+  options_scope = 'scoped-dependent-subsystem'
+
+  @classmethod
+  def subsystem_dependencies(cls):
+    return super(ScopedDependentSubsystem, cls).subsystem_dependencies() + (
+      DummySubsystem.scoped(cls),
+    )
+
+
 class SubsystemTest(unittest.TestCase):
   def setUp(self):
     DummySubsystem._options = DummyOptions()
@@ -52,6 +62,8 @@ class NoScopeSubsystem(Subsystem):
   def test_closure_simple(self):
     self.assertEqual({DummySubsystem}, Subsystem.closure((DummySubsystem,)))
+    self.assertEqual({DummySubsystem, ScopedDependentSubsystem},
+                     Subsystem.closure((ScopedDependentSubsystem,)))
 
   def test_closure_tree(self):
     class SubsystemB(Subsystem):