This repository has been archived by the owner on Dec 16, 2022. It is now read-only.

Add allennlp test-install command #1213

Merged · 17 commits · May 16, 2018
4 changes: 2 additions & 2 deletions allennlp/commands/__init__.py
@@ -1,7 +1,6 @@
from typing import Dict
import argparse
import logging
import sys

from allennlp.commands.elmo import Elmo
from allennlp.commands.evaluate import Evaluate
@@ -11,8 +10,8 @@
from allennlp.commands.serve import Serve
from allennlp.commands.dry_run import DryRun
from allennlp.commands.subcommand import Subcommand
from allennlp.commands.test_install import TestInstall
from allennlp.commands.train import Train
from allennlp.service.predictors import DemoModel
from allennlp.common.util import import_submodules

logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@@ -40,6 +39,7 @@ def main(prog: str = None,
"elmo": Elmo(),
"fine-tune": FineTune(),
"dry-run": DryRun(),
"test-install": TestInstall(),

# Superseded by overrides
**subcommand_overrides
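
For readers skimming the diff: ``main()`` keeps a registry that maps each CLI name to a ``Subcommand`` instance, and this change simply adds a ``test-install`` entry to that mapping. Below is a minimal sketch of the dispatch pattern using only the names visible above; the ``build_parser`` helper is illustrative, not AllenNLP's actual code.

import argparse
from typing import Dict

from allennlp.commands.subcommand import Subcommand
from allennlp.commands.test_install import TestInstall


def build_parser(prog: str = None,
                 subcommand_overrides: Dict[str, Subcommand] = None) -> argparse.ArgumentParser:
    # Illustrative registry: each CLI name maps to a Subcommand that attaches
    # its own subparser; this PR adds the "test-install" entry.
    subcommands = {
        "test-install": TestInstall(),
        **(subcommand_overrides or {}),  # callers may add or replace commands
    }

    parser = argparse.ArgumentParser(prog=prog)
    subparsers = parser.add_subparsers(title="Commands")
    for name, subcommand in subcommands.items():
        subcommand.add_subparser(name, subparsers)
    return parser


# Dispatch goes through the handler that TestInstall registered with
# set_defaults(func=_run_test); calling it actually runs the test suite.
args = build_parser("allennlp-sketch").parse_args(["test-install"])
args.func(args)
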
66 changes: 66 additions & 0 deletions allennlp/commands/test_install.py
@@ -0,0 +1,66 @@
"""
The ``test-install`` subcommand verifies
an installation by running the unit tests.

.. code-block:: bash

$ allennlp test-install --help
usage: allennlp test-install [-h] [--run-all]
[--include-package INCLUDE_PACKAGE]

Test that installation works by running the unit tests.

optional arguments:
-h, --help show this help message and exit
--run-all By default, we skip tests that are slow or download
large files. This flag will run all tests.
--include-package INCLUDE_PACKAGE
additional packages to include
"""

import argparse
import logging
import os

import pytest

from allennlp.commands.subcommand import Subcommand

logger = logging.getLogger(__name__) # pylint: disable=invalid-name

class TestInstall(Subcommand):
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
# pylint: disable=protected-access
description = '''Test that installation works by running the unit tests.'''
subparser = parser.add_parser(
name, description=description, help='Run the unit tests.')

subparser.add_argument('--run-all', action="store_true",
help="By default, we skip tests that are slow "
"or download large files. This flag will run all tests.")

subparser.set_defaults(func=_run_test)

return subparser


def _get_project_root():
return os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir, os.pardir))


def _run_test(args: argparse.Namespace):
initial_working_dir = os.getcwd()
project_root = _get_project_root()
logger.info("Changing directory to %s", project_root)
os.chdir(project_root)
test_dir = os.path.join(project_root, "tests")
logger.info("Running tests at %s", test_dir)
if args.run_all:
pytest.main([test_dir])
else:
pytest.main([test_dir, '-k', 'not sniff_test and not notebooks_test'])
# Change back to original working directory after running tests
os.chdir(initial_working_dir)
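
For reference, ``_run_test`` is a thin wrapper around ``pytest.main``. Here is a hedged sketch of what ``allennlp test-install`` effectively does when run without ``--run-all``; the ``project_root`` computation is a stand-in for ``_get_project_root`` and may need adjusting for your layout.

import os

import pytest

# Stand-in for _get_project_root(): the directory that contains the
# package's tests/ folder.
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
test_dir = os.path.join(project_root, "tests")

# Default behaviour: deselect the slow sniff and notebook tests by name.
exit_code = pytest.main([test_dir, "-k", "not sniff_test and not notebooks_test"])

# With --run-all the name filter is dropped:
# exit_code = pytest.main([test_dir])
print("pytest exit code:", exit_code)
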
20 changes: 12 additions & 8 deletions doc/api/allennlp.commands.rst
@@ -17,14 +17,17 @@ The included module ``allennlp.run`` is such a script:
-h, --help show this help message and exit

Commands:
train Train a model
evaluate Evaluate the specified model + dataset
predict Use a trained model to make predictions.
serve Run the web service and demo.
make-vocab
Create a vocabulary
elmo Use a trained model to make predictions.
dry-run Create a vocabulary, compute dataset statistics and other training utilities.
train Train a model
evaluate Evaluate the specified model + dataset
predict Use a trained model to make predictions.
serve Run the web service and demo.
make-vocab Create a vocabulary
elmo Use a trained model to make predictions.
fine-tune Continue training a model on a new dataset
dry-run Create a vocabulary, compute dataset statistics and other
training utilities.
test-install
Run the unit tests.

However, it only knows about the models and classes that are
included with AllenNLP. Once you start creating custom models,
@@ -41,6 +44,7 @@ calls ``main()``.
allennlp.commands.fine_tune
allennlp.commands.elmo
allennlp.commands.dry_run
allennlp.commands.test_install

.. automodule:: allennlp.commands
:members:
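
The docs above note that ``main()`` only knows about the built-in commands and that custom ones are passed in through ``subcommand_overrides``. A small sketch of a user-defined command modeled on ``TestInstall``; the ``hello`` command and ``_say_hello`` handler are hypothetical, and this assumes ``subcommand_overrides`` is accepted as a keyword argument, as the diff suggests.

import argparse

from allennlp.commands import main
from allennlp.commands.subcommand import Subcommand


class Hello(Subcommand):
    """Hypothetical custom command, structured like TestInstall."""

    def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
        # pylint: disable=protected-access
        subparser = parser.add_parser(name, description="Say hello.", help="Say hello.")
        subparser.add_argument("--name", default="world")
        subparser.set_defaults(func=_say_hello)
        return subparser


def _say_hello(args: argparse.Namespace):
    print("hello, {}".format(args.name))


# In a user script, main() parses sys.argv, so you would run e.g.
#   python my_cli.py hello --name Ada
main(prog="my-cli", subcommand_overrides={"hello": Hello()})
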
4 changes: 4 additions & 0 deletions doc/api/allennlp.commands.test_install.rst
@@ -0,0 +1,4 @@
allennlp.commands.test_install
==============================

.. automodule:: allennlp.commands.test_install
11 changes: 11 additions & 0 deletions requirements.txt
@@ -66,3 +66,14 @@ h5py

# For timezone utilities
pytz==2017.3

#### ESSENTIAL TESTING-RELATED PACKAGES ####
# We'll use pytest to run our tests; this isn't really necessary to run the code, but it is to run
# the tests. With this here, you can run the tests with `py.test` from the base directory.
pytest

# Allows marking tests as flaky, to be rerun if they fail
flaky

# Required to mock out `requests` calls
responses>=0.7
10 changes: 0 additions & 10 deletions requirements_test.txt
@@ -3,10 +3,6 @@
# Checks style, syntax, and other useful errors.
pylint==1.8.1

# We'll use pytest to run our tests; this isn't really necessary to run the code, but it is to run
# the tests. With this here, you can run the tests with `py.test` from the base directory.
pytest

# Tutorial notebooks
jupyter

@@ -16,19 +12,13 @@ mypy==0.521
# Allows generation of coverage reports with pytest.
pytest-cov

# Allows marking tests as flaky, to be rerun if they fail
flaky

# Allows codecov to generate coverage reports
coverage
codecov

# Required to run sanic tests
aiohttp

# Required to mock out `requests` calls
responses>=0.7

#### DOC-RELATED PACKAGES ####

# Builds our documentation.
5 changes: 4 additions & 1 deletion setup.py
@@ -120,7 +120,10 @@
'scikit-learn',
'scipy',
'pytz==2017.3',
'unidecode'
'unidecode',
'pytest',
'flaky',
'responses>=0.7'
],
scripts=["bin/allennlp"],
setup_requires=setup_requirements,
13 changes: 13 additions & 0 deletions tests/commands/test_install_test.py
@@ -0,0 +1,13 @@
# pylint: disable=invalid-name,no-self-use
import os

from allennlp.common.testing import AllenNlpTestCase
from allennlp.commands.test_install import _get_project_root


class TestTestInstall(AllenNlpTestCase):
def test_get_project_root(self):
project_root = _get_project_root()
assert os.path.exists(os.path.join(project_root, "tests"))
assert os.path.exists(os.path.join(project_root, "LICENSE"))
assert os.path.exists(os.path.join(project_root, "setup.py"))
10 changes: 7 additions & 3 deletions tests/notebooks_test.py
@@ -1,8 +1,12 @@
import os

import nbformat
from nbconvert.preprocessors.execute import CellExecutionError
from nbconvert.preprocessors import ExecutePreprocessor
try:
import nbformat
from nbconvert.preprocessors.execute import CellExecutionError
from nbconvert.preprocessors import ExecutePreprocessor
except ModuleNotFoundError:
print("jupyter must be installed in order to run notebook tests. "
"To install with pip, run: pip install jupyter")

from allennlp.common.testing import AllenNlpTestCase

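
The ``try``/``except`` above only prints a hint when ``jupyter`` is missing; since ``test-install`` deselects ``notebooks_test`` by default, that is usually enough. An alternative, sketched here as an option rather than as what the PR does, is ``pytest.importorskip``, which skips the whole module cleanly when the optional dependency is absent.

import pytest

# Skip this test module during collection if the optional notebook
# dependencies are not installed, instead of printing a hint.
nbformat = pytest.importorskip("nbformat")
nbconvert = pytest.importorskip("nbconvert")

from nbconvert.preprocessors.execute import CellExecutionError  # noqa: E402
from nbconvert.preprocessors import ExecutePreprocessor  # noqa: E402
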