diff --git a/.coveragerc b/.coveragerc index 70e985016..3e659c1f4 100644 --- a/.coveragerc +++ b/.coveragerc @@ -3,4 +3,10 @@ branch = True source = src/ omit = src/pynwb/_version.py + src/pynwb/_due.py src/pynwb/testing/* + +[report] +exclude_lines = + pragma: no cover + @abstract diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 9a888d4dc..ba3fdb027 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -20,14 +20,14 @@ assignees: '' Python Executable: Conda or Python - Python Version: Python 3.5, 3.6, or 3.7 + Python Version: Python 3.6, 3.7, or 3.8 Operating System: Windows, macOS or Linux - HDMF Version: + HDMF Version: PyNWB Version: ## Checklist -- [ ] Have you ensured the feature or change was not already [reported](https://github.com/NeurodataWithoutBorders/pynwb/issues)? +- [ ] Have you ensured the bug was not already [reported](https://github.com/NeurodataWithoutBorders/pynwb/issues)? - [ ] Have you included a brief and descriptive title? - [ ] Have you included a clear description of the problem you are trying to solve? - [ ] Have you included a minimal code snippet that reproduces the issue you are encountering? diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d3b9783b1..3542c3f8c 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -4,13 +4,14 @@ What was the reasoning behind this change? Please explain the changes briefly. ## How to test the behavior? ``` -Show here how to reproduce the new behavior (can be a bug fix or a new feature) +Show how to reproduce the new behavior (can be a bug fix or a new feature) ``` ## Checklist +- [ ] Did you update CHANGELOG.md with your changes? - [ ] Have you checked our [Contributing](https://github.com/NeurodataWithoutBorders/pynwb/blob/dev/docs/CONTRIBUTING.rst) document? -- [ ] Have you ensured the PR description clearly describes problem and the solution? +- [ ] Have you ensured the PR clearly describes the problem and the solution? - [ ] Is your contribution compliant with our coding style? This can be checked running `flake8` from the source directory. - [ ] Have you checked to ensure that there aren't other open [Pull Requests](https://github.com/NeurodataWithoutBorders/pynwb/pulls) for the same change? -- [ ] Have you included the relevant issue number using `#XXX` notation where `XXX` is the issue number? +- [ ] Have you included the relevant issue number using "Fix #XXX" notation where XXX is the issue number? By including "Fix #XXX" you allow GitHub to close issue #XXX when the PR is merged. diff --git a/.github/PULL_REQUEST_TEMPLATE/release.md b/.github/PULL_REQUEST_TEMPLATE/release.md new file mode 100644 index 000000000..bf73b7df7 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/release.md @@ -0,0 +1,20 @@ +Prepare for release of PyNWB [version] + +### Before merging: +- [ ] Minor releases: Update package versions in `requirements.txt`, `requirements-dev.txt`, `requirements-doc.txt`, `requirements-min.txt` as needed. See https://requires.io/github/NeurodataWithoutBorders/pynwb/requirements/?branch=dev +- [ ] Check legal file dates and information in `Legal.txt`, `license.txt`, `README.rst`, `docs/source/conf.py`, and any other locations as needed +- [ ] Update `setup.py` as needed +- [ ] Update `README.rst` as needed +- [ ] Update `src/pynwb/nwb-schema` submodule as needed. Check the version number manually. 
+- [ ] Update changelog (set release date) in `CHANGELOG.md` and any other docs as needed +- [ ] Run tests locally including gallery tests and validation tests, and inspect all warnings and outputs (`python test.py -v > out.txt`) +- [ ] Test docs locally (`make apidoc`, `make html`) +- [ ] Push changes to this PR and make sure all PRs to be included in this release have been merged +- [ ] Check that the readthedocs build for this PR succeeds (build latest to pull the new branch, then activate and + build docs for new branch): https://readthedocs.org/projects/pynwb/builds/ + +### After merging: +1. Create release by following steps in `docs/source/make_a_release.rst` or use alias `git pypi-release [tag]` if set up +2. After the CI bot creates the new release (wait ~10 min), update the release notes on the [GitHub releases page](https://github.com/NeurodataWithoutBorders/pynwb/releases) with the changelog +3. Check that the readthedocs "latest" and "stable" builds run and succeed +4. Update [conda-forge/pynwb-feedstock](https://github.com/conda-forge/pynwb-feedstock) diff --git a/.gitignore b/.gitignore index d2565814b..db7652720 100644 --- a/.gitignore +++ b/.gitignore @@ -59,6 +59,9 @@ docs/tmpl.index.rst tests/coverage/htmlcov .coverage +# duecredit output +.duecredit.p + # tox .tox diff --git a/CHANGELOG.md b/CHANGELOG.md index 171e0eb2a..419cf06d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,11 @@ - Update documentation, CI GitHub processes. @oruebel @yarikoptic, @bendichter, @TomDonoghue, @rly (#1311, #1336, #1351, #1352, #1345, #1340, #1327) - Set default `neurodata_type_inc` for `NWBGroupSpec`, `NWBDatasetSpec`. @rly (#1295) +- Block usage of h5py 3+ for now. h5py>=2.9, <3 is supported. (#1355) +- Fix incompatibility issue with downstream github-release tool used to deploy releases to GitHub. @rly (#1245) +- Fix issue with Sphinx gallery. @rly +- Add citation information to documentation and support for duecredit tool. @rly +- Remove use of ColoredTestRunner for more readable verbose test output. @rly - Add support for nwb-schema 2.3.0. @rly (#1245, #1330) - Add optional `waveforms` column to the `Units` table. - Add optional `strain` field to `Subject`. @@ -21,7 +26,25 @@ - Clarify documentation for electrode impedance and filtering. - Set the `stimulus_description` for `IZeroCurrentClamp` to have the fixed value "N/A". - See https://nwb-schema.readthedocs.io/en/latest/format_release_notes.html for full schema release notes. -- Add support for HDMF 2.5.3. @rly @ajtritt (#1325, #1355, #1360, #1245, #1287) +- Add support for HDMF 2.5.5 and upgrade HDMF requirement from 2.1.0 to 2.5.5. @rly @ajtritt + (#1325, #1355, #1360, #1245, #1287). This includes several relevant features and bug fixes, including: + - Fix issue where dependencies of included types were not being loaded in namespaces / extensions. + - Add `HDF5IO.get_namespaces(path=path, file=file)` method which returns a dict of namespace name mapped to the + namespace version (the largest one if there are multiple) for each namespace cached in the given HDF5 file. + - Add methods for automatic creation of `MultiContainerInterface` classes. + - Add ability to specify a custom class for new columns to a `DynamicTable` that are not `VectorData`, + `DynamicTableRegion`, or `VocabData` using `DynamicTable.__columns__` or `DynamicTable.add_column(...)`. + - Add support for creating and specifying multi-index columns in a `DynamicTable` using `add_column(...)`. + - Add capability to add a row to a column after IO. 
+ - Add method `AbstractContainer.get_fields_conf`. + - Add functionality for storing external resource references. + - Add method `hdmf.utils.get_docval_macro` to get a tuple of the current values for a docval_macro, e.g., 'array_data' + and 'scalar_data'. + - `DynamicTable` can be automatically generated using `get_class`. Now the HDMF API can read files with extensions + that contain a DynamicTable without needing to import the extension first. + - Add `EnumData` type for storing data that comes from a fixed set of values. + - Add `AlignedDynamicTable` type which defines a DynamicTable that supports storing a collection of subtables. + - Allow `np.bool_` as a valid `bool` dtype when validating. - See https://github.com/hdmf-dev/hdmf/releases for full HDMF release notes. ## PyNWB 1.4.0 (August 12, 2020) diff --git a/MANIFEST.in b/MANIFEST.in index 9fa9268fe..1b9053bf2 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,4 @@ -include license.txt Legal.txt versioneer.py src/pynwb/_version.py +include license.txt Legal.txt versioneer.py src/pynwb/_version.py src/pynwb/_due.py include requirements.txt requirements-dev.txt requirements-doc.txt requirements-min.txt include test.py tox.ini diff --git a/README.rst b/README.rst index 580d395ae..0cc44a94d 100644 --- a/README.rst +++ b/README.rst @@ -37,17 +37,20 @@ Build Status Overall Health ============== +.. image:: https://github.com/NeurodataWithoutBorders/pynwb/workflows/Run%20coverage/badge.svg + :target: https://github.com/NeurodataWithoutBorders/pynwb/actions?query=workflow%3A%22Run+coverage%22 + .. image:: https://codecov.io/gh/NeurodataWithoutBorders/pynwb/branch/dev/graph/badge.svg :target: https://codecov.io/gh/NeurodataWithoutBorders/pynwb .. image:: https://requires.io/github/NeurodataWithoutBorders/pynwb/requirements.svg?branch=dev :target: https://requires.io/github/NeurodataWithoutBorders/pynwb/requirements/?branch=dev :alt: Requirements Status - + .. image:: https://readthedocs.org/projects/pynwb/badge/?version=latest :target: https://pynwb.readthedocs.io/en/latest/?badge=latest :alt: Documentation Status - + .. image:: https://img.shields.io/pypi/l/pynwb.svg :target: https://github.com/neurodatawithoutborders/pynwb/blob/dev/license.txt :alt: PyPI - License @@ -82,6 +85,11 @@ Contributing For details on how to contribute to PyNWB see our `contribution guidelines `_. +Citing NWB +========== + +Oliver Rübel, Andrew Tritt, Ryan Ly, Benjamin K. Dichter, Satrajit Ghosh, Lawrence Niu, Ivan Soltesz, Karel Svoboda, Loren Frank, Kristofer E. Bouchard, "The Neurodata Without Borders ecosystem for neurophysiological data science", bioRxiv 2021.03.13.435173, March 15, 2021, doi: https://doi.org/10.1101/2021.03.13.435173 + LICENSE ======= diff --git a/docs/gallery/domain/brain_observatory.py b/docs/gallery/domain/brain_observatory.py index 6347864ba..9be41de72 100644 --- a/docs/gallery/domain/brain_observatory.py +++ b/docs/gallery/domain/brain_observatory.py @@ -142,9 +142,9 @@ ) ######################################## -# The Allen Institute does not include the raw imaging signal, as this data would make the file too large. Instead, these -# data are preprocessed, and a dF/F fluorescence signal extracted for each region-of-interest (ROI). To store the chain -# of computations necessary to describe this data processing pipeline, pynwb provides a "processing module" with +# The Allen Institute does not include the raw imaging signal, as this data would make the file too large. 
Instead, +# these data are preprocessed, and a dF/F fluorescence signal extracted for each region-of-interest (ROI). To store the +# chain of computations necessary to describe this data processing pipeline, pynwb provides a "processing module" with # interfaces that simplify and standardize the process of adding the steps in this provenance chain to the file: ophys_module = nwbfile.create_processing_module( name='ophys_module', diff --git a/docs/source/conf.py b/docs/source/conf.py index c89d2b536..63923cff0 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -65,17 +65,16 @@ 'gallery_dirs': ['tutorials'], 'subsection_order': ExplicitOrder(['../gallery/general', '../gallery/domain']), 'backreferences_dir': 'gen_modules/backreferences', - 'download_section_examples': False, 'min_reported_time': 5 } intersphinx_mapping = { 'python': ('https://docs.python.org/3.8', None), - 'numpy': ('https://docs.scipy.org/doc/numpy/', None), + 'numpy': ('https://numpy.org/doc/stable/objects.inv', None), 'matplotlib': ('https://matplotlib.org', None), - 'h5py': ('http://docs.h5py.org/en/latest/', None), + 'h5py': ('https://docs.h5py.org/en/latest/', None), 'hdmf': ('https://hdmf.readthedocs.io/en/latest/', None), - 'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None) + 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None) } # Add any paths that contain templates here, relative to this directory. @@ -326,5 +325,5 @@ def skip(app, what, name, obj, skip, options): def setup(app): app.connect('builder-inited', run_apidoc) - app.add_stylesheet("theme_overrides.css") # overrides for wide tables in RTD theme + app.add_css_file("theme_overrides.css") # overrides for wide tables in RTD theme app.connect("autodoc-skip-member", skip) diff --git a/docs/source/index.rst b/docs/source/index.rst index 8f7f13392..4108a1717 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -33,6 +33,7 @@ breaking down the barriers to data sharing in neuroscience. overview_intro overview_software_architecture overview_nwbfile + overview_citing .. toctree:: :maxdepth: 2 diff --git a/docs/source/overview_citing.rst b/docs/source/overview_citing.rst new file mode 100644 index 000000000..d449015d6 --- /dev/null +++ b/docs/source/overview_citing.rst @@ -0,0 +1,54 @@ +.. _citing: + +Citing PyNWB +============ + +BibTeX entry +------------ + +If you use PyNWB in your research, please use the following citation: + +.. code-block:: bibtex + + @article {R{\"u}bel2021.03.13.435173, + author = {R{\"u}bel, Oliver and Tritt, Andrew and Ly, Ryan and Dichter, Benjamin K. and Ghosh, Satrajit and Niu, Lawrence and Soltesz, Ivan and Svoboda, Karel and Frank, Loren and Bouchard, Kristofer E.}, + title = {The Neurodata Without Borders ecosystem for neurophysiological data science}, + elocation-id = {2021.03.13.435173}, + year = {2021}, + doi = {10.1101/2021.03.13.435173}, + publisher = {Cold Spring Harbor Laboratory}, + abstract = {The neurophysiology of cells and tissues are monitored electrophysiologically and optically in diverse experiments and species, ranging from flies to humans. Understanding the brain requires integration of data across this diversity, and thus these data must be findable, accessible, interoperable, and reusable (FAIR). This requires a standard language for data and metadata that can coevolve with neuroscience. We describe design and implementation principles for a language for neurophysiology data. 
Our software (Neurodata Without Borders, NWB) defines and modularizes the interdependent, yet separable, components of a data language. We demonstrate NWB{\textquoteright}s impact through unified description of neurophysiology data across diverse modalities and species. NWB exists in an ecosystem which includes data management, analysis, visualization, and archive tools. Thus, the NWB data language enables reproduction, interchange, and reuse of diverse neurophysiology data. More broadly, the design principles of NWB are generally applicable to enhance discovery across biology through data FAIRness.Competing Interest StatementThe authors have declared no competing interest.},
+    URL = {https://www.biorxiv.org/content/early/2021/03/15/2021.03.13.435173},
+    eprint = {https://www.biorxiv.org/content/early/2021/03/15/2021.03.13.435173.full.pdf},
+    journal = {bioRxiv}
+  }
+
+Using duecredit
+-----------------
+
+Citations can be generated using duecredit_. To install duecredit, run ``pip install duecredit``.
+
+You can obtain a list of citations for your Python script, e.g., ``yourscript.py``, using:
+
+.. code-block:: bash
+
+    cd /path/to/your/module
+    python -m duecredit yourscript.py
+
+Alternatively, you can set the environment variable ``DUECREDIT_ENABLE=yes`` when running your script:
+
+.. code-block:: bash
+
+    DUECREDIT_ENABLE=yes python yourscript.py
+
+Citations will be saved in a hidden file (``.duecredit.p``) in the current directory. You can then use the duecredit_
+command line tool to export the citations to different formats. For example, you can display your citations in
+BibTeX format using:
+
+.. code-block:: bash
+
+    duecredit summary --format=bibtex
+
+For more information on using duecredit, please consult its `homepage <https://github.com/duecredit/duecredit>`_.
+
+.. _duecredit: https://github.com/duecredit/duecredit
diff --git a/requirements-dev.txt b/requirements-dev.txt
index f5a4edb2d..563995cf9 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,7 +1,9 @@
-codecov==2.1.8
-coverage==5.2
-flake8==3.8.3
-flake8-debugger==3.1.0
-flake8-print==3.1.4
-importlib-metadata<2
-tox==3.17.1
+# pinned dependencies to reproduce an entire development environment to use PyNWB, run PyNWB tests, check code style,
+# compute coverage, and create test environments
+codecov==2.1.11
+coverage==5.5
+flake8==3.9.1
+flake8-debugger==4.0.0
+flake8-print==4.0.0
+importlib-metadata==4.0.1
+tox==3.23.0
diff --git a/requirements-doc.txt b/requirements-doc.txt
index c311a0bb4..5f7ceff81 100644
--- a/requirements-doc.txt
+++ b/requirements-doc.txt
@@ -1,3 +1,4 @@
+# dependencies to generate the documentation for PyNWB
 sphinx
 matplotlib
 sphinx_rtd_theme
diff --git a/requirements.txt b/requirements.txt
index f8c374bda..926d5a00d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,7 @@
+# pinned dependencies to reproduce an entire development environment to use PyNWB
 h5py==2.10.0
 hdmf==2.5.5
-numpy==1.18.5
-pandas==0.25.3
+numpy==1.19.3
+pandas==1.1.5
 python-dateutil==2.8.1
 setuptools==56.0.0
diff --git a/setup.cfg b/setup.cfg
index 1b3b3aa0e..1f5cbfee7 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -15,18 +15,25 @@ exclude =
     .git,
     .tox,
     __pycache__,
-    docs/_build/,
+    build/,
     dist/,
     src/pynwb/nwb-schema/
+    docs/_build/,
     docs/source/conf.py
-    versioneer.py
+    docs/source/tutorials/
+    versioneer.py,
+    src/pynwb/_version.py
+    src/pynwb/_due.py
 per-file-ignores =
-    docs/gallery/*:E402,
-    docs/source/tutorials/*:E402
+    docs/gallery/*:E402,T001
     src/pynwb/io/__init__.py:F401
     src/pynwb/legacy/io/__init__.py:F401
     tests/integration/__init__.py:F401
src/pynwb/testing/__init__.py:F401 + src/pynwb/validate.py:T001 + setup.py:T001 + test.py:T001 + scripts/*:T001 [metadata] description-file = README.rst diff --git a/setup.py b/setup.py index 5a2dcce91..c94edf3d5 100755 --- a/setup.py +++ b/setup.py @@ -36,7 +36,6 @@ 'package_data': {'pynwb': ["%s/*.yaml" % schema_dir, "%s/*.json" % schema_dir]}, 'classifiers': [ "Programming Language :: Python", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", diff --git a/src/pynwb/__init__.py b/src/pynwb/__init__.py index 4f73d034a..f91ffc0d7 100644 --- a/src/pynwb/__init__.py +++ b/src/pynwb/__init__.py @@ -279,3 +279,21 @@ def export(self, **kwargs): from ._version import get_versions # noqa: E402 __version__ = get_versions()['version'] del get_versions + +from ._due import due, BibTeX # noqa: E402 +due.cite(BibTeX(""" +@article {R{\"u}bel2021.03.13.435173, + author = {R{\"u}bel, Oliver and Tritt, Andrew and Ly, Ryan and Dichter, Benjamin K. and Ghosh, Satrajit and Niu, Lawrence and Soltesz, Ivan and Svoboda, Karel and Frank, Loren and Bouchard, Kristofer E.}, + title = {The Neurodata Without Borders ecosystem for neurophysiological data science}, + elocation-id = {2021.03.13.435173}, + year = {2021}, + doi = {10.1101/2021.03.13.435173}, + publisher = {Cold Spring Harbor Laboratory}, + abstract = {The neurophysiology of cells and tissues are monitored electrophysiologically and optically in diverse experiments and species, ranging from flies to humans. Understanding the brain requires integration of data across this diversity, and thus these data must be findable, accessible, interoperable, and reusable (FAIR). This requires a standard language for data and metadata that can coevolve with neuroscience. We describe design and implementation principles for a language for neurophysiology data. Our software (Neurodata Without Borders, NWB) defines and modularizes the interdependent, yet separable, components of a data language. We demonstrate NWB{\textquoteright}s impact through unified description of neurophysiology data across diverse modalities and species. NWB exists in an ecosystem which includes data management, analysis, visualization, and archive tools. Thus, the NWB data language enables reproduction, interchange, and reuse of diverse neurophysiology data. More broadly, the design principles of NWB are generally applicable to enhance discovery across biology through data FAIRness.Competing Interest StatementThe authors have declared no competing interest.}, + URL = {https://www.biorxiv.org/content/early/2021/03/15/2021.03.13.435173}, + eprint = {https://www.biorxiv.org/content/early/2021/03/15/2021.03.13.435173.full.pdf}, + journal = {bioRxiv} +} +"""), description="The Neurodata Without Borders ecosystem for neurophysiological data science", # noqa: E501 + path="pynwb/", version=__version__, cite_module=True) +del due, BibTeX diff --git a/src/pynwb/_due.py b/src/pynwb/_due.py new file mode 100644 index 000000000..9a1c4dd08 --- /dev/null +++ b/src/pynwb/_due.py @@ -0,0 +1,74 @@ +# emacs: at the end of the file +# ex: set sts=4 ts=4 sw=4 et: +# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### # +""" + +Stub file for a guaranteed safe import of duecredit constructs: if duecredit +is not available. + +To use it, place it into your project codebase to be imported, e.g. 
copy as + + cp stub.py /path/tomodule/module/due.py + +Note that it might be better to avoid naming it duecredit.py to avoid shadowing +installed duecredit. + +Then use in your code as + + from .due import due, Doi, BibTeX, Text + +See https://github.com/duecredit/duecredit/blob/master/README.md for examples. + +Origin: Originally a part of the duecredit +Copyright: 2015-2019 DueCredit developers +License: BSD-2 +""" + +__version__ = '0.0.8' + + +class InactiveDueCreditCollector(object): + """Just a stub at the Collector which would not do anything""" + def _donothing(self, *args, **kwargs): + """Perform no good and no bad""" + pass + + def dcite(self, *args, **kwargs): + """If I could cite I would""" + def nondecorating_decorator(func): + return func + return nondecorating_decorator + + active = False + activate = add = cite = dump = load = _donothing + + def __repr__(self): + return self.__class__.__name__ + '()' + + +def _donothing_func(*args, **kwargs): + """Perform no good and no bad""" + pass + + +try: + from duecredit import due, BibTeX, Doi, Url, Text + if 'due' in locals() and not hasattr(due, 'cite'): + raise RuntimeError( + "Imported due lacks .cite. DueCredit is now disabled") +except Exception as e: + if not isinstance(e, ImportError): + import logging + logging.getLogger("duecredit").error( + "Failed to import duecredit due to %s" % str(e)) + # Initiate due stub + due = InactiveDueCreditCollector() + BibTeX = Doi = Url = Text = _donothing_func + +# Emacs mode definitions +# Local Variables: +# mode: python +# py-indent-offset: 4 +# tab-width: 4 +# indent-tabs-mode: nil +# End: diff --git a/test.py b/test.py index e74fa1890..234ef51f3 100755 --- a/test.py +++ b/test.py @@ -11,7 +11,6 @@ import sys import traceback import unittest -from tests.coloredtestrunner import ColoredTestRunner, ColoredTestResult flags = {'pynwb': 2, 'integration': 3, 'example': 4, 'backwards': 5, 'validation': 6} @@ -20,14 +19,34 @@ ERRORS = 0 +class SuccessRecordingResult(unittest.TextTestResult): + '''A unittest test result class that stores successful test cases as well + as failures and skips. + ''' + + def addSuccess(self, test): + if not hasattr(self, 'successes'): + self.successes = [test] + else: + self.successes.append(test) + + def get_all_cases_run(self): + '''Return a list of each test case which failed or succeeded + ''' + cases = [] + + if hasattr(self, 'successes'): + cases.extend(self.successes) + cases.extend([failure[0] for failure in self.failures]) + + return cases + + def run_test_suite(directory, description="", verbose=True): global TOTAL, FAILURES, ERRORS logging.info("running %s" % description) directory = os.path.join(os.path.dirname(__file__), directory) - if verbose > 1: - runner = ColoredTestRunner(verbosity=verbose) - else: - runner = unittest.TextTestRunner(verbosity=verbose, resultclass=ColoredTestResult) + runner = unittest.TextTestRunner(verbosity=verbose, resultclass=SuccessRecordingResult) test_result = runner.run(unittest.TestLoader().discover(directory)) TOTAL += test_result.testsRun diff --git a/tests/coloredtestrunner.py b/tests/coloredtestrunner.py deleted file mode 100644 index e6fcb272f..000000000 --- a/tests/coloredtestrunner.py +++ /dev/null @@ -1,498 +0,0 @@ -# -*- coding: utf-8 -*- -""" -A ColoredTestRunner for use with the Python unit testing framework. It -generates a tabular report to show the result at a glance, with COLORS. 
- -coloredtestrunner.py was modified from code written by Vinícius Dantas and posted -as a gist: https://gist.github.com/viniciusd/73e6eccd39dea5e714b1464e3c47e067 -and demonstrated here: -https://stackoverflow.com/questions/17162682/display-python-unittest-results-in-nice-tabular-form/31665827#31665827 - -The code linked above is based on HTMLTestRunner -written by Wai Yip Tung. The BSD-3-Clause license covering HTMLTestRunner is below. - ------------------------------------------------------------------------- -Copyright (c) 2004-2007, Wai Yip Tung -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. -* Neither the name Wai Yip Tung nor the names of its contributors may be - used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER -OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -""" -import datetime -try: - from StringIO import StringIO -except ImportError: - from io import StringIO -import sys -import re -import unittest -import textwrap - - -# ------------------------------------------------------------------------ -# The redirectors below are used to capture output during testing. Output -# sent to sys.stdout and sys.stderr are automatically captured. However -# in some cases sys.stdout is already cached before HTMLTestRunner is -# invoked (e.g. calling logging.basicConfig). In order to capture those -# output, use the redirectors for the cached stream. -# -# e.g. 
-# >>> logging.basicConfig(stream=HTMLTestRunner.stdout_redirector) -# >>> - -class OutputRedirector(object): - """ Wrapper to redirect stdout or stderr """ - def __init__(self, fp): - self.fp = fp - - def write(self, s): - self.fp.write(s) - - def writelines(self, lines): - self.fp.writelines(lines) - - def flush(self): - self.fp.flush() - - -class Table(object): - - def __init__(self, padding='', allow_newlines=False): - self.__columnSize__ = [] - self.__rows__ = [] - self.__titles__ = None - self.padding = padding - self.allow_newlines = allow_newlines - - def __len__(self, x): - return len(re.sub(r"\033\[[0-9];[0-9];[0-9]{1,2}m", "", x)) - - def addRow(self, row): - rows = [[''] for i in range(len(row))] - maxrows = 0 - for i, x in enumerate(row): - for j, y in enumerate(x.split("\n")): - if len(y) == 0 and not self.allow_newlines: - continue - try: - self.__columnSize__[i] = max(self.__columnSize__[i], self.__len__(y)) - except IndexError: - self.__columnSize__.append(self.__len__(y)) - rows[i].append(y) - maxrows = max(j, maxrows) - for i in range(len(rows)): - rows[i] += (maxrows-len(rows[i])+2)*[''] - for i in range(maxrows + 1): - self.__rows__.append([rows[j][i+1] for j in range(len(row))]) - - def addTitles(self, titles): - for i, x in enumerate(titles): - try: - self.__columnSize__[i] = max(self.__columnSize__[i], self.__len__(x)) - except IndexError: - self.__columnSize__.append(self.__len__(x)) - self.__titles__ = titles - - def __repr__(self): - hline = self.padding+"+" - for x in self.__columnSize__: - hline += (x+2)*'-'+'+' - rows = [] - if self.__titles__ is None: - title = "" - else: - if len(self.__titles__) < len(self.__columnSize__): - self.__titles__ += ((len(self.__columnSize__)-len(self.__titles__))*['']) - for i, x in enumerate(self.__titles__): - self.__titles__[i] = x.center(self.__columnSize__[i]) - title = self.padding+"| "+" | ".join(self.__titles__)+" |\n"+hline+"\n" - for x in self.__rows__: - if len(x) < len(self.__columnSize__): - x += ((len(self.__columnSize__)-len(x))*['']) - for i, c in enumerate(x): - x[i] = c.ljust(self.__columnSize__[i])+(len(c)-self.__len__(c)-3)*' ' - rows.append(self.padding+"| "+" | ".join(x)+" |") - return hline+"\n"+title+"\n".join(rows)+"\n"+hline+"\n" - - -class bcolors(object): - FORMAT = { - 'Regular': '0', - 'Bold': '1', - 'Underline': '4', - 'High Intensity': '0', # +60 on color - 'BoldHighIntensity': '1', # +60 on color - } - START = "\033[" - COLOR = { - 'black': "0;30m", - 'red': "0;31m", - 'green': "0;32m", - 'yellow': "0;33m", - 'blue': "0;34m", - 'purple': "0;35m", - 'cyan': "0;36m", - 'white': "0;37m", - 'end': "0m", - } - - def __getattr__(self, name): - def handlerFunction(*args, **kwargs): - return self.START+self.FORMAT['Regular']+";"+self.COLOR[name.lower()] - return handlerFunction(name=name) - - -# ---------------------------------------------------------------------- -# Template - -class Template_mixin(object): - bc = bcolors() - - STATUS = { - 0: bc.GREEN+'pass'+bc.END, - 1: bc.PURPLE+'fail'+bc.END, - 2: bc.RED+'error'+bc.END, - } - - # ------------------------------------------------------------------------ - # Report - REPORT_TEST_WITH_OUTPUT_TMPL = r""" - %(desc)s - - %(status)s - - %(script)s - -""" # variables: (tid, Class, style, desc, status) - - REPORT_TEST_NO_OUTPUT_TMPL = r""" - %(desc)s - %(status)s -""" # variables: (tid, Class, style, desc, status) - - REPORT_TEST_OUTPUT_TMPL = r""" -%(output)s -""" # variables: (id, output) - - -class ColoredTestResult(unittest.TextTestResult): - - 
stdout_redirector = OutputRedirector(sys.stdout) - stderr_redirector = OutputRedirector(sys.stderr) - - def __init__(self, stream, descriptions, verbosity=1): - super(ColoredTestResult, self).__init__(stream, descriptions, verbosity) - self.stdout0 = None - self.stderr0 = None - self.success_count = 0 - self.failure_count = 0 - self.error_count = 0 - self.skip_count = 0 - self.verbosity = verbosity - - # deny TextTestResult showAll functionality - self.showAll = False - - # result is a list of result in 4 tuple - # ( - # result code (0: success; 1: fail; 2: error), - # TestCase object, - # Test output (byte string), - # stack trace, - # ) - self.result = [] - - def startTest(self, test): - super(ColoredTestResult, self).startTest(test) - # just one buffer for both stdout and stderr - self.outputBuffer = StringIO() - self.stdout_redirector.fp = self.outputBuffer - self.stderr_redirector.fp = self.outputBuffer - self.stdout0 = sys.stdout - self.stderr0 = sys.stderr - sys.stdout = self.stdout_redirector - sys.stderr = self.stderr_redirector - - def complete_output(self): - """ - Disconnect output redirection and return buffer. - Safe to call multiple times. - """ - if self.stdout0: - sys.stdout = self.stdout0 - sys.stderr = self.stderr0 - self.stdout0 = None - self.stderr0 = None - return self.outputBuffer.getvalue() - - def stopTest(self, test): - # Usually one of addSuccess, addError or addFailure would have been called. - # But there are some path in unittest that would bypass this. - # We must disconnect stdout in stopTest(), which is guaranteed to be called. - self.complete_output() - - def addSuccess(self, test): - self.success_count += 1 - super(ColoredTestResult, self).addSuccess(test) - output = self.complete_output() - self.result.append((0, test, output, '')) - if self.verbosity > 1: - sys.stdout.write('.') - sys.stdout.flush() - - if not hasattr(self, 'successes'): - self.successes = [test] - else: - self.successes.append(test) - - def addError(self, test, err): - self.error_count += 1 - super(ColoredTestResult, self).addError(test, err) - output = self.complete_output() - _, _exc_str = self.errors[-1] - self.result.append((2, test, output, _exc_str)) - if self.verbosity > 1: - sys.stdout.write('E') - sys.stdout.flush() - - def addFailure(self, test, err): - self.failure_count += 1 - super(ColoredTestResult, self).addFailure(test, err) - output = self.complete_output() - _, _exc_str = self.failures[-1] - self.result.append((1, test, output, _exc_str)) - if self.verbosity > 1: - sys.stdout.write('F') - sys.stdout.flush() - - def addSubTest(self, test, subtest, err): - if err is not None: - if issubclass(err[0], test.failureException): - self.addFailure(subtest, err) - else: - self.addError(subtest, err) - - def addSkip(self, test, reason): - self.skip_count += 1 - super(ColoredTestResult, self).addSkip(test, reason) - self.complete_output() - if self.verbosity > 1: - sys.stdout.write('s') - sys.stdout.flush() - - def get_all_cases_run(self): - '''Return a list of each test case which failed or succeeded - ''' - cases = [] - if hasattr(self, 'successes'): - cases.extend(self.successes) - cases.extend([failure[0] for failure in self.failures]) - return cases - - -class ColoredTestRunner(Template_mixin): - - def __init__(self, stream=sys.stdout, verbosity=1, title=None, description=None): - self.stream = stream - self.verbosity = verbosity - if title is None: - self.title = '' - else: - self.title = title - if description is None: - self.description = '' - else: - self.description = 
description - - self.startTime = datetime.datetime.now() - self.bc = bcolors() - self.desc_width = 40 - self.output_width = 60 - - def run(self, test): - "Run the given test case or test suite." - result = ColoredTestResult(stream=self.stream, descriptions=True, verbosity=self.verbosity) - test(result) - self.stopTime = datetime.datetime.now() - self.generateReport(test, result) - return result - - def sortResult(self, result_list): - # unittest does not seems to run in any particular order. - # Here at least we want to group them together by class. - rmap = {} - classes = [] - for n, test, output, error in result_list: - testClass = test.__class__ - if testClass not in rmap: - rmap[testClass] = [] - classes.append(testClass) - rmap[testClass].append((n, test, output, error)) - r = [(testClass, rmap[testClass]) for testClass in classes] - return r - - def getReportAttributes(self, result): - """ - Return report attributes as a list of (name, value). - Override this to add custom attributes. - """ - startTime = str(self.startTime)[:19] - duration = str(self.stopTime - self.startTime) - status = [] - padding = 4 * ' ' - if result.success_count: - status.append(padding + self.bc.GREEN + 'Pass:' + self.bc.END + ' %s\n' % result.success_count) - if result.failure_count: - status.append(padding + self.bc.PURPLE + 'Failure:' + self.bc.END + ' %s\n' % result.failure_count) - if result.error_count: - status.append(padding + self.bc.RED + 'Error:' + self.bc.END + ' %s\n' % result.error_count) - if status: - status = '\n'+''.join(status) - else: - status = 'none' - return [ - ('Start Time', startTime), - ('Duration', duration), - ('Status', status), - ] - - def generateReport(self, test, result): - report_attrs = self.getReportAttributes(result) - heading = self._generate_heading(report_attrs) - report = self._generate_report(result) - output = "\n" + self.title.rjust(30) + "\n" + heading + report - try: - self.stream.write(output.encode('utf8')) - except TypeError: - self.stream.write(output) - - def _generate_heading(self, report_attrs): - a_lines = [] - for name, value in report_attrs: - line = self.bc.CYAN + name + ": " + self.bc.END + value + "\n" - a_lines.append(line) - heading = ''.join(a_lines) + self.bc.CYAN + "Description:" + self.bc.END + self.description + "\n" - return heading - - def _generate_report(self, result): - sortedResult = self.sortResult(result.result) - padding = 4 * ' ' - table = Table(padding=padding, allow_newlines=True) - table.addTitles(["Test group/Test case", "Count", "Pass", "Fail", "Error"]) - tests = '' - for cid, (testClass, classResults) in enumerate(sortedResult): # Iterate over the test cases - classTable = Table(padding=2*padding) - classTable.addTitles(["Test name", "Output", "Status"]) - # subtotal for a class - np = nf = ne = 0 - for n, test, output, error in classResults: - if n == 0: - np += 1 - elif n == 1: - nf += 1 - else: - ne += 1 - - # format class description - if testClass.__module__ == "__main__": - name = testClass.__name__ - else: - name = "%s.%s" % (testClass.__module__, testClass.__name__) - tests += padding + name + "\n" - doc = testClass.__doc__ and testClass.__doc__.split("\n")[0] or "" - if doc: - doc = self._indent(self._wrap_text(doc, width=self.output_width - 4), 4) - desc = doc and ('%s:\n%s' % (name, doc)) or name - - table.addRow([self._wrap_text(desc, width=self.desc_width), str(np + nf + ne), str(np), str(nf), str(ne)]) - for tid, (n, test, output, error) in enumerate(classResults): # Iterate over the unit tests - 
classTable.addRow(self._generate_report_test(cid, tid, n, test, output, error)) - tests += str(classTable) - - for tid, (n, test, output, error) in enumerate(classResults): # Iterate over the unit tests - if error: - tests += self._indent(self.bc.RED + "ERROR in test %s:" % test + self.bc.END, 2) - tests += "\n" + self._indent(error, 2) + "\n" - - table.addRow([self.desc_width * '-', '----', '----', '----', '----']) - table.addRow(["Total", str(result.success_count + result.failure_count + result.error_count), - str(result.success_count), str(result.failure_count), str(result.error_count)]) - report = self.bc.CYAN + "Summary: " + self.bc.END + "\n" + str(table) + tests - return report - - def _generate_report_test(self, cid, tid, n, test, output, error): - name = test.id().split('.')[-1] - doc = test.shortDescription() or "" - if doc: - doc = self._indent(self._wrap_text(doc, width=self.output_width - 4), 4) - desc = doc and ('%s:\n%s' % (name, doc)) or name - - # o and e should be byte string because they are collected from stdout and stderr? - if isinstance(output, str): - # TODO: some problem with 'string_escape': it escape \n and mess up formating - # uo = unicode(o.encode('string_escape')) - try: - uo = output.decode('latin-1') - except AttributeError: - uo = output - else: - uo = output - if isinstance(error, str): - # TODO: some problem with 'string_escape': it escape \n and mess up formating - # ue = unicode(e.encode('string_escape')) - try: - ue = error.decode('latin-1') - except AttributeError: - ue = error - else: - ue = error - - # just print the last line of any error - if "\n" in ue: - ue = ue.splitlines()[-1] - - script = self.REPORT_TEST_OUTPUT_TMPL % dict( - output=self._wrap_text(uo, width=self.output_width) + self._wrap_text(ue, width=self.output_width), - ) - row = [desc, script, self.STATUS[n]] - return row - - @staticmethod - def _wrap_text(text, width): - """Wrap text to a given width but preserve line breaks - """ - # https://stackoverflow.com/a/26538082 - return '\n'.join(['\n'.join(textwrap.wrap(line, width, break_long_words=False, replace_whitespace=False)) - for line in text.splitlines() if line.strip() != '']) - - @staticmethod - def _indent(text, amount): - """Indent text by a particular number of spaces on each line - """ - try: - return textwrap.indent(text, amount * ' ') - except AttributeError: # undefined function (indent wasn't added until Python 3.3) - return ''.join((amount * ' ') + line for line in text.splitlines(True))
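
Below is a minimal sketch (not part of the patch itself) of how the duecredit support added in `src/pynwb/_due.py` and `src/pynwb/__init__.py` is exercised from user code, assuming duecredit is installed; setting the variable via `os.environ` inside the script, rather than a shell export, is illustrative only.

```
import os

# DUECREDIT_ENABLE must be set before duecredit (and therefore pynwb) is imported;
# this is the in-script equivalent of `DUECREDIT_ENABLE=yes python yourscript.py`
# documented in docs/source/overview_citing.rst above.
os.environ["DUECREDIT_ENABLE"] = "yes"

import pynwb  # noqa: E402  # importing pynwb registers the NWB citation via due.cite(...)

print(pynwb.__version__)

# On exit, duecredit writes the collected citations to ./.duecredit.p;
# running `duecredit summary --format=bibtex` afterwards prints them in BibTeX format.
```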