diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index 835a4e4..8fd0cb9 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -8,7 +8,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - python-version: ["3.9", "3.10", "3.11"] + python-version: ["3.9", "3.10", "3.11", "3.12"] os: [ubuntu-latest, windows-latest] steps: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index edb7dc5..34a415b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -47,13 +47,13 @@ repos: # additional_dependencies: [black] - repo: https://github.com/PyCQA/flake8 - rev: 4.0.1 + rev: 7.1.0 hooks: - id: flake8 ## You can add flake8 plugins via `additional_dependencies`: # additional_dependencies: [flake8-bugbear] - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.9.0 # Use the sha / tag you want to point at + rev: v1.10.1 # Use the sha / tag you want to point at hooks: - id: mypy additional_dependencies: ['types-PyYAML'] diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7515705..36c9b23 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,6 +6,8 @@ Changelog =========================== - Lock pandas to 2.1.4 or later - Capital Investment result calculation fixed +- Defaults expansion moved to ReadStrategy +- Adds Python 3.12 support - Add HiGHS as a solver Version 1.1.2 diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000..8949a18 --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,316 @@ + +============ +Contributing +============ + +Welcome to ``otoole`` contributor's guide! + +This document focuses on getting any potential contributor familiarized +with the development processes, but `other kinds of contributions`_ are also +appreciated. + +If you are new to using git_ or have never collaborated in a project previously, +please have a look at `contribution-guide.org`_. Other resources are also +listed in the excellent `guide created by FreeCodeCamp`_. 
+ +Please notice, all users and contributors are expected to be **open, +considerate, reasonable, and respectful**. When in doubt, `Python Software +Foundation's Code of Conduct`_ is a good reference in terms of behavior +guidelines. + + +Issue Reports +============= + +If you experience bugs or general issues with ``otoole``, please have a look +on the `issue tracker`_. If you don't see anything useful there, please feel +free to fire an issue report. + +.. tip:: + Please don't forget to include the closed issues in your search. + Sometimes a solution was already reported, and the problem is considered + **solved**. + +New issue reports should include information about your programming environment +(e.g., operating system, Python version) and steps to reproduce the problem. +Please try also to simplify the reproduction steps to a very minimal example +that still illustrates the problem you are facing. By removing other factors, +you help us to identify the root cause of the issue. + + +Documentation Improvements +========================== + +You can help improve ``otoole`` docs by making them more readable and coherent, or +by adding missing information and correcting mistakes. + +``otoole`` documentation uses Sphinx_ as its main documentation compiler. +This means that the docs are kept in the same repository as the project code, and +that any documentation update is done in the same way as a code contribution. + +Our documentation is written in reStructuredText_. + +.. tip:: + Please notice that the `GitHub web interface`_ provides a quick way of + proposing changes in ``otoole``'s files. While this mechanism can + be tricky for normal code contributions, it works perfectly fine for + contributing to the docs, and can be quite handy. 
+ + If you are interested in trying this method out, please navigate to + the ``docs`` folder in the source repository_, find which file you + would like to propose changes and click in the little pencil icon at the + top, to open `GitHub's code editor`_. Once you finish editing the file, + please write a message in the form at the bottom of the page describing + which changes have you made and what are the motivations behind them and + submit your proposal. + +When working on documentation changes in your local machine, you can +compile them using |tox|_:: + + tox -e docs + +and use Python's built-in web server for a preview in your web browser +(``http://localhost:8000``):: + + python3 -m http.server --directory 'docs/_build/html' + + +Code Contributions +================== + +``otoole`` is built around a command line tool which is written +using the Python argparse library. The ``otoole.cli`` module is a useful +place to start when trying to understand how each command works. + +The ``otoole convert`` and ``otoole results`` commands both +use classes which inherit the ``otoole.Strategy`` class. +An ``otoole.ReadStrategy`` implements functionality to read in data, while an +``otoole.WriteStrategy`` writes out the target file format. The internal datastore +format in ``otoole`` is a dictionary of ``pandas.DataFrames``. + +Comprehensive unit tests in the ``tests`` folder provide another way to +understand what each of the components does. + +Submit an issue +--------------- + +Before you work on any non-trivial code contribution it's best to first create +a report in the `issue tracker`_ to start a discussion on the subject. +This often provides additional considerations and avoids unnecessary work. + +Create an environment +--------------------- + +Before you start coding, we recommend creating an isolated `virtual +environment`_ to avoid any problems with your installed Python packages. 
+This can easily be done via either |virtualenv|_:: + + virtualenv + source /bin/activate + +or Miniconda_:: + + conda create -n otoole python=3 six virtualenv pytest pytest-cov + conda activate otoole + +Clone the repository +-------------------- + +#. Create an user account on |the repository service| if you do not already have one. +#. Fork the project repository_: click on the *Fork* button near the top of the + page. This creates a copy of the code under your account on |the repository service|. +#. Clone this copy to your local disk:: + + git clone git@github.com:YourLogin/otoole.git + cd otoole + +#. You should run:: + + pip install -U pip setuptools -e . + + to be able to import the package under development in the Python REPL. + +#. Install |pre-commit|_:: + + pip install pre-commit + pre-commit install + + ``otoole`` comes with a lot of hooks configured to automatically help the + developer to check the code being written. + +Implement your changes +---------------------- + +#. Create a branch to hold your changes:: + + git checkout -b my-feature + + and start making changes. Never work on the main branch! + +#. Start your work on this branch. Don't forget to add docstrings_ to new + functions, modules and classes, especially if they are part of public APIs. + +#. Add yourself to the list of contributors in ``AUTHORS.rst``. + +#. When you’re done editing, do:: + + git add + git commit + + to record your changes in git_. + + Please make sure to see the validation messages from |pre-commit|_ and fix + any eventual issues. + This should automatically use flake8_/black_ to check/fix the code style + in a way that is compatible with the project. + + .. important:: Don't forget to add unit tests and documentation in case your + contribution adds an additional feature and is not just a bugfix. + + Moreover, writing a `descriptive commit message`_ is highly recommended. 
+ In case of doubt, you can check the commit history with:: + + git log --graph --decorate --pretty=oneline --abbrev-commit --all + + to look for recurring communication patterns. + +#. Please check that your changes don't break any unit tests with:: + + tox + + (after having installed |tox|_ with ``pip install tox`` or ``pipx``). + + You can also use |tox|_ to run several other pre-configured tasks in the + repository. Try ``tox -av`` to see a list of the available checks. + +Submit your contribution +------------------------ + +#. If everything works fine, push your local branch to |the repository service| with:: + + git push -u origin my-feature + +#. Go to the web page of your fork and click |contribute button| + to send your changes for review. + +Find more detailed information in `creating a PR`_. You might also want to open +the PR as a draft first and mark it as ready for review after the feedbacks +from the continuous integration (CI) system or any required fixes. + +We track test coverage using coveralls_. You can check the coverage +of your PR by clicking on the "details" link in the "Coverage" section of +the pull request checks. Try to ensure that your pull requests always increase +test coverage. + +Troubleshooting +--------------- + +The following tips can be used when facing problems to build or test the +package: + +#. Make sure to fetch all the tags from the upstream repository_. + The command ``git describe --abbrev=0 --tags`` should return the version you + are expecting. If you are trying to run CI scripts in a fork repository, + make sure to push all the tags. + You can also try to remove all the egg files or the complete egg folder, i.e., + ``.eggs``, as well as the ``*.egg-info`` folders in the ``src`` folder or + potentially in the root of your project. + +#. Sometimes |tox|_ misses out when new dependencies are added, especially to + ``setup.cfg`` and ``docs/requirements.txt``. 
If you find any problems with + missing dependencies when running a command with |tox|_, try to recreate the + ``tox`` environment using the ``-r`` flag. For example, instead of:: + + tox -e docs + + Try running:: + + tox -r -e docs + +#. Make sure to have a reliable |tox|_ installation that uses the correct + Python version (e.g., 3.8+). When in doubt you can run:: + + tox --version + # OR + which tox + + If you have trouble and are seeing weird errors upon running |tox|_, you can + also try to create a dedicated `virtual environment`_ with a |tox|_ binary + freshly installed. For example:: + + virtualenv .venv + source .venv/bin/activate + .venv/bin/pip install tox + .venv/bin/tox -e all + +#. `Pytest can drop you`_ in an interactive session in the case an error occurs. + In order to do that you need to pass a ``--pdb`` option (for example by + running ``tox -- -k --pdb``). + You can also setup breakpoints manually instead of using the ``--pdb`` option. + + +Maintainer tasks +================ + +Releases +-------- + +If you are part of the group of maintainers and have correct user permissions +on PyPI_, the following steps can be used to release a new version for +``otoole``: + +#. Make sure all unit tests are successful. +#. Tag the current commit on the main branch with a release tag, e.g., ``v1.2.3``. +#. Push the new tag to the upstream repository_, e.g., ``git push upstream v1.2.3`` +#. Clean up the ``dist`` and ``build`` folders with ``tox -e clean`` + (or ``rm -rf dist build``) + to avoid confusion with old builds and Sphinx docs. +#. Run ``tox -e build`` and check that the files in ``dist`` have + the correct version (no ``.dirty`` or git_ hash) according to the git_ tag. + Also check the sizes of the distributions, if they are too big (e.g., > + 500KB), unwanted clutter may have been accidentally included. +#. Run ``tox -e publish -- --repository pypi`` and check that everything was + uploaded to PyPI_ correctly. + +.. <-- start --> +.. 
|the repository service| replace:: GitHub +.. |contribute button| replace:: "Create pull request" + +.. _repository: https://github.com/OSeMOSYS/otoole +.. _issue tracker: https://github.com/OSeMOSYS/otoole/issues +.. <-- end --> + + +.. |virtualenv| replace:: ``virtualenv`` +.. |pre-commit| replace:: ``pre-commit`` +.. |tox| replace:: ``tox`` + + +.. _coveralls: https://coveralls.io/github/OSeMOSYS/otoole +.. _black: https://pypi.org/project/black/ +.. _CommonMark: https://commonmark.org/ +.. _contribution-guide.org: https://www.contribution-guide.org/ +.. _creating a PR: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request +.. _descriptive commit message: https://chris.beams.io/posts/git-commit +.. _docstrings: https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html +.. _first-contributions tutorial: https://github.com/firstcontributions/first-contributions +.. _flake8: https://flake8.pycqa.org/en/stable/ +.. _git: https://git-scm.com +.. _GitHub's fork and pull request workflow: https://guides.github.com/activities/forking/ +.. _guide created by FreeCodeCamp: https://github.com/FreeCodeCamp/how-to-contribute-to-open-source +.. _Miniconda: https://docs.conda.io/en/latest/miniconda.html +.. _MyST: https://myst-parser.readthedocs.io/en/latest/syntax/syntax.html +.. _other kinds of contributions: https://opensource.guide/how-to-contribute +.. _pre-commit: https://pre-commit.com/ +.. _PyPI: https://pypi.org/ +.. _PyScaffold's contributor's guide: https://pyscaffold.org/en/stable/contributing.html +.. _Pytest can drop you: https://docs.pytest.org/en/stable/how-to/failures.html#using-python-library-pdb-with-pytest +.. _Python Software Foundation's Code of Conduct: https://www.python.org/psf/conduct/ +.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/ +.. _Sphinx: https://www.sphinx-doc.org/en/master/ +.. 
_tox: https://tox.wiki/en/stable/ +.. _virtual environment: https://realpython.com/python-virtual-environments-a-primer/ +.. _virtualenv: https://virtualenv.pypa.io/en/stable/ + +.. _GitHub web interface: https://docs.github.com/en/repositories/working-with-files/managing-files/editing-files +.. _GitHub's code editor: https://docs.github.com/en/repositories/working-with-files/managing-files/editing-files diff --git a/README.rst b/README.rst index df34843..a34821a 100644 --- a/README.rst +++ b/README.rst @@ -14,7 +14,7 @@ otoole: OSeMOSYS tools for energy work :target: https://github.com/psf/black :alt: Code Style -.. image:: https://img.shields.io/badge/python-3.9_|_3.10_|_3.11-blue.svg +.. image:: https://img.shields.io/badge/python-3.9_|_3.10_|_3.11_|_3.12-blue.svg :target: https://crate.io/packages/otoole/ :alt: Python Version diff --git a/docs/conf.py b/docs/conf.py index 2a5fbc5..c12997d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -88,7 +88,7 @@ # General information about the project. project = "otoole" -copyright = "2022, Will Usher" +copyright = "2024, Will Usher" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/docs/contributing.rst b/docs/contributing.rst index 96bdfb1..e582053 100644 --- a/docs/contributing.rst +++ b/docs/contributing.rst @@ -1,315 +1 @@ -============ -Contributing -============ - -Welcome to ``otoole`` contributor's guide! - -This document focuses on getting any potential contributor familiarized -with the development processes, but `other kinds of contributions`_ are also -appreciated. - -If you are new to using git_ or have never collaborated in a project previously, -please have a look at `contribution-guide.org`_. Other resources are also -listed in the excellent `guide created by FreeCodeCamp`_. - -Please notice, all users and contributors are expected to be **open, -considerate, reasonable, and respectful**. 
When in doubt, `Python Software -Foundation's Code of Conduct`_ is a good reference in terms of behavior -guidelines. - - -Issue Reports -============= - -If you experience bugs or general issues with ``otoole``, please have a look -on the `issue tracker`_. If you don't see anything useful there, please feel -free to fire an issue report. - -.. tip:: - Please don't forget to include the closed issues in your search. - Sometimes a solution was already reported, and the problem is considered - **solved**. - -New issue reports should include information about your programming environment -(e.g., operating system, Python version) and steps to reproduce the problem. -Please try also to simplify the reproduction steps to a very minimal example -that still illustrates the problem you are facing. By removing other factors, -you help us to identify the root cause of the issue. - - -Documentation Improvements -========================== - -You can help improve ``otoole`` docs by making them more readable and coherent, or -by adding missing information and correcting mistakes. - -``otoole`` documentation uses Sphinx_ as its main documentation compiler. -This means that the docs are kept in the same repository as the project code, and -that any documentation update is done in the same way was a code contribution. - -Our documentation is written in reStructuredText_. - -.. tip:: - Please notice that the `GitHub web interface`_ provides a quick way of - propose changes in ``otoole``'s files. While this mechanism can - be tricky for normal code contributions, it works perfectly fine for - contributing to the docs, and can be quite handy. - - If you are interested in trying this method out, please navigate to - the ``docs`` folder in the source repository_, find which file you - would like to propose changes and click in the little pencil icon at the - top, to open `GitHub's code editor`_. 
Once you finish editing the file, - please write a message in the form at the bottom of the page describing - which changes have you made and what are the motivations behind them and - submit your proposal. - -When working on documentation changes in your local machine, you can -compile them using |tox|_:: - - tox -e docs - -and use Python's built-in web server for a preview in your web browser -(``http://localhost:8000``):: - - python3 -m http.server --directory 'docs/_build/html' - - -Code Contributions -================== - -``otoole`` is built around a command line tool which is written -using the Python argparse library. The ``otoole.cli`` module is a useful -place to start when trying to understand how each command works. - -The ``otoole convert`` and ``otoole results`` commands both -use classes which inherit the ``otoole.Strategy`` class. -An ``otoole.ReadStrategy`` implements functionality to read in data, while an -``otoole.WriteStrategy`` writes out the target file format. The internal datastore -format in ``otool`` is a dictionary of ``pandas.DataFrames``. - -Comprehensive unit tests in the ``tests`` folder provide another way to -understand what each of the components does. - -Submit an issue ---------------- - -Before you work on any non-trivial code contribution it's best to first create -a report in the `issue tracker`_ to start a discussion on the subject. -This often provides additional considerations and avoids unnecessary work. - -Create an environment ---------------------- - -Before you start coding, we recommend creating an isolated `virtual -environment`_ to avoid any problems with your installed Python packages. -This can easily be done via either |virtualenv|_:: - - virtualenv - source /bin/activate - -or Miniconda_:: - - conda create -n otoole python=3 six virtualenv pytest pytest-cov - conda activate otoole - -Clone the repository --------------------- - -#. 
Create an user account on |the repository service| if you do not already have one. -#. Fork the project repository_: click on the *Fork* button near the top of the - page. This creates a copy of the code under your account on |the repository service|. -#. Clone this copy to your local disk:: - - git clone git@github.com:YourLogin/otoole.git - cd otoole - -#. You should run:: - - pip install -U pip setuptools -e . - - to be able to import the package under development in the Python REPL. - -#. Install |pre-commit|_:: - - pip install pre-commit - pre-commit install - - ``otoole`` comes with a lot of hooks configured to automatically help the - developer to check the code being written. - -Implement your changes ----------------------- - -#. Create a branch to hold your changes:: - - git checkout -b my-feature - - and start making changes. Never work on the main branch! - -#. Start your work on this branch. Don't forget to add docstrings_ to new - functions, modules and classes, especially if they are part of public APIs. - -#. Add yourself to the list of contributors in ``AUTHORS.rst``. - -#. When you’re done editing, do:: - - git add - git commit - - to record your changes in git_. - - Please make sure to see the validation messages from |pre-commit|_ and fix - any eventual issues. - This should automatically use flake8_/black_ to check/fix the code style - in a way that is compatible with the project. - - .. important:: Don't forget to add unit tests and documentation in case your - contribution adds an additional feature and is not just a bugfix. - - Moreover, writing a `descriptive commit message`_ is highly recommended. - In case of doubt, you can check the commit history with:: - - git log --graph --decorate --pretty=oneline --abbrev-commit --all - - to look for recurring communication patterns. - -#. Please check that your changes don't break any unit tests with:: - - tox - - (after having installed |tox|_ with ``pip install tox`` or ``pipx``). 
- - You can also use |tox|_ to run several other pre-configured tasks in the - repository. Try ``tox -av`` to see a list of the available checks. - -Submit your contribution ------------------------- - -#. If everything works fine, push your local branch to |the repository service| with:: - - git push -u origin my-feature - -#. Go to the web page of your fork and click |contribute button| - to send your changes for review. - -Find more detailed information in `creating a PR`_. You might also want to open -the PR as a draft first and mark it as ready for review after the feedbacks -from the continuous integration (CI) system or any required fixes. - -We track test coverage using coveralls_. You can check the coverage -of your PR by clicking on the "details" link in the "Coverage" section of -the pull request checks. Try to ensure that your pull requests always increase -test coverage. - -Troubleshooting ---------------- - -The following tips can be used when facing problems to build or test the -package: - -#. Make sure to fetch all the tags from the upstream repository_. - The command ``git describe --abbrev=0 --tags`` should return the version you - are expecting. If you are trying to run CI scripts in a fork repository, - make sure to push all the tags. - You can also try to remove all the egg files or the complete egg folder, i.e., - ``.eggs``, as well as the ``*.egg-info`` folders in the ``src`` folder or - potentially in the root of your project. - -#. Sometimes |tox|_ misses out when new dependencies are added, especially to - ``setup.cfg`` and ``docs/requirements.txt``. If you find any problems with - missing dependencies when running a command with |tox|_, try to recreate the - ``tox`` environment using the ``-r`` flag. For example, instead of:: - - tox -e docs - - Try running:: - - tox -r -e docs - -#. Make sure to have a reliable |tox|_ installation that uses the correct - Python version (e.g., 3.8+). 
When in doubt you can run:: - - tox --version - # OR - which tox - - If you have trouble and are seeing weird errors upon running |tox|_, you can - also try to create a dedicated `virtual environment`_ with a |tox|_ binary - freshly installed. For example:: - - virtualenv .venv - source .venv/bin/activate - .venv/bin/pip install tox - .venv/bin/tox -e all - -#. `Pytest can drop you`_ in an interactive session in the case an error occurs. - In order to do that you need to pass a ``--pdb`` option (for example by - running ``tox -- -k --pdb``). - You can also setup breakpoints manually instead of using the ``--pdb`` option. - - -Maintainer tasks -================ - -Releases --------- - -If you are part of the group of maintainers and have correct user permissions -on PyPI_, the following steps can be used to release a new version for -``otoole``: - -#. Make sure all unit tests are successful. -#. Tag the current commit on the main branch with a release tag, e.g., ``v1.2.3``. -#. Push the new tag to the upstream repository_, e.g., ``git push upstream v1.2.3`` -#. Clean up the ``dist`` and ``build`` folders with ``tox -e clean`` - (or ``rm -rf dist build``) - to avoid confusion with old builds and Sphinx docs. -#. Run ``tox -e build`` and check that the files in ``dist`` have - the correct version (no ``.dirty`` or git_ hash) according to the git_ tag. - Also check the sizes of the distributions, if they are too big (e.g., > - 500KB), unwanted clutter may have been accidentally included. -#. Run ``tox -e publish -- --repository pypi`` and check that everything was - uploaded to PyPI_ correctly. - -.. <-- strart --> -.. |the repository service| replace:: GitHub -.. |contribute button| replace:: "Create pull request" - -.. _repository: https://github.com/OSeMOSYS/otoole -.. _issue tracker: https://github.com/OSeMOSYS/otoole/issues -.. <-- end --> - - -.. |virtualenv| replace:: ``virtualenv`` -.. |pre-commit| replace:: ``pre-commit`` -.. |tox| replace:: ``tox`` - - -.. 
_coveralls: https://coveralls.io/github/OSeMOSYS/otoole -.. _black: https://pypi.org/project/black/ -.. _CommonMark: https://commonmark.org/ -.. _contribution-guide.org: https://www.contribution-guide.org/ -.. _creating a PR: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request -.. _descriptive commit message: https://chris.beams.io/posts/git-commit -.. _docstrings: https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html -.. _first-contributions tutorial: https://github.com/firstcontributions/first-contributions -.. _flake8: https://flake8.pycqa.org/en/stable/ -.. _git: https://git-scm.com -.. _GitHub's fork and pull request workflow: https://guides.github.com/activities/forking/ -.. _guide created by FreeCodeCamp: https://github.com/FreeCodeCamp/how-to-contribute-to-open-source -.. _Miniconda: https://docs.conda.io/en/latest/miniconda.html -.. _MyST: https://myst-parser.readthedocs.io/en/latest/syntax/syntax.html -.. _other kinds of contributions: https://opensource.guide/how-to-contribute -.. _pre-commit: https://pre-commit.com/ -.. _PyPI: https://pypi.org/ -.. _PyScaffold's contributor's guide: https://pyscaffold.org/en/stable/contributing.html -.. _Pytest can drop you: https://docs.pytest.org/en/stable/how-to/failures.html#using-python-library-pdb-with-pytest -.. _Python Software Foundation's Code of Conduct: https://www.python.org/psf/conduct/ -.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/ -.. _Sphinx: https://www.sphinx-doc.org/en/master/ -.. _tox: https://tox.wiki/en/stable/ -.. _virtual environment: https://realpython.com/python-virtual-environments-a-primer/ -.. _virtualenv: https://virtualenv.pypa.io/en/stable/ - -.. _GitHub web interface: https://docs.github.com/en/repositories/working-with-files/managing-files/editing-files -.. 
_GitHub's code editor: https://docs.github.com/en/repositories/working-with-files/managing-files/editing-files +.. include:: ../CONTRIBUTING.rst diff --git a/docs/examples.rst b/docs/examples.rst index 47349f6..8d88d02 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -302,6 +302,8 @@ save the solution as ``simplicity.sol``. Use otoole to create a folder of CSV re $ otoole results cplex csv simplicity.sol results csv data config.yaml +.. _model-visualization: + Model Visualization ------------------- @@ -322,18 +324,21 @@ Run the following command, where the RES will be saved as the file ``res.png``:: .. WARNING:: If you encounter a ``graphviz`` dependency error, install it on your system - from the graphviz_ website (if on Windows) or via the command:: + following instructions on the Graphviz_ website. If on Windows, + download the install package `from Graphviz `_. + If on Mac or Linux, or running conda, use one of the following commands:: - sudo apt install graphviz # if on Ubuntu - brew install graphviz # if on Mac + brew install graphviz # if on Mac + sudo apt install graphviz # if on Ubuntu + conda install graphviz # if using conda To check that ``graphviz`` installed correctly, run ``dot -V`` to check the version:: - ~$ dot -V + $ dot -V dot - graphviz version 2.43.0 (0) -1. View the RES +2. View the RES ~~~~~~~~~~~~~~~ Open the newly created file, ``res.png`` and the following image should be displayed @@ -608,6 +613,7 @@ will also flag it as an isolated fuel. This means the fuel is unconnected from t .. _CPLEX: https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer .. _Anaconda: https://www.anaconda.com/ .. _Gurobi: https://www.gurobi.com/ -.. _graphviz: https://www.graphviz.org/download/ +.. _Graphviz: https://www.graphviz.org/download/ .. _HiGHS: https://ergo-code.github.io/HiGHS/dev/ .. 
_HiGHS Python API: https://ergo-code.github.io/HiGHS/dev/interfaces/python/ + diff --git a/docs/functionality.rst b/docs/functionality.rst index 4d184ab..d3d08e9 100644 --- a/docs/functionality.rst +++ b/docs/functionality.rst @@ -180,9 +180,14 @@ visualising the reference energy system through the ``vis res`` command is suppo .. NOTE:: The ``resfile`` command should include a file ending used for images, - including ``bmp``, ``jpg``, ``pdf``, ``png`` etc. The graphviz_ library + including ``bmp``, ``jpg``, ``pdf``, ``png`` etc. The Graphviz_ library used to layout the reference energy system will interpret the file ending. +.. WARNING:: + If you encounter a Graphviz_ dependency error, please follow Graphviz_ + installation instructions described in the + :ref:`visualization examples `. + Validation ---------- The validation module in ``otoole`` checks technology and fuel names against a @@ -216,5 +221,5 @@ the rest of the model:: .. _CPLEX: https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer .. _Gurobi: https://www.gurobi.com/ .. _`OSeMOSYS Repository`: https://github.com/OSeMOSYS/OSeMOSYS_GNU_MathProg/tree/master/scripts -.. _graphviz: https://graphviz.org/ +.. _Graphviz: https://graphviz.org/ .. _HiGHS: https://ergo-code.github.io/HiGHS/dev/ diff --git a/setup.cfg b/setup.cfg index 8276af0..307eac8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -44,6 +44,7 @@ python_requires = >=3.9 # If this list changes, update docs/requirements.txt as well. install_requires = + importlib-metadata; python_version<"3.8" xlrd pyyaml pydot @@ -128,7 +129,7 @@ follow_imports = silent [pyscaffold] # PyScaffold's parameters when the project was created. # This will be used when updating. Do not change! -version = 4.2.3 +version = 4.5 package = otoole extensions = pre_commit diff --git a/setup.py b/setup.py index 0c14f11..cf29d3e 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ Setup file for otoole. Use setup.cfg to configure your project. 
- This file was generated with PyScaffold 4.2.3. + This file was generated with PyScaffold 4.5. PyScaffold helps you to put up the scaffold of your new Python project. Learn more under: https://pyscaffold.org/ """ diff --git a/src/otoole/cli.py b/src/otoole/cli.py index 8466ac5..13ae06f 100644 --- a/src/otoole/cli.py +++ b/src/otoole/cli.py @@ -38,6 +38,7 @@ --version, -V The version of otoole """ + import argparse import logging import os @@ -125,7 +126,6 @@ def setup(args): data_type = args.data_type data_path = args.data_path - write_defaults = args.write_defaults overwrite = args.overwrite if os.path.exists(data_path) and not overwrite: @@ -139,9 +139,7 @@ def setup(args): elif data_type == "csv": config = get_config_setup_data() input_data, default_values = get_csv_setup_data(config) - WriteCsv(user_config=config).write( - input_data, data_path, default_values, write_defaults=write_defaults - ) + WriteCsv(user_config=config).write(input_data, data_path, default_values) def get_parser(): @@ -172,7 +170,7 @@ def get_parser(): result_parser.add_argument( "to_format", help="Result data format to convert to", - choices=sorted(["csv"]), + choices=sorted(["csv", "excel"]), ) result_parser.add_argument("from_path", help="Path to solver solution file") result_parser.add_argument("to_path", help="Path to file or folder to convert to") @@ -269,12 +267,6 @@ def get_parser(): "data_type", help="Type of file to setup", choices=sorted(["config", "csv"]) ) setup_parser.add_argument("data_path", help="Path to file or folder to save to") - setup_parser.add_argument( - "--write_defaults", - help="Writes default values", - default=False, - action="store_true", - ) setup_parser.add_argument( "--overwrite", help="Overwrites existing data", diff --git a/src/otoole/convert.py b/src/otoole/convert.py index 91248b5..6b22a38 100644 --- a/src/otoole/convert.py +++ b/src/otoole/convert.py @@ -37,6 +37,7 @@ def read_results( from_path: str, input_format: str, input_path: str, + 
write_defaults: bool = False, glpk_model: Optional[str] = None, ) -> Tuple[Dict[str, pd.DataFrame], Dict[str, float]]: """Read OSeMOSYS results from CBC, GLPK, Gurobi, HiGHS, or CPLEX results files @@ -53,6 +54,8 @@ def read_results( Format of input data. Available options are 'datafile', 'csv' and 'excel' input_path: str Path to input data + write_defaults: bool, default: False + Expand default values to pad dataframes glpk_model : str Path to ``*.glp`` model file @@ -63,7 +66,9 @@ def read_results( """ user_config = _get_user_config(config) input_strategy = _get_read_strategy(user_config, input_format) - result_strategy = _get_read_result_strategy(user_config, from_format, glpk_model) + result_strategy = _get_read_result_strategy( + user_config, from_format, glpk_model, write_defaults + ) if input_strategy: input_data, _ = input_strategy.read(input_path) @@ -98,7 +103,7 @@ def convert_results( from_format : str Available options are 'cbc', 'cplex', 'highs'. 'glpk', and 'gurobi' to_format : str - Available options are 'csv' + Available options are 'csv', 'excel' from_path : str Path to solution file to_path : str @@ -107,8 +112,8 @@ def convert_results( Format of input data. 
Available options are 'datafile', 'csv' and 'excel' input_path: str Path to input data - write_defaults : bool - Write default values to CSVs + write_defaults: bool, default: False + Expand default values to pad dataframes glpk_model : str Path to ``*.glp`` model file @@ -126,16 +131,16 @@ def convert_results( # set read strategy - read_strategy = _get_read_result_strategy(user_config, from_format, glpk_model) + read_strategy = _get_read_result_strategy( + user_config, from_format, glpk_model, write_defaults + ) # set write strategy - write_defaults = True if write_defaults else False - if to_format == "csv": - write_strategy = WriteCsv( - user_config=user_config, write_defaults=write_defaults - ) + write_strategy: WriteStrategy = WriteCsv(user_config=user_config) + elif to_format == "excel": + write_strategy = WriteExcel(user_config=user_config) else: raise NotImplementedError(msg) @@ -152,7 +157,7 @@ def convert_results( def _get_read_result_strategy( - user_config, from_format, glpk_model=None + user_config, from_format, glpk_model=None, write_defaults=False ) -> Union[ReadResults, None]: """Get ``ReadResults`` for gurobi, cbc, cplex, highs and glpk formats @@ -162,6 +167,8 @@ def _get_read_result_strategy( User configuration describing parameters and sets from_format : str Available options are 'cbc', 'gurobi', 'cplex', 'highs', and 'glpk' + write_defaults: bool, default: False + Write default values to output format glpk_model : str Path to ``*.glp`` model file @@ -173,17 +180,29 @@ def _get_read_result_strategy( """ if from_format == "cbc": - read_strategy: ReadResults = ReadCbc(user_config) + read_strategy: ReadResults = ReadCbc( + user_config=user_config, write_defaults=write_defaults + ) elif from_format == "gurobi": - read_strategy = ReadGurobi(user_config=user_config) + read_strategy = ReadGurobi( + user_config=user_config, write_defaults=write_defaults + ) elif from_format == "cplex": - read_strategy = ReadCplex(user_config=user_config) + read_strategy 
= ReadCplex( + user_config=user_config, write_defaults=write_defaults + ) elif from_format == "highs": - read_strategy = ReadHighs(user_config=user_config) + read_strategy = ReadHighs( + user_config=user_config, write_defaults=write_defaults + ) elif from_format == "glpk": if not glpk_model: raise OtooleError(resource="Read GLPK", message="Provide glpk model file") - read_strategy = ReadGlpk(user_config=user_config, glpk_model=glpk_model) + read_strategy = ReadGlpk( + user_config=user_config, + glpk_model=glpk_model, + write_defaults=write_defaults, + ) else: return None @@ -213,7 +232,9 @@ def _get_user_config(config) -> dict: return user_config -def _get_read_strategy(user_config, from_format, keep_whitespace=False) -> ReadStrategy: +def _get_read_strategy( + user_config, from_format, keep_whitespace=False, write_defaults=False +) -> ReadStrategy: """Get ``ReadStrategy`` for csv/datafile/excel format Arguments @@ -224,6 +245,8 @@ def _get_read_strategy(user_config, from_format, keep_whitespace=False) -> ReadS Available options are 'datafile', 'datapackage', 'csv' and 'excel' keep_whitespace: bool, default: False Keep whitespace in CSVs + write_defaults: bool, default: False + Expand default values to pad dataframes Returns ------- @@ -234,22 +257,30 @@ def _get_read_strategy(user_config, from_format, keep_whitespace=False) -> ReadS keep_whitespace = True if keep_whitespace else False if from_format == "datafile": - read_strategy: ReadStrategy = ReadDatafile(user_config=user_config) + read_strategy: ReadStrategy = ReadDatafile( + user_config=user_config, write_defaults=write_defaults + ) elif from_format == "datapackage": logger.warning( "Reading from datapackage is deprecated, trying to read from CSVs" ) logger.info("Successfully read folder of CSVs") read_strategy = ReadCsv( - user_config=user_config, keep_whitespace=keep_whitespace + user_config=user_config, + keep_whitespace=keep_whitespace, + write_defaults=write_defaults, ) # typing: ReadStrategy elif 
from_format == "csv": read_strategy = ReadCsv( - user_config=user_config, keep_whitespace=keep_whitespace + user_config=user_config, + keep_whitespace=keep_whitespace, + write_defaults=write_defaults, ) # typing: ReadStrategy elif from_format == "excel": read_strategy = ReadExcel( - user_config=user_config, keep_whitespace=keep_whitespace + user_config=user_config, + keep_whitespace=keep_whitespace, + write_defaults=write_defaults, ) # typing: ReadStrategy else: msg = f"Conversion from {from_format} is not supported" @@ -258,7 +289,7 @@ def _get_read_strategy(user_config, from_format, keep_whitespace=False) -> ReadS return read_strategy -def _get_write_strategy(user_config, to_format, write_defaults=False) -> WriteStrategy: +def _get_write_strategy(user_config, to_format) -> WriteStrategy: """Get ``WriteStrategy`` for csv/datafile/excel format Arguments @@ -267,8 +298,6 @@ def _get_write_strategy(user_config, to_format, write_defaults=False) -> WriteSt User configuration describing parameters and sets to_format : str Available options are 'datafile', 'datapackage', 'csv' and 'excel' - write_defaults: bool, default: False - Write default values to output format Returns ------- @@ -276,25 +305,15 @@ def _get_write_strategy(user_config, to_format, write_defaults=False) -> WriteSt A ReadStrategy object. 
Returns None if to_format is not recognised """ - # set write strategy - write_defaults = True if write_defaults else False if to_format == "datapackage": - write_strategy: WriteStrategy = WriteCsv( - user_config=user_config, write_defaults=write_defaults - ) + write_strategy: WriteStrategy = WriteCsv(user_config=user_config) elif to_format == "excel": - write_strategy = WriteExcel( - user_config=user_config, write_defaults=write_defaults - ) + write_strategy = WriteExcel(user_config=user_config) elif to_format == "datafile": - write_strategy = WriteDatafile( - user_config=user_config, write_defaults=write_defaults - ) + write_strategy = WriteDatafile(user_config=user_config) elif to_format == "csv": - write_strategy = WriteCsv( - user_config=user_config, write_defaults=write_defaults - ) + write_strategy = WriteCsv(user_config=user_config) else: msg = f"Conversion to {to_format} is not supported" raise NotImplementedError(msg) @@ -324,7 +343,7 @@ def convert( from_path : str Path to destination file (if datafile or excel) or folder (csv or datapackage) write_defaults: bool, default: False - Write default values to CSVs + Expand default values to pad dataframes keep_whitespace: bool, default: False Keep whitespace in CSVs @@ -336,12 +355,13 @@ def convert( user_config = _get_user_config(config) read_strategy = _get_read_strategy( - user_config, from_format, keep_whitespace=keep_whitespace + user_config, + from_format, + keep_whitespace=keep_whitespace, + write_defaults=write_defaults, ) - write_strategy = _get_write_strategy( - user_config, to_format, write_defaults=write_defaults - ) + write_strategy = _get_write_strategy(user_config, to_format) if from_format == "datapackage": logger.warning( @@ -357,7 +377,11 @@ def convert( def read( - config: str, from_format: str, from_path: str, keep_whitespace: bool = False + config: str, + from_format: str, + from_path: str, + keep_whitespace: bool = False, + write_defaults: bool = False, ) -> Tuple[Dict[str, 
pd.DataFrame], Dict[str, float]]: """Read OSeMOSYS data from datafile, csv or Excel formats @@ -371,6 +395,8 @@ def read( Path to source file (if datafile or excel) or folder (csv) keep_whitespace: bool, default: False Keep whitespace in source files + write_defaults: bool, default: False + Expand default values to pad dataframes Returns ------- @@ -379,7 +405,10 @@ def read( """ user_config = _get_user_config(config) read_strategy = _get_read_strategy( - user_config, from_format, keep_whitespace=keep_whitespace + user_config, + from_format, + keep_whitespace=keep_whitespace, + write_defaults=write_defaults, ) if from_format == "datapackage": @@ -412,15 +441,10 @@ def write( """ user_config = _get_user_config(config) + write_strategy = _get_write_strategy(user_config, to_format) if default_values is None: - write_strategy = _get_write_strategy( - user_config, to_format, write_defaults=False - ) write_strategy.write(inputs, to_path, {}) else: - write_strategy = _get_write_strategy( - user_config, to_format, write_defaults=True - ) write_strategy.write(inputs, to_path, default_values) return True diff --git a/src/otoole/input.py b/src/otoole/input.py index 210647a..d5a22e6 100644 --- a/src/otoole/input.py +++ b/src/otoole/input.py @@ -28,6 +28,7 @@ >>> converter.convert('my_datafile.txt', 'folder_of_csv_files') """ + from __future__ import annotations import logging @@ -110,6 +111,7 @@ def convert(self, input_filepath: str, output_filepath: str, **kwargs: Dict): input_filepath: str output_filepath: str """ + inputs, default_values = self._read(input_filepath, **kwargs) self._write(inputs, output_filepath, default_values, **kwargs) @@ -186,7 +188,6 @@ class WriteStrategy(Strategy): user_config: dict, default=None filepath: str, default=None default_values: dict, default=None - write_defaults: bool, default=False input_data: dict, default=None """ @@ -196,7 +197,6 @@ def __init__( user_config: Dict, filepath: Optional[str] = None, default_values: Optional[Dict] = None, 
- write_defaults: bool = False, input_data: Optional[Dict[str, pd.DataFrame]] = None, ): super().__init__(user_config=user_config) @@ -215,8 +215,6 @@ def __init__( else: self.input_data = {} - self.write_defaults = write_defaults - @abstractmethod def _header(self) -> Union[TextIO, Any]: raise NotImplementedError() @@ -256,14 +254,10 @@ def write( handle = self._header() logger.debug(default_values) - self.input_data = inputs - if self.write_defaults: - try: - self.input_data = self._expand_defaults(inputs, default_values) - except KeyError as ex: - logger.debug(ex) + self.inputs = inputs # parameter/set data OR result data + self.input_params = kwargs.get("input_data", None) # parameter/set data - for name, df in sorted(self.input_data.items()): + for name, df in sorted(self.inputs.items()): logger.debug("%s has %s columns: %s", name, len(df.index.names), df.columns) try: @@ -275,9 +269,12 @@ def write( raise KeyError("Cannot find %s in input or results config", name) if entity_type != "set": - default_value = default_values[name] self._write_parameter( - df, name, handle, default=default_value, input_data=self.input_data + df, + name, + handle, + default=default_values[name], + input_data=self.inputs, ) else: self._write_set(df, name, handle) @@ -287,70 +284,6 @@ def write( if isinstance(handle, TextIO): handle.close() - def _expand_defaults( - self, data_to_expand: Dict[str, pd.DataFrame], default_values: Dict[str, float] - ) -> Dict[str, pd.DataFrame]: - """Populates default value entry rows in dataframes - - Parameters - ---------- - data_to_expand : Dict[str, pd.DataFrame], - default_values : Dict[str, float] - - Returns - ------- - Dict[str, pd.DataFrame] - Input data with expanded default values replacing missing entries - - """ - - sets = [x for x in self.user_config if self.user_config[x]["type"] == "set"] - output = {} - for name, data in data_to_expand.items(): - logger.info(f"Writing defaults for {name}") - - # skip sets - if name in sets: - 
output[name] = data - continue - - # TODO - # Issue with how otoole handles trade route right now. - # The double definition of REGION throws an error. - if name == "TradeRoute": - output[name] = data - continue - - # save set information for each parameter - index_data = {} - for index in data.index.names: - index_data[index] = self.input_data[index]["VALUE"].to_list() - - # set index - if len(index_data) > 1: - new_index = pd.MultiIndex.from_product( - list(index_data.values()), names=list(index_data.keys()) - ) - else: - new_index = pd.Index( - list(index_data.values())[0], name=list(index_data.keys())[0] - ) - df_default = pd.DataFrame(index=new_index) - - # save default result value - df_default["VALUE"] = default_values[name] - - # combine result and default value dataframe - if not data.empty: - df = pd.concat([data, df_default]) - df = df[~df.index.duplicated(keep="first")] - else: - df = df_default - df = df.sort_index() - output[name] = df - - return output - class ReadStrategy(Strategy): """ @@ -360,6 +293,15 @@ class ReadStrategy(Strategy): Strategies. 
""" + def __init__( + self, + user_config: Dict, + write_defaults: bool = False, + ): + super().__init__(user_config=user_config) + + self.write_defaults = write_defaults + def _check_index( self, input_data: Dict[str, pd.DataFrame] ) -> Dict[str, pd.DataFrame]: @@ -588,6 +530,113 @@ def _compare_read_to_expected( logger.debug(f"data and config name errors are: {errors}") raise OtooleNameMismatchError(name=errors) + def _expand_dataframe( + self, + name: str, + input_data: Dict[str, pd.DataFrame], + default_values: Dict[str, pd.DataFrame], + ) -> pd.DataFrame: + """Populates default value entry rows in dataframes + + Parameters + ---------- + name: str + Name of parameter/result to expand + input_data: Dict[str, pd.DataFrame], + internal datastore + default_values: Dict[str, pd.DataFrame], + + Returns + ------- + pd.DataFrame, + Input data with expanded default values replacing missing entries + """ + + df = input_data[name] + + # TODO: Issue with how otoole handles trade route right now. + # The double definition of REGION throws an error. 
+ if name == "TradeRoute": + return df + + default_df = self._get_default_dataframe(name, input_data, default_values) + + # future warning of concatenating empty dataframe + if not df.empty: + df = pd.concat([df, default_df]) + else: + df = default_df.copy() + + df = df[~df.index.duplicated(keep="first")] + + df = self._check_index_dtypes(name, self.user_config[name], df) + + return df.sort_index() + + def _get_default_dataframe( + self, + name: str, + input_data: Dict[str, pd.DataFrame], + default_values: Dict[str, pd.DataFrame], + ) -> pd.DataFrame: + """Creates default dataframe""" + + index_data = {} + indices = self.user_config[name]["indices"] + for index in indices: + index_data[index] = input_data[index]["VALUE"].to_list() + + if len(index_data) > 1: + new_index = pd.MultiIndex.from_product( + list(index_data.values()), names=list(index_data.keys()) + ) + else: + new_index = pd.Index( + list(index_data.values())[0], name=list(index_data.keys())[0] + ) + + df = pd.DataFrame(index=new_index).sort_index() + df["VALUE"] = default_values[name] + + return df + + def write_default_params( + self, + input_data: Dict[str, pd.DataFrame], + default_values: Dict[str, Union[str, int, float]], + ) -> Dict[str, pd.DataFrame]: + """Returns parameter dataframes with default values expanded""" + names = [x for x in self.user_config if self.user_config[x]["type"] == "param"] + for name in names: + try: + logger.debug(f"Searching for {name} data to expand") + input_data[name] = self._expand_dataframe( + name, input_data, default_values + ) + except KeyError: + logger.warning(f"Can not expand {name} data") + return input_data + + def write_default_results( + self, + result_data: Dict[str, pd.DataFrame], + input_data: Dict[str, pd.DataFrame], + default_values: Dict[str, Union[str, int, float]], + ) -> Dict[str, pd.DataFrame]: + """Returns result dataframes with default values expanded""" + + all_data = {**result_data, **input_data} + names = [x for x in self.user_config if 
self.user_config[x]["type"] == "result"] + for name in names: + try: + logger.debug(f"Searching for {name} data to expand") + result_data[name] = self._expand_dataframe( + name, all_data, default_values + ) + except KeyError: + logger.debug(f"Can not expand {name} data") + return result_data + @abstractmethod def read( self, filepath: Union[str, TextIO], **kwargs diff --git a/src/otoole/read_strategies.py b/src/otoole/read_strategies.py index da362ce..3b6273e 100644 --- a/src/otoole/read_strategies.py +++ b/src/otoole/read_strategies.py @@ -43,8 +43,13 @@ def read( class _ReadTabular(ReadStrategy): - def __init__(self, user_config: Dict[str, Dict], keep_whitespace: bool = False): - super().__init__(user_config) + def __init__( + self, + user_config: Dict[str, Dict], + write_defaults: bool = False, + keep_whitespace: bool = False, + ): + super().__init__(user_config=user_config, write_defaults=write_defaults) self.keep_whitespace = keep_whitespace def _check_set(self, df: pd.DataFrame, config_details: Dict, name: str): @@ -174,6 +179,9 @@ def read( input_data, config_type=config_type ) + if self.write_defaults: + input_data = self.write_default_params(input_data, default_values) + input_data = self._check_index(input_data) return input_data, default_values @@ -248,6 +256,9 @@ def read( input_data = self._check_index(input_data) + if self.write_defaults: + input_data = self.write_default_params(input_data, default_values) + return input_data, default_values @staticmethod @@ -328,13 +339,17 @@ def read( # Check filepath exists if os.path.exists(filepath): amply_datafile = self.read_in_datafile(filepath, config) - inputs = self._convert_amply_to_dataframe(amply_datafile, config) + input_data = self._convert_amply_to_dataframe(amply_datafile, config) for config_type in ["param", "set"]: - inputs = self._get_missing_input_dataframes( - inputs, config_type=config_type + input_data = self._get_missing_input_dataframes( + input_data, config_type=config_type ) - inputs = 
self._check_index(inputs) - return inputs, default_values + input_data = self._check_index(input_data) + + if self.write_defaults: + input_data = self.write_default_params(input_data, default_values) + + return input_data, default_values else: raise FileNotFoundError(f"File not found: {filepath}") diff --git a/src/otoole/results/result_package.py b/src/otoole/results/result_package.py index a63de92..c961d5e 100644 --- a/src/otoole/results/result_package.py +++ b/src/otoole/results/result_package.py @@ -775,6 +775,9 @@ def calc_crf(df: pd.DataFrame, operational_life: pd.Series) -> pd.Series: return numerator / denominator + if discount_rate_idv.empty or operational_life.empty: + raise ValueError("Cannot calculate PV Annuity due to missing data") + if not regions and not technologies: return pd.DataFrame( data=[], @@ -823,6 +826,10 @@ def pv_annuity( param PvAnnuity{r in REGION, t in TECHNOLOGY} := (1 - (1 + DiscountRate[r])^(-(OperationalLife[r,t]))) * (1 + DiscountRate[r]) / DiscountRate[r]; """ + + if discount_rate.empty or operational_life.empty: + raise ValueError("Cannot calculate PV Annuity due to missing data") + if regions and technologies: index = pd.MultiIndex.from_product( [regions, technologies], names=["REGION", "TECHNOLOGY"] @@ -873,6 +880,11 @@ def discount_factor( (1 + DiscountRate[r]) ^ (y - min{yy in YEAR} min(yy) + 0.5); """ + if discount_rate.empty: + raise ValueError( + "Cannot calculate discount factor due to missing discount rate" + ) + if regions and years: discount_rate["YEAR"] = [years] discount_factor = discount_rate.explode("YEAR").reset_index(level="REGION") @@ -917,6 +929,11 @@ def discount_factor_storage( (1 + DiscountRateStorage[r,s]) ^ (y - min{yy in YEAR} min(yy) + 0.0); """ + if discount_rate_storage.empty: + raise ValueError( + "Cannot calculate discount_factor_storage due to missing discount rate" + ) + if regions and years: index = pd.MultiIndex.from_product( [regions, storages, years], names=["REGION", "STORAGE", "YEAR"] diff 
--git a/src/otoole/results/results.py b/src/otoole/results/results.py index e3297cd..bcdf377 100644 --- a/src/otoole/results/results.py +++ b/src/otoole/results/results.py @@ -1,7 +1,7 @@ import logging from abc import abstractmethod from io import StringIO -from typing import Any, Dict, List, Set, TextIO, Tuple, Union +from typing import Any, Dict, TextIO, Tuple, Union import pandas as pd @@ -32,8 +32,10 @@ def read( """ if "input_data" in kwargs: input_data = kwargs["input_data"] + param_default_values = self._read_default_values(self.input_config) else: - input_data = None + input_data = {} + param_default_values = {} available_results = self.get_results_from_file( filepath, input_data @@ -41,10 +43,15 @@ def read( default_values = self._read_default_values(self.results_config) # type: Dict + input_data = self._expand_required_params(input_data, param_default_values) + results = self.calculate_results( available_results, input_data ) # type: Dict[str, pd.DataFrame] + if self.write_defaults: + results = self.write_default_results(results, input_data, default_values) + return results, default_values @abstractmethod @@ -73,6 +80,24 @@ def calculate_results( return results + def _expand_required_params( + self, + input_data: dict[str, pd.DataFrame], + param_defaults: dict[str, Any], + ) -> dict[str, pd.DataFrame]: + """Expands required default values for results processing""" + + if "DiscountRate" in input_data: + input_data["DiscountRate"] = self._expand_dataframe( + "DiscountRate", input_data, param_defaults + ) + if "DiscountRateIdv" in input_data: + input_data["DiscountRateIdv"] = self._expand_dataframe( + "DiscountRateIdv", input_data, param_defaults + ) + + return input_data + class ReadWideResults(ReadResults): def get_results_from_file(self, filepath, input_data): @@ -145,7 +170,7 @@ def _convert_wide_to_long(self, data: pd.DataFrame) -> Dict[str, pd.DataFrame]: return results -def check_duplicate_index(df: pd.DataFrame, columns: List, index: List) -> 
pd.DataFrame: +def check_duplicate_index(df: pd.DataFrame, columns: list, index: list) -> pd.DataFrame: """Catches pandas error when there are duplicate column indices""" if check_for_duplicates(index): index = rename_duplicate_column(index) @@ -156,12 +181,12 @@ def check_duplicate_index(df: pd.DataFrame, columns: List, index: List) -> pd.Da return df, index -def check_for_duplicates(index: List) -> bool: +def check_for_duplicates(index: list) -> bool: return len(set(index)) != len(index) -def identify_duplicate(index: List) -> Union[int, bool]: - elements = set() # type: Set +def identify_duplicate(index: list) -> Union[int, bool]: + elements = set() # type: set for counter, elem in enumerate(index): if elem in elements: return counter @@ -170,7 +195,7 @@ def identify_duplicate(index: List) -> Union[int, bool]: return False -def rename_duplicate_column(index: List) -> List: +def rename_duplicate_column(index: list) -> list: column = index.copy() location = identify_duplicate(column) if location: @@ -298,8 +323,13 @@ class ReadGlpk(ReadWideResults): Path to GLPK model file. Can be created using the `--wglp` flag. 
""" - def __init__(self, user_config: Dict[str, Dict], glpk_model: Union[str, TextIO]): - super().__init__(user_config) + def __init__( + self, + user_config: Dict[str, Dict], + glpk_model: Union[str, TextIO], + write_defaults: bool = False, + ): + super().__init__(user_config=user_config, write_defaults=write_defaults) if isinstance(glpk_model, str): with open(glpk_model, "r") as model_file: diff --git a/src/otoole/write_strategies.py b/src/otoole/write_strategies.py index d4472f8..921497a 100644 --- a/src/otoole/write_strategies.py +++ b/src/otoole/write_strategies.py @@ -152,8 +152,7 @@ def _write_parameter( default : int """ - if not self.write_defaults: - df = self._form_parameter(df, default) + df = self._form_parameter(df, default) handle.write("param default {} : {} :=\n".format(default, parameter_name)) df.to_csv( path_or_buf=handle, diff --git a/tests/conftest.py b/tests/conftest.py index bd41e71..d22edb5 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -83,6 +83,33 @@ def discount_rate_storage(): return df +@fixture +def discount_rate_empty(): + df = pd.DataFrame( + data=[], + columns=["REGION", "VALUE"], + ).set_index(["REGION"]) + return df + + +@fixture +def discount_rate_idv_empty(): + df = pd.DataFrame( + data=[], + columns=["REGION", "TECHNOLOGY", "VALUE"], + ).set_index(["REGION", "TECHNOLOGY"]) + return df + + +@fixture +def discount_rate_storage_empty(): + df = pd.DataFrame( + data=[], + columns=["REGION", "STORAGE", "VALUE"], + ).set_index(["REGION", "STORAGE"]) + return df + + @fixture def emission_activity_ratio(): df = pd.DataFrame( diff --git a/tests/results/test_results_package.py b/tests/results/test_results_package.py index 33c784b..418599c 100644 --- a/tests/results/test_results_package.py +++ b/tests/results/test_results_package.py @@ -669,6 +669,17 @@ def test_crf_no_tech_discount_rate(self, region, discount_rate, operational_life assert_frame_equal(actual, expected) + def test_crf_empty_discount_rate( + self, region, 
discount_rate_empty, operational_life + ): + technologies = ["GAS_EXTRACTION", "DUMMY"] + regions = region["VALUE"].to_list() + + with raises(ValueError): + capital_recovery_factor( + regions, technologies, discount_rate_empty, operational_life + ) + class TestPvAnnuity: def test_pva(self, region, discount_rate, operational_life): @@ -687,7 +698,7 @@ def test_pva(self, region, discount_rate, operational_life): assert_frame_equal(actual, expected) - def test_pva_null(self, discount_rate): + def test_pva_null(self, discount_rate, operational_life): actual = pv_annuity([], [], discount_rate, operational_life) @@ -698,6 +709,15 @@ def test_pva_null(self, discount_rate): assert_frame_equal(actual, expected) + def test_pva_empty_discount_rate( + self, region, discount_rate_empty, operational_life + ): + technologies = ["GAS_EXTRACTION", "DUMMY"] + regions = region["VALUE"].to_list() + + with raises(ValueError): + pv_annuity(regions, technologies, discount_rate_empty, operational_life) + class TestDiscountFactor: def test_df_start(self, region, year, discount_rate): @@ -774,6 +794,13 @@ def test_df_null(self, discount_rate): assert_frame_equal(actual, expected) + def test_df_empty_discount_rate(self, region, year, discount_rate_empty): + regions = region["VALUE"].to_list() + years = year["VALUE"].to_list() + + with raises(ValueError): + discount_factor(regions, years, discount_rate_empty, 1.0) + class TestDiscountFactorStorage: def test_dfs_start(self, region, year, discount_rate_storage): @@ -859,6 +886,18 @@ def test_df_null(self, discount_rate_storage): assert_frame_equal(actual, expected) + def test_df_storage_empty_discount_rate( + self, region, year, discount_rate_storage_empty + ): + storages = ["DAM"] + regions = region["VALUE"].to_list() + years = year["VALUE"].to_list() + + with raises(ValueError): + discount_factor_storage( + regions, storages, years, discount_rate_storage_empty, 1.0 + ) + class TestResultsPackage: def test_results_package_init(self): diff 
--git a/tests/test_input.py b/tests/test_input.py index e9a7b14..135d1e5 100644 --- a/tests/test_input.py +++ b/tests/test_input.py @@ -29,10 +29,21 @@ def capital_cost(): data=[ ["SIMPLICITY", "NGCC", 2014, 1.23], ["SIMPLICITY", "NGCC", 2015, 2.34], - ["SIMPLICITY", "NGCC", 2016, 3.45], - ["SIMPLICITY", "HYD1", 2014, 3.45], - ["SIMPLICITY", "HYD1", 2015, 2.34], - ["SIMPLICITY", "HYD1", 2016, 1.23], + ["SIMPLICITY", "HYD1", 2015, 3.45], + ["SIMPLICITY", "HYD1", 2016, 4.56], + ], + columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], + ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) + return df + + +@fixture +def new_capacity(): + df = pd.DataFrame( + data=[ + ["SIMPLICITY", "NGCC", 2016, 1.23], + ["SIMPLICITY", "HYD1", 2014, 2.34], + ["SIMPLICITY", "HYD1", 2015, 3.45], ], columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) @@ -49,15 +60,21 @@ def simple_default_values(): @fixture -def simple_input_data(region, year, technology, capital_cost): +def simple_input_data(region, year, technology, capital_cost, discount_rate): return { "REGION": region, "TECHNOLOGY": technology, "YEAR": year, "CapitalCost": capital_cost, + "DiscountRate": discount_rate, } +@fixture +def simple_available_results(new_capacity): + return {"NewCapacity": new_capacity} + + @fixture def simple_user_config(): return { @@ -65,9 +82,15 @@ def simple_user_config(): "indices": ["REGION", "TECHNOLOGY", "YEAR"], "type": "param", "dtype": "float", - "default": 0, + "default": -1, "short_name": "CAPEX", }, + "DiscountRate": { + "indices": ["REGION"], + "type": "param", + "dtype": "float", + "default": 0.25, + }, "REGION": { "dtype": "str", "type": "set", @@ -80,6 +103,12 @@ def simple_user_config(): "dtype": "int", "type": "set", }, + "NewCapacity": { + "indices": ["REGION", "TECHNOLOGY", "YEAR"], + "type": "result", + "dtype": "float", + "default": 20, + }, } @@ -115,191 +144,221 @@ def read( class TestExpandDefaults: - year = pd.DataFrame(data=[2014, 2015, 
2016], columns=["VALUE"]) - region = pd.DataFrame(data=["SIMPLICITY"], columns=["VALUE"]) - technology = pd.DataFrame(data=["NGCC", "HYD1"], columns=["VALUE"]) - - def input_data_multi_index_no_defaults(region, technology, year): - capex_in = pd.DataFrame( - [ - ["SIMPLICITY", "HYD1", 2014, 2000], - ["SIMPLICITY", "HYD1", 2015, 1500], - ["SIMPLICITY", "HYD1", 2016, 1000], - ["SIMPLICITY", "NGCC", 2014, 1000], - ["SIMPLICITY", "NGCC", 2015, 900], - ["SIMPLICITY", "NGCC", 2016, 800], - ], - columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], - ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) - capex_out = capex_in.copy() - capex_out["VALUE"] = capex_out["VALUE"].astype(float) - - data = { - "CapitalCost": capex_in, - "TECHNOLOGY": technology, - "YEAR": year, - "REGION": region, - } - return data, "CapitalCost", capex_out - - def input_data_multi_index(region, technology, year): - capex_in = pd.DataFrame( - [ - ["SIMPLICITY", "NGCC", 2014, 1000], - ["SIMPLICITY", "NGCC", 2015, 900], - ["SIMPLICITY", "HYD1", 2015, 1500], - ["SIMPLICITY", "HYD1", 2016, 1000], - ], - columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], - ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) - capex_out = pd.DataFrame( - [ - ["SIMPLICITY", "HYD1", 2014, -1], - ["SIMPLICITY", "HYD1", 2015, 1500], - ["SIMPLICITY", "HYD1", 2016, 1000], - ["SIMPLICITY", "NGCC", 2014, 1000], - ["SIMPLICITY", "NGCC", 2015, 900], - ["SIMPLICITY", "NGCC", 2016, -1], - ], - columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], - ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) - capex_out["VALUE"] = capex_out["VALUE"].astype(float) + # capital costs fixtures - data = { - "CapitalCost": capex_in, - "TECHNOLOGY": technology, - "YEAR": year, - "REGION": region, - } - return data, "CapitalCost", capex_out + input_data_multi_index_full = pd.DataFrame( + [ + ["SIMPLICITY", "HYD1", 2014, 2000.0], + ["SIMPLICITY", "HYD1", 2015, 1500.0], + ["SIMPLICITY", "HYD1", 2016, 1000.0], + ["SIMPLICITY", "NGCC", 2014, 1000.0], + ["SIMPLICITY", "NGCC", 
2015, 900.0], + ["SIMPLICITY", "NGCC", 2016, 800.0], + ], + columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], + ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) - def input_data_multi_index_empty(region, technology, year): - capex_in = pd.DataFrame( - [], - columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], - ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) - capex_out = pd.DataFrame( - [ - ["SIMPLICITY", "HYD1", 2014, -1], - ["SIMPLICITY", "HYD1", 2015, -1], - ["SIMPLICITY", "HYD1", 2016, -1], - ["SIMPLICITY", "NGCC", 2014, -1], - ["SIMPLICITY", "NGCC", 2015, -1], - ["SIMPLICITY", "NGCC", 2016, -1], - ], - columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], - ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) - capex_out["VALUE"] = capex_out["VALUE"].astype(float) + output_data_multi_index_full = input_data_multi_index_full.copy() - data = { - "CapitalCost": capex_in, - "TECHNOLOGY": technology, - "YEAR": year, - "REGION": region, - } - return data, "CapitalCost", capex_out - - def input_data_single_index(region): - discount_rate_in = pd.DataFrame( - [["SIMPLICITY", 0.05]], columns=["REGION", "VALUE"] - ).set_index(["REGION"]) - discount_rate_out = discount_rate_in.copy() - discount_rate_out["VALUE"] = discount_rate_out["VALUE"].astype(float) - - data = { - "DiscountRate": discount_rate_in, - "REGION": region, - } - return data, "DiscountRate", discount_rate_out + input_data_multi_index_partial = pd.DataFrame( + [ + ["SIMPLICITY", "NGCC", 2014, 1000.0], + ["SIMPLICITY", "NGCC", 2015, 900.0], + ["SIMPLICITY", "HYD1", 2015, 1500.0], + ["SIMPLICITY", "HYD1", 2016, 1000.0], + ], + columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], + ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) - def input_data_single_index_empty(region): - discount_rate_in = pd.DataFrame([], columns=["REGION", "VALUE"]).set_index( - ["REGION"] - ) - discount_rate_out = pd.DataFrame( - [["SIMPLICITY", 0.25]], columns=["REGION", "VALUE"] - ).set_index(["REGION"]) - discount_rate_out["VALUE"] = 
discount_rate_out["VALUE"].astype(float) - - data = { - "DiscountRate": discount_rate_in, - "TECHNOLOGY": technology, - "YEAR": year, - "REGION": region, - } - return data, "DiscountRate", discount_rate_out - - @fixture - def result_data(region): - new_capacity_in = pd.DataFrame( - [ - ["SIMPLICITY", "HYD1", 2015, 100], - ["SIMPLICITY", "HYD1", 2016, 0.1], - ["SIMPLICITY", "NGCC", 2014, 0.5], - ["SIMPLICITY", "NGCC", 2015, 100], - ], - columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], - ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) - new_capacity_out = pd.DataFrame( - [ - ["SIMPLICITY", "HYD1", 2014, 20], - ["SIMPLICITY", "HYD1", 2015, 100], - ["SIMPLICITY", "HYD1", 2016, 0.1], - ["SIMPLICITY", "NGCC", 2014, 0.5], - ["SIMPLICITY", "NGCC", 2015, 100], - ["SIMPLICITY", "NGCC", 2016, 20], - ], - columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], - ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) + output_data_multi_index_partial = pd.DataFrame( + [ + ["SIMPLICITY", "HYD1", 2014, -1.0], + ["SIMPLICITY", "HYD1", 2015, 1500.0], + ["SIMPLICITY", "HYD1", 2016, 1000.0], + ["SIMPLICITY", "NGCC", 2014, 1000.0], + ["SIMPLICITY", "NGCC", 2015, 900.0], + ["SIMPLICITY", "NGCC", 2016, -1.0], + ], + columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], + ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) - data = { - "NewCapacity": new_capacity_in, - } - return data, "NewCapacity", new_capacity_out - - parameter_test_data = [ - input_data_multi_index_no_defaults(region, technology, year), - input_data_multi_index(region, technology, year), - input_data_multi_index_empty(region, technology, year), - input_data_single_index(region), - input_data_single_index_empty(region), + # discount rate fixtures + + input_data_multi_index_empty = pd.DataFrame( + [], + columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], + ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) + + output_data_multi_index_empty = pd.DataFrame( + [ + ["SIMPLICITY", "HYD1", 2014, -1.0], + ["SIMPLICITY", "HYD1", 2015, -1.0], + 
["SIMPLICITY", "HYD1", 2016, -1.0], + ["SIMPLICITY", "NGCC", 2014, -1.0], + ["SIMPLICITY", "NGCC", 2015, -1.0], + ["SIMPLICITY", "NGCC", 2016, -1.0], + ], + columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], + ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) + + input_data_single_index_full = pd.DataFrame( + [["SIMPLICITY", 0.05]], columns=["REGION", "VALUE"] + ).set_index(["REGION"]) + + output_data_single_index_full = input_data_single_index_full.copy() + + input_data_single_index_empty = pd.DataFrame( + [], columns=["REGION", "VALUE"] + ).set_index(["REGION"]) + + output_data_single_index_empty = pd.DataFrame( + [["SIMPLICITY", 0.25]], columns=["REGION", "VALUE"] + ).set_index(["REGION"]) + + # test expansion of dataframe + + test_data = [ + ("CapitalCost", input_data_multi_index_full, output_data_multi_index_full), + ( + "CapitalCost", + input_data_multi_index_partial, + output_data_multi_index_partial, + ), + ("CapitalCost", input_data_multi_index_empty, output_data_multi_index_empty), + ("DiscountRate", input_data_single_index_full, output_data_single_index_full), + ( + "DiscountRate", + input_data_single_index_empty, + output_data_single_index_empty, + ), ] - parameter_test_data_ids = [ - "multi_index_no_defaults", - "multi_index", + test_data_ids = [ + "multi_index_full", + "multi_index_partial", "multi_index_empty", - "single_index", + "single_index_full", "single_index_empty", ] @mark.parametrize( - "input_data,parameter,expected", - parameter_test_data, - ids=parameter_test_data_ids, + "name,input,expected", + test_data, + ids=test_data_ids, ) def test_expand_parameters_defaults( - self, user_config, simple_default_values, input_data, parameter, expected + self, + simple_user_config, + simple_default_values, + simple_input_data, + name, + input, + expected, ): - write_strategy = DummyWriteStrategy( - user_config=user_config, default_values=simple_default_values + input_data = simple_input_data.copy() + input_data[name] = input + + read_strategy = 
DummyReadStrategy(user_config=simple_user_config) + actual = read_strategy._expand_dataframe( + name, input_data, simple_default_values ) - write_strategy.input_data = input_data - actual = write_strategy._expand_defaults( - input_data, write_strategy.default_values + assert_frame_equal(actual, expected) + + def test_expand_results_key_error( + self, simple_user_config, simple_input_data, simple_default_values + ): + read_strategy = DummyReadStrategy( + user_config=simple_user_config, write_defaults=True + ) + + with raises(KeyError, match="SpecifiedAnnualDemand"): + read_strategy._expand_dataframe( + "SpecifiedAnnualDemand", simple_input_data, simple_default_values + ) + + # test get default dataframe + + test_data_defaults = [ + ("CapitalCost", output_data_multi_index_empty), + ("DiscountRate", output_data_single_index_empty), + ] + test_data_defaults_ids = [ + "multi_index", + "single_index", + ] + + @mark.parametrize( + "name,expected", + test_data_defaults, + ids=test_data_defaults_ids, + ) + def test_get_default_dataframe( + self, + simple_user_config, + simple_default_values, + simple_input_data, + name, + expected, + ): + + read_strategy = DummyReadStrategy(user_config=simple_user_config) + actual = read_strategy._get_default_dataframe( + name, simple_input_data, simple_default_values ) - assert_frame_equal(actual[parameter], expected) + assert_frame_equal(actual, expected) - def test_expand_result_defaults( - self, user_config, simple_default_values, simple_input_data, result_data + # test expand all input data + + def test_write_default_params( + self, simple_user_config, simple_input_data, simple_default_values ): - write_strategy = DummyWriteStrategy( - user_config=user_config, default_values=simple_default_values + read_strategy = DummyReadStrategy(user_config=simple_user_config) + actual_expanded = read_strategy.write_default_params( + simple_input_data, simple_default_values ) - write_strategy.input_data = simple_input_data - actual = 
write_strategy._expand_defaults( - result_data[0], write_strategy.default_values + actual = actual_expanded["CapitalCost"] + + expected = pd.DataFrame( + data=[ + ["SIMPLICITY", "HYD1", 2014, -1], + ["SIMPLICITY", "HYD1", 2015, 3.45], + ["SIMPLICITY", "HYD1", 2016, 4.56], + ["SIMPLICITY", "NGCC", 2014, 1.23], + ["SIMPLICITY", "NGCC", 2015, 2.34], + ["SIMPLICITY", "NGCC", 2016, -1], + ], + columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], + ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) + + assert_frame_equal(actual, expected) + + def test_write_default_results( + self, + simple_user_config, + simple_input_data, + simple_available_results, + simple_default_values, + ): + + read_strategy = DummyReadStrategy(user_config=simple_user_config) + actual_expanded = read_strategy.write_default_results( + simple_available_results, simple_input_data, simple_default_values ) - assert_frame_equal(actual[result_data[1]], result_data[2]) + + actual = actual_expanded["NewCapacity"] + + expected = pd.DataFrame( + data=[ + ["SIMPLICITY", "HYD1", 2014, 2.34], + ["SIMPLICITY", "HYD1", 2015, 3.45], + ["SIMPLICITY", "HYD1", 2016, 20], + ["SIMPLICITY", "NGCC", 2014, 20], + ["SIMPLICITY", "NGCC", 2015, 20], + ["SIMPLICITY", "NGCC", 2016, 1.23], + ], + columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], + ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) + + assert_frame_equal(actual, expected) class TestReadStrategy: @@ -315,8 +374,8 @@ class TestReadStrategy: ("set", "REGION", pd.DataFrame(columns=["VALUE"])), ) compare_read_to_expected_data = [ - [["CapitalCost", "REGION", "TECHNOLOGY", "YEAR"], False], - [["CAPEX", "REGION", "TECHNOLOGY", "YEAR"], True], + [["CapitalCost", "DiscountRate", "REGION", "TECHNOLOGY", "YEAR"], False], + [["CAPEX", "DiscountRate", "REGION", "TECHNOLOGY", "YEAR"], True], ] compare_read_to_expected_data_exception = [ ["CapitalCost", "REGION", "TECHNOLOGY"], diff --git a/tests/test_read_strategies.py b/tests/test_read_strategies.py index 19a6a07..d917ff6 100644 --- 
a/tests/test_read_strategies.py +++ b/tests/test_read_strategies.py @@ -1,7 +1,6 @@ import os from io import StringIO from textwrap import dedent -from typing import List import pandas as pd from amply import Amply @@ -16,10 +15,18 @@ ReadGlpk, ReadGurobi, ReadHighs, + ReadResults, check_for_duplicates, identify_duplicate, rename_duplicate_column, ) +from otoole.utils import _read_file + + +# To instantiate abstract class ReadResults +class DummyReadResults(ReadResults): + def get_results_from_file(self, filepath, input_data): + raise NotImplementedError() class TestReadCplex: @@ -81,7 +88,6 @@ def test_convert_to_dataframe(self, user_config): reader = ReadCplex(user_config) with StringIO(input_file) as file_buffer: actual = reader._convert_to_dataframe(file_buffer) - # print(actual) expected = pd.DataFrame( [ ["NewCapacity", "SIMPLICITY,ETHPLANT,2015", 0.030000000000000027], @@ -100,7 +106,6 @@ def test_solution_to_dataframe(self, user_config): reader = ReadCplex(user_config) with StringIO(input_file) as file_buffer: actual = reader.read(file_buffer) - # print(actual) expected = ( pd.DataFrame( [ @@ -147,6 +152,32 @@ def test_solution_to_dataframe(self, user_config): ) pd.testing.assert_frame_equal(actual[0]["RateOfActivity"], expected) + def test_solution_to_dataframe_with_defaults(self, user_config): + input_file = self.cplex_data + + regions = pd.DataFrame(data=["SIMPLICITY"], columns=["VALUE"]) + technologies = pd.DataFrame(data=["ETHPLANT"], columns=["VALUE"]) + years = pd.DataFrame(data=[2014, 2015, 2016], columns=["VALUE"]) + input_data = {"REGION": regions, "TECHNOLOGY": technologies, "YEAR": years} + + reader = ReadCplex(user_config, write_defaults=True) + with StringIO(input_file) as file_buffer: + actual = reader.read(file_buffer, input_data=input_data) + expected = ( + pd.DataFrame( + [ + ["SIMPLICITY", "ETHPLANT", 2014, 0], + ["SIMPLICITY", "ETHPLANT", 2015, 0.030000000000000027], + ["SIMPLICITY", "ETHPLANT", 2016, 0.030999999999999917], + ], + 
columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"], + ) + .astype({"REGION": str, "TECHNOLOGY": str, "YEAR": "int64", "VALUE": float}) + .set_index(["REGION", "TECHNOLOGY", "YEAR"]) + ) + + pd.testing.assert_frame_equal(actual[0]["NewCapacity"], expected) + class TestReadGurobi: @@ -170,7 +201,6 @@ def test_convert_to_dataframe(self, user_config): reader = ReadGurobi(user_config) with StringIO(input_file) as file_buffer: actual = reader._convert_to_dataframe(file_buffer) - # print(actual) expected = pd.DataFrame( [ ["TotalDiscountedCost", "SIMPLICITY,2014", 1.9360385416218188e02], @@ -192,7 +222,6 @@ def test_solution_to_dataframe(self, user_config): reader = ReadGurobi(user_config) with StringIO(input_file) as file_buffer: actual = reader.read(file_buffer) - # print(actual) expected = ( pd.DataFrame( [ @@ -385,7 +414,7 @@ def test_read_cbc_to_dataframe(self, cbc_input, expected, user_config): ).set_index(["REGION", "EMISSION", "YEAR"]) }, ), - ] # type: List + ] # type: list @mark.parametrize( "results,expected", @@ -399,7 +428,7 @@ def test_convert_cbc_to_csv_long(self, results, expected, user_config): for name, df in actual.items(): pd.testing.assert_frame_equal(df, expected[name]) - test_data_3 = [(total_cost_cbc, {}, total_cost_otoole_df)] # type: List + test_data_3 = [(total_cost_cbc, {}, total_cost_otoole_df)] # type: list @mark.parametrize( "cbc_solution,input_data,expected", @@ -1072,6 +1101,21 @@ def test_catch_error_no_parameter(self, caplog, user_config): in caplog.text ) + def test_read_datafile_with_defaults(self, user_config): + datafile = os.path.join("tests", "fixtures", "simplicity.txt") + reader = ReadDatafile(user_config=user_config, write_defaults=True) + actual, _ = reader.read(datafile) + data = [ + ["SIMPLICITY", "DAM", 2014, 0.0], + ["SIMPLICITY", "DAM", 2015, 0.0], + ["SIMPLICITY", "DAM", 2016, 0.0], + ] + expected = pd.DataFrame( + data, columns=["REGION", "STORAGE", "YEAR", "VALUE"] + ).set_index(["REGION", "STORAGE", "YEAR"]) + + 
pd.testing.assert_frame_equal(actual["CapitalCostStorage"].iloc[:3], expected) + class TestReadExcel: def test_read_excel_yearsplit(self, user_config): @@ -1125,6 +1169,21 @@ def test_read_excel_yearsplit(self, user_config): assert (actual_data == expected).all() + def test_read_excel_with_defaults(self, user_config): + spreadsheet = os.path.join("tests", "fixtures", "combined_inputs.xlsx") + reader = ReadExcel(user_config=user_config, write_defaults=True) + actual, _ = reader.read(spreadsheet) + data = [ + ["09_ROK", "CO2", 2017, -1.0], + ["09_ROK", "CO2", 2018, -1.0], + ["09_ROK", "CO2", 2019, -1.0], + ] + expected = pd.DataFrame( + data, columns=["REGION", "EMISSION", "YEAR", "VALUE"] + ).set_index(["REGION", "EMISSION", "YEAR"]) + + pd.testing.assert_frame_equal(actual["AnnualEmissionLimit"].iloc[:3], expected) + def test_narrow_parameters(self, user_config): data = [ ["IW0016", 0.238356164, 0.238356164, 0.238356164], @@ -1241,6 +1300,26 @@ def test_read_default_values_csv(self, user_config): expected = None assert actual == expected + def test_read_csv_with_defaults(self): + user_config_path = os.path.join( + "tests", "fixtures", "super_simple", "super_simple.yaml" + ) + with open(user_config_path, "r") as config_file: + user_config = _read_file(config_file, ".yaml") + + filepath = os.path.join("tests", "fixtures", "super_simple", "csv") + reader = ReadCsv(user_config=user_config, write_defaults=True) + actual, _ = reader.read(filepath) + data = [ + ["BB", "gas_import", 2016, 0.0], + ["BB", "gas_plant", 2016, 1.03456], + ] + expected = pd.DataFrame( + data, columns=["REGION", "TECHNOLOGY", "YEAR", "VALUE"] + ).set_index(["REGION", "TECHNOLOGY", "YEAR"]) + + pd.testing.assert_frame_equal(actual["CapitalCost"], expected) + class TestReadTabular: """Methods shared for csv and excel""" @@ -1305,3 +1384,66 @@ def test_check_datatypes_invalid(self, user_config): with raises(ValueError): check_datatypes(df, user_config, "AvailabilityFactor") + + +class 
TestExpandRequiredParameters: + """Tests the expansion of required parameters for results processing""" + + region = pd.DataFrame(data=["SIMPLICITY"], columns=["VALUE"]) + + technology = pd.DataFrame(data=["NGCC"], columns=["VALUE"]) + + def test_no_expansion(self): + + user_config = { + "REGION": { + "dtype": "str", + "type": "set", + }, + } + + reader = DummyReadResults(user_config=user_config) + defaults = {} + input_data = {} + + actual = reader._expand_required_params(input_data, defaults) + + assert not actual + + def test_expansion(self, user_config, discount_rate_empty, discount_rate_idv_empty): + + user_config["DiscountRateIdv"] = { + "indices": ["REGION", "TECHNOLOGY"], + "type": "param", + "dtype": "float", + "default": 0.10, + } + + reader = DummyReadResults(user_config=user_config) + defaults = reader._read_default_values(user_config) + input_data = { + "REGION": self.region, + "TECHNOLOGY": self.technology, + "DiscountRate": discount_rate_empty, + "DiscountRateIdv": discount_rate_idv_empty, + } + + actual = reader._expand_required_params(input_data, defaults) + + actual_dr = actual["DiscountRate"] + + expected_dr = pd.DataFrame( + data=[["SIMPLICITY", 0.05]], + columns=["REGION", "VALUE"], + ).set_index(["REGION"]) + + pd.testing.assert_frame_equal(actual_dr, expected_dr) + + actual_dr_idv = actual["DiscountRateIdv"] + + expected_dr_idv = pd.DataFrame( + data=[["SIMPLICITY", "NGCC", 0.10]], + columns=["REGION", "TECHNOLOGY", "VALUE"], + ).set_index(["REGION", "TECHNOLOGY"]) + + pd.testing.assert_frame_equal(actual_dr_idv, expected_dr_idv)