diff --git a/.github/workflows/periodic_benchmarks.yaml b/.github/workflows/periodic_benchmarks.yaml
new file mode 100644
index 00000000..6ea0b489
--- /dev/null
+++ b/.github/workflows/periodic_benchmarks.yaml
@@ -0,0 +1,100 @@
+# Initial Source: pybop-team/PyBop
+
+# This workflow periodically runs the benchmark suite in benchmarks/
+# using asv and publishes the results, updating
+# the display website hosted in the pybop-bench repo
+
+# Steps:
+# - Benchmark all commits since the last one that was benchmarked
+# - Push results to pybop-bench repo
+# - Publish website
+name: Benchmarks
+on:
+ # Every day at noon UTC
+ schedule:
+ - cron: "0 12 * * *"
+ # Make it possible to trigger the
+ # workflow manually
+ workflow_dispatch:
+
+jobs:
+ benchmarks:
+ runs-on: [self-hosted, macOS, ARM64]
+ if: github.repository == 'pybop-team/PyBOP'
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install python & create virtualenv
+ shell: bash
+ run: |
+ eval "$(pyenv init -)"
+ pyenv install 3.12 -s
+ pyenv virtualenv 3.12 pybop-312-bench
+
+ - name: Install dependencies & run benchmarks
+ shell: bash
+ run: |
+ eval "$(pyenv init -)"
+ pyenv activate pybop-312-bench
+ python -m pip install -e .[all,dev]
+ python -m pip install asv[virtualenv]
+ python -m asv machine --machine "SelfHostedRunner"
+ python -m asv run --machine "SelfHostedRunner" NEW --show-stderr -v
+
+ - name: Upload results as artifact
+ uses: actions/upload-artifact@v4
+ with:
+ name: asv_periodic_results
+ path: results
+
+ - name: Uninstall pyenv-virtualenv & python
+ if: always()
+ shell: bash
+ run: |
+ eval "$(pyenv init -)"
+ pyenv activate pybop-312-bench
+ pyenv uninstall -f $( python --version )
+
+ publish-results:
+ name: Push and publish results
+ needs: benchmarks
+ runs-on: ubuntu-latest
+ if: github.repository == 'pybop-team/PyBOP'
+ steps:
+ - name: Set up Python 3.12
+ uses: actions/setup-python@v5
+ with:
+ python-version: 3.12
+
+ - name: Install asv
+ run: pip install asv
+
+ - name: Checkout pybop-bench repo
+ uses: actions/checkout@v4
+ with:
+ repository: pybop-team/pybop-bench
+ token: ${{ secrets.PUSH_BENCH_TOKEN }}
+
+ - name: Download results artifact
+ uses: actions/download-artifact@v4
+ with:
+ name: asv_periodic_results
+ path: new_results
+
+ - name: Copy new results and push to pybop-bench repo
+ env:
+ PUSH_BENCH_EMAIL: ${{ secrets.PUSH_BENCH_EMAIL }}
+ PUSH_BENCH_NAME: ${{ secrets.PUSH_BENCH_NAME }}
+ run: |
+ cp -vr new_results/* results
+ git config --global user.email "$PUSH_BENCH_EMAIL"
+ git config --global user.name "$PUSH_BENCH_NAME"
+ git add results
+ git commit -am "Add new benchmark results"
+ git push
+
+ - name: Publish results
+ run: |
+ asv publish
+ git fetch origin gh-pages:gh-pages
+ asv gh-pages
diff --git a/.gitignore b/.gitignore
index bc3caa2c..3c3bb708 100644
--- a/.gitignore
+++ b/.gitignore
@@ -310,3 +310,7 @@ $RECYCLE.BIN/
# Output JSON files
**/fit_ecm_parameters.json
+
+# Airspeed Velocity
+*.asv/
+results/
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7bfc4528..02a04cf9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@
## Features
+- [#179](https://github.com/pybop-team/PyBOP/pull/203) - Adds `asv` configuration for benchmarking and initial benchmark suite.
- [#218](https://github.com/pybop-team/PyBOP/pull/218) - Adds likelihood base class, `GaussianLogLikelihoodKnownSigma`, `GaussianLogLikelihood`, and `ProbabilityBased` cost function. As well as addition of a maximum likelihood estimation (MLE) example.
- [#185](https://github.com/pybop-team/PyBOP/pull/185) - Adds a pull request template, additional nox sessions `quick` for standard tests + docs, `pre-commit` for pre-commit, `test` to run all standard tests, `doctest` for docs.
- [#215](https://github.com/pybop-team/PyBOP/pull/215) - Adds `release_workflow.md` and updates `release_action.yaml`
diff --git a/README.md b/README.md
index e4e347e1..e4342443 100644
--- a/README.md
+++ b/README.md
@@ -31,6 +31,9 @@
+
+
+
diff --git a/asv.conf.json b/asv.conf.json
new file mode 100644
index 00000000..fdd830ce
--- /dev/null
+++ b/asv.conf.json
@@ -0,0 +1,23 @@
+{
+ "version": 1,
+ "project": "PyBOP",
+ "project_url": "https://github.com/pybop-team/pybop",
+ "repo": ".",
+ "build_command": [
+ "python -m pip install build",
+ "python -m build --wheel -o {build_cache_dir} {build_dir}"
+ ],
+ "default_benchmark_timeout": 180,
+ "branches": ["develop"],
+ "environment_type": "virtualenv",
+ "matrix": {
+ "req":{
+ "pybamm": [],
+ "numpy": [],
+ "scipy": [],
+ "pints": []
+ }
+ },
+ "build_cache_dir": ".asv/cache",
+ "build_dir": ".asv/build"
+}
diff --git a/benchmarks/README.md b/benchmarks/README.md
new file mode 100644
index 00000000..55a014a5
--- /dev/null
+++ b/benchmarks/README.md
@@ -0,0 +1,104 @@
+# Benchmarking Directory for PyBOP
+
+Welcome to the benchmarking directory for PyBOP. We use `asv` (airspeed velocity) for benchmarking, which is a tool for running Python benchmarks over time in a consistent environment. This document will guide you through the setup, execution, and viewing of benchmarks.
+
+## Quick Links
+
+- [Airspeed Velocity (asv) Documentation](https://asv.readthedocs.io/)
+
+## Prerequisites
+
+Before you can run benchmarks, you need to ensure that `asv` is installed and that you have a working Python environment. It is also recommended to run benchmarks in a clean, dedicated virtual environment to avoid any side-effects from your local environment.
+
+### Installing `asv`
+
+You can install `asv` using `pip`. It's recommended to do this within a virtual environment:
+
+```bash
+pip install asv
+```
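+
+The first time you benchmark on a new machine, `asv` records some information about it. You can accept the defaults non-interactively (this step is optional and depends on your own setup):
+
+```bash
+asv machine --yes
+```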
+
+## Setting Up Benchmarks
+
+The `benchmarks` directory already contains a set of benchmarks for the package. To add or modify benchmarks, edit the `.py` files within this directory.
+
+Each benchmark file should contain one or more classes with methods that `asv` will automatically recognize as benchmarks. Here's an example structure for a benchmark file:
+
+```python
+class ExampleBenchmarks:
+    def setup(self):
+        # Code to run before each benchmark method is executed
+        pass
+
+    def time_example_benchmark(self):
+        # The actual benchmark code
+        pass
+
+    def teardown(self):
+        # Code to run after each benchmark method is executed
+        pass
+```
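+
+PyBOP's benchmark classes also use `asv`'s parameterisation hooks, `params` and `param_names`, so that each timing method is repeated for every combination of model, parameter set and optimiser. A minimal sketch of that pattern (the class and method names here are illustrative only, not part of PyBOP):
+
+```python
+class ExampleParameterisedBenchmarks:
+    # asv runs each method once for every value listed in `params`
+    param_names = ["size"]
+    params = [[100, 1000]]
+
+    def time_sum_range(self, size):
+        # Benchmarked body, executed for size=100 and size=1000
+        sum(range(size))
+```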
+
+## Running Benchmarks
+
+With `asv` installed and your benchmarks set up, you can now run benchmarks using the following standard `asv` commands:
+
+### Running All Benchmarks
+
+To run all benchmarks in your Python environment:
+
+```bash
+asv run
+```
+
+This will benchmark the current state of your codebase by default. You can benchmark a specific range of commits by appending a git revision range to the command, for example:
+
+```bash
+asv run <start-commit>..<end-commit>
+```
+
+For quick benchmarking, pass the `--quick` argument to `asv run`. This runs each benchmark only once and reports that single measurement instead of a statistical sample.
+
+```bash
+asv run --quick
+```
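+
+For test runs while you are editing benchmarks, `asv` also provides a `dev` subcommand, which benchmarks the code in your current environment instead of building a fresh one (roughly `asv run` with `--python=same`):
+
+```bash
+asv dev
+```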
+
+### Running Specific Benchmarks
+
+To run a specific benchmark, pass a regular expression matching its name to `--bench`:
+
+```bash
+asv run --bench <benchmark-name-regex>
+```
+
+### Running Benchmarks for a Specific Environment
+
+To run benchmarks against a specific Python version:
+
+```bash
+asv run --python=same # To use the same Python version as the current environment
+asv run --python=3.8 # To specify the Python version
+```
+
+## Viewing Benchmark Results
+
+After running benchmarks, `asv` will generate results which can be viewed as a web page:
+
+```bash
+asv publish
+asv preview
+```
+
+You can then open the URL reported by `asv preview` in your web browser to explore the results.
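+
+To compare two benchmarked revisions directly in the terminal instead, use the `compare` subcommand (the revision names below are placeholders):
+
+```bash
+asv compare <revision1> <revision2>
+```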
+
+## Continuous Benchmarking
+
+You can also set up `asv` for continuous benchmarking, so that performance is tracked over time. This typically involves integration with a continuous integration (CI) system.
+
+For more detailed instructions on setting up continuous benchmarking, consult the [asv documentation](https://asv.readthedocs.io/en/stable/using.html#continuous-benchmarking).
+
+## Reporting Issues
+
+If you encounter any issues or have suggestions for improving the benchmarks, please open an issue or a pull request in the project repository.
+
+Thank you for contributing to the performance of the package!
diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/benchmarks/benchmark_model.py b/benchmarks/benchmark_model.py
new file mode 100644
index 00000000..ed53914c
--- /dev/null
+++ b/benchmarks/benchmark_model.py
@@ -0,0 +1,81 @@
+import pybop
+import numpy as np
+from .benchmark_utils import set_random_seed
+
+
+class BenchmarkModel:
+ param_names = ["model", "parameter_set"]
+ params = [
+ [pybop.lithium_ion.SPM, pybop.lithium_ion.SPMe],
+ ["Chen2020"],
+ ]
+
+ def setup(self, model, parameter_set):
+ """
+ Set up the model and problem for the predict and simulate benchmarks.
+
+ Args:
+ model (pybop.Model): The model class to be benchmarked.
+ parameter_set (str): The name of the parameter set to be used.
+ """
+ # Set random seed
+ set_random_seed()
+
+ # Create model instance
+ self.model = model(parameter_set=pybop.ParameterSet.pybamm(parameter_set))
+
+ # Define fitting parameters
+ parameters = [
+ pybop.Parameter(
+ "Current function [A]",
+ prior=pybop.Gaussian(0.4, 0.02),
+ bounds=[0.2, 0.7],
+ initial_value=0.4,
+ )
+ ]
+
+ # Generate synthetic data
+ sigma = 0.001
+ self.t_eval = np.arange(0, 900, 2)
+ values = self.model.predict(t_eval=self.t_eval)
+ corrupt_values = values["Voltage [V]"].data + np.random.normal(
+ 0, sigma, len(self.t_eval)
+ )
+
+ self.inputs = {
+ "Current function [A]": 0.4,
+ }
+
+ # Create dataset
+ dataset = pybop.Dataset(
+ {
+ "Time [s]": self.t_eval,
+ "Current function [A]": values["Current [A]"].data,
+ "Voltage [V]": corrupt_values,
+ }
+ )
+
+ # Create fitting problem
+ self.problem = pybop.FittingProblem(
+ model=self.model, dataset=dataset, parameters=parameters, init_soc=0.5
+ )
+
+ def time_model_predict(self, model, parameter_set):
+ """
+ Benchmark the predict method of the model.
+
+ Args:
+ model (pybop.Model): The model class being benchmarked.
+ parameter_set (str): The name of the parameter set being used.
+ """
+ self.model.predict(inputs=self.inputs, t_eval=self.t_eval)
+
+ def time_model_simulate(self, model, parameter_set):
+ """
+ Benchmark the simulate method of the model.
+
+ Args:
+ model (pybop.Model): The model class being benchmarked.
+ parameter_set (str): The name of the parameter set being used.
+ """
+ self.problem._model.simulate(inputs=self.inputs, t_eval=self.t_eval)
diff --git a/benchmarks/benchmark_optim_construction.py b/benchmarks/benchmark_optim_construction.py
new file mode 100644
index 00000000..d92f2ec3
--- /dev/null
+++ b/benchmarks/benchmark_optim_construction.py
@@ -0,0 +1,90 @@
+import pybop
+import numpy as np
+from .benchmark_utils import set_random_seed
+
+
+class BenchmarkOptimisationConstruction:
+ param_names = ["model", "parameter_set", "optimiser"]
+ params = [
+ [pybop.lithium_ion.SPM, pybop.lithium_ion.SPMe],
+ ["Chen2020"],
+ [pybop.CMAES],
+ ]
+
+ def setup(self, model, parameter_set, optimiser):
+ """
+ Set up the model, problem, and cost for optimization benchmarking.
+
+ Args:
+ model (pybop.Model): The model class to be benchmarked.
+ parameter_set (str): The name of the parameter set to be used.
+ optimiser (pybop.Optimiser): The optimizer class to be used.
+ """
+ # Set random seed
+ set_random_seed()
+
+ # Create model instance
+ model_instance = model(parameter_set=pybop.ParameterSet.pybamm(parameter_set))
+
+ # Define fitting parameters
+ parameters = [
+ pybop.Parameter(
+ "Negative electrode active material volume fraction",
+ prior=pybop.Gaussian(0.6, 0.02),
+ bounds=[0.375, 0.7],
+ initial_value=0.63,
+ ),
+ pybop.Parameter(
+ "Positive electrode active material volume fraction",
+ prior=pybop.Gaussian(0.5, 0.02),
+ bounds=[0.375, 0.625],
+ initial_value=0.51,
+ ),
+ ]
+
+ # Generate synthetic data
+ sigma = 0.001
+ t_eval = np.arange(0, 900, 2)
+ values = model_instance.predict(t_eval=t_eval)
+ corrupt_values = values["Voltage [V]"].data + np.random.normal(
+ 0, sigma, len(t_eval)
+ )
+
+ # Create dataset
+ dataset = pybop.Dataset(
+ {
+ "Time [s]": t_eval,
+ "Current function [A]": values["Current [A]"].data,
+ "Voltage [V]": corrupt_values,
+ }
+ )
+
+ # Create fitting problem
+ problem = pybop.FittingProblem(
+ model=model_instance, dataset=dataset, parameters=parameters
+ )
+
+ # Create cost function
+ self.cost = pybop.SumSquaredError(problem=problem)
+
+ def time_optimisation_construction(self, model, parameter_set, optimiser):
+ """
+ Benchmark the construction of the optimization class.
+
+ Args:
+ model (pybop.Model): The model class being benchmarked.
+ parameter_set (str): The name of the parameter set being used.
+ optimiser (pybop.Optimiser): The optimizer class being used.
+ """
+ self.optim = pybop.Optimisation(self.cost, optimiser=optimiser)
+
+ def time_cost_evaluate(self, model, parameter_set, optimiser):
+ """
+ Benchmark the cost function evaluation.
+
+ Args:
+ model (pybop.Model): The model class being benchmarked.
+ parameter_set (str): The name of the parameter set being used.
+ optimiser (pybop.Optimiser): The optimizer class being used.
+ """
+ self.cost([0.63, 0.51])
diff --git a/benchmarks/benchmark_parameterisation.py b/benchmarks/benchmark_parameterisation.py
new file mode 100644
index 00000000..4315f89b
--- /dev/null
+++ b/benchmarks/benchmark_parameterisation.py
@@ -0,0 +1,133 @@
+import pybop
+import numpy as np
+from .benchmark_utils import set_random_seed
+
+
+class BenchmarkParameterisation:
+ param_names = ["model", "parameter_set", "optimiser"]
+ params = [
+ [pybop.lithium_ion.SPM, pybop.lithium_ion.SPMe],
+ ["Chen2020"],
+ [
+ pybop.SciPyMinimize,
+ pybop.SciPyDifferentialEvolution,
+ pybop.Adam,
+ pybop.CMAES,
+ pybop.GradientDescent,
+ pybop.IRPropMin,
+ pybop.PSO,
+ pybop.SNES,
+ pybop.XNES,
+ ],
+ ]
+
+ def setup(self, model, parameter_set, optimiser):
+ """
+ Set up the parameterization problem for benchmarking.
+
+ Args:
+ model (pybop.Model): The model class to be benchmarked.
+ parameter_set (str): The name of the parameter set to be used.
+ optimiser (pybop.Optimiser): The optimizer class to be used.
+ """
+ # Set random seed
+ set_random_seed()
+
+ # Create model instance
+ params = pybop.ParameterSet.pybamm(parameter_set)
+ params.update(
+ {
+ "Negative electrode active material volume fraction": 0.63,
+ "Positive electrode active material volume fraction": 0.51,
+ }
+ )
+ model_instance = model(parameter_set=params)
+
+ # Define fitting parameters
+ parameters = [
+ pybop.Parameter(
+ "Negative electrode active material volume fraction",
+ prior=pybop.Gaussian(0.55, 0.03),
+ bounds=[0.375, 0.7],
+ ),
+ pybop.Parameter(
+ "Positive electrode active material volume fraction",
+ prior=pybop.Gaussian(0.55, 0.03),
+ bounds=[0.375, 0.7],
+ ),
+ ]
+
+ # Generate synthetic data
+ sigma = 0.003
+ t_eval = np.arange(0, 900, 2)
+ values = model_instance.predict(t_eval=t_eval)
+ corrupt_values = values["Voltage [V]"].data + np.random.normal(
+ 0, sigma, len(t_eval)
+ )
+
+ # Create dataset
+ dataset = pybop.Dataset(
+ {
+ "Time [s]": t_eval,
+ "Current function [A]": values["Current [A]"].data,
+ "Voltage [V]": corrupt_values,
+ }
+ )
+
+ # Create fitting problem
+ problem = pybop.FittingProblem(model_instance, parameters, dataset)
+
+ # Create cost function
+ cost = pybop.SumSquaredError(problem=problem)
+
+ # Create optimization instance
+ self.optim = pybop.Optimisation(cost, optimiser=optimiser)
+ if optimiser in [pybop.GradientDescent]:
+ self.optim.optimiser.set_learning_rate(
+ 0.008
+ ) # Compromise between stability & performance
+
+ def time_parameterisation(self, model, parameter_set, optimiser):
+ """
+ Benchmark the parameterization process. Optimiser options are left at high values
+ to ensure the threshold is met and the optimisation process is completed.
+
+ Args:
+ model (pybop.Model): The model class being benchmarked (unused).
+ parameter_set (str): The name of the parameter set being used (unused).
+ optimiser (pybop.Optimiser): The optimizer class being used (unused).
+ """
+ # Set optimizer options for consistent benchmarking
+ self.optim.set_max_unchanged_iterations(iterations=25, threshold=1e-5)
+ self.optim.set_max_iterations(250)
+ self.optim.set_min_iterations(2)
+ x, _ = self.optim.run()
+ return x
+
+ def track_results(self, model, parameter_set, optimiser):
+ """
+ Track the results of the optimization.
+ Note: these results will differ from those of time_parameterisation,
+ as the optimisation is run separately. They should be used to verify
+ that the optimisation algorithm typically converges.
+
+ Args:
+ model (pybop.Model): The model class being benchmarked (unused).
+ parameter_set (str): The name of the parameter set being used (unused).
+ optimiser (pybop.Optimiser): The optimizer class being used (unused).
+ """
+ x = self.time_parameterisation(model, parameter_set, optimiser)
+
+ return tuple(x)
+
+ def time_optimiser_ask(self, model, parameter_set, optimiser):
+ """
+ Benchmark the optimizer's ask method.
+
+ Args:
+ model (pybop.Model): The model class being benchmarked (unused).
+ parameter_set (str): The name of the parameter set being used (unused).
+ optimiser (pybop.Optimiser): The optimizer class being used.
+ """
+ if optimiser not in [pybop.SciPyMinimize, pybop.SciPyDifferentialEvolution]:
+ self.optim.optimiser.ask()
diff --git a/benchmarks/benchmark_utils.py b/benchmarks/benchmark_utils.py
new file mode 100644
index 00000000..3126e8bb
--- /dev/null
+++ b/benchmarks/benchmark_utils.py
@@ -0,0 +1,5 @@
+import numpy as np
+
+
+def set_random_seed(seed_value=8):
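+    """Seed NumPy's global random number generator so the synthetic benchmark data is reproducible."""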
+    np.random.seed(seed_value)
diff --git a/noxfile.py b/noxfile.py
index e946d821..0695f011 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -112,6 +112,14 @@ def run_quick(session):
run_doc_tests(session)
+@nox.session
+def benchmarks(session):
+ """Run the benchmarks."""
+ session.install("-e", ".[all,dev]", silent=False)
+ session.install("asv[virtualenv]")
+ session.run("asv", "run", "--show-stderr", "--python=same")
+
+
@nox.session
def docs(session):
"""
diff --git a/pybop/_optimisation.py b/pybop/_optimisation.py
index 3eb839ef..f122535e 100644
--- a/pybop/_optimisation.py
+++ b/pybop/_optimisation.py
@@ -468,7 +468,7 @@ def set_min_iterations(self, iterations=2):
raise ValueError("Minimum number of iterations cannot be negative.")
self._min_iterations = iterations
- def set_max_unchanged_iterations(self, iterations=5, threshold=1e-5):
+ def set_max_unchanged_iterations(self, iterations=15, threshold=1e-5):
"""
Set the maximum number of iterations without significant change as a stopping criterion.
Credit: PINTS
@@ -476,7 +476,7 @@ def set_max_unchanged_iterations(self, iterations=5, threshold=1e-5):
Parameters
----------
iterations : int, optional
- The maximum number of unchanged iterations to run (default is 25).
+ The maximum number of unchanged iterations to run (default is 15).
Set to `None` to remove this stopping criterion.
threshold : float, optional
The minimum significant change in the objective function value that resets the unchanged iteration counter (default is 1e-5).
diff --git a/pyproject.toml b/pyproject.toml
index 539745ad..8f3a0375 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,7 +21,6 @@ dependencies = [
"pybamm>=23.5",
"numpy>=1.16",
"scipy>=1.3",
- "pandas>=1.0",
"pints>=0.5",
]