From 7d3051a3d597dfb4cafaf9030fe9029b8b51b589 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Thu, 16 May 2024 17:25:55 +0100 Subject: [PATCH 01/35] Add a WeightedCost --- examples/scripts/spm_weighted_cost.py | 66 ++++++++++++++++++ pybop/__init__.py | 2 +- pybop/costs/_likelihoods.py | 38 ++++++----- pybop/costs/base_cost.py | 96 +++++++++++++++++++++++++++ pybop/costs/design_costs.py | 2 + pybop/costs/fitting_costs.py | 53 ++++++++++----- 6 files changed, 223 insertions(+), 34 deletions(-) create mode 100644 examples/scripts/spm_weighted_cost.py diff --git a/examples/scripts/spm_weighted_cost.py b/examples/scripts/spm_weighted_cost.py new file mode 100644 index 00000000..4bd8380e --- /dev/null +++ b/examples/scripts/spm_weighted_cost.py @@ -0,0 +1,66 @@ +import numpy as np + +import pybop + +# Parameter set and model definition +parameter_set = pybop.ParameterSet.pybamm("Chen2020") +model = pybop.lithium_ion.SPM(parameter_set=parameter_set) + +# Fitting parameters +parameters = [ + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.68, 0.05), + bounds=[0.5, 0.8], + ), + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Gaussian(0.58, 0.05), + bounds=[0.4, 0.7], + ), +] + +# Generate data +sigma = 0.001 +t_eval = np.arange(0, 900, 3) +values = model.predict(t_eval=t_eval) +corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval)) + +# Form dataset +dataset = pybop.Dataset( + { + "Time [s]": t_eval, + "Current function [A]": values["Current [A]"].data, + "Voltage [V]": corrupt_values, + } +) + +# Generate problem, cost function, and optimisation class +problem = pybop.FittingProblem(model, parameters, dataset) +cost1 = pybop.SumSquaredError(problem) +cost2 = pybop.RootMeanSquaredError(problem) +weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1, 100]) + +for cost in [weighted_cost, cost1, cost2]: + optim = pybop.Optimisation(cost, optimiser=pybop.IRPropMin) + optim.set_max_iterations(60) + + # Run the optimisation + x, final_cost = optim.run() + print( + "True parameters:", + [ + parameters[0].true_value, + parameters[1].true_value, + ], + ) + print("Estimated parameters:", x) + + # Plot the timeseries output + pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison") + + # Plot convergence + pybop.plot_convergence(optim) + + # Plot the cost landscape with optimisation path + pybop.plot2d(optim, steps=15) diff --git a/pybop/__init__.py b/pybop/__init__.py index 034dcb4a..6419f8f4 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -55,7 +55,7 @@ # # Cost function class # -from .costs.base_cost import BaseCost +from .costs.base_cost import BaseCost, WeightedCost from .costs.fitting_costs import ( RootMeanSquaredError, SumSquaredError, diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 91374cc0..848c34c2 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -62,20 +62,22 @@ def __init__(self, problem, sigma): def _evaluate(self, x, grad=None): """ - Calls the problem.evaluate method and calculates - the log-likelihood + Calculates the log-likelihood. 
""" - y = self.problem.evaluate(x) - for key in self.signal: - if len(y.get(key, [])) != len(self._target.get(key, [])): + if len(self._current_prediction.get(key, [])) != len( + self._target.get(key, []) + ): return -np.float64(np.inf) # prediction doesn't match target e = np.array( [ np.sum( self._offset - + self._multip * np.sum((self._target[signal] - y[signal]) ** 2) + + self._multip + * np.sum( + (self._target[signal] - self._current_prediction[signal]) ** 2 + ) ) for signal in self.signal ] @@ -88,20 +90,26 @@ def _evaluate(self, x, grad=None): def _evaluateS1(self, x, grad=None): """ - Calls the problem.evaluateS1 method and calculates - the log-likelihood + Calculates the log-likelihood and sensitivities. """ - y, dy = self.problem.evaluateS1(x) - for key in self.signal: - if len(y.get(key, [])) != len(self._target.get(key, [])): + if len(self._current_prediction.get(key, [])) != len( + self._target.get(key, []) + ): likelihood = np.float64(np.inf) dl = self._dl * np.ones(self.n_parameters) return -likelihood, -dl - r = np.array([self._target[signal] - y[signal] for signal in self.signal]) + r = np.array( + [ + self._target[signal] - self._current_prediction[signal] + for signal in self.signal + ] + ) likelihood = self._evaluate(x) - dl = np.sum((self.sigma2 * np.sum((r * dy.T), axis=2)), axis=1) + dl = np.sum( + (self.sigma2 * np.sum((r * self._current_sensitivities.T), axis=2)), axis=1 + ) return likelihood, dl @@ -119,6 +127,7 @@ def __init__(self, problem): super(GaussianLogLikelihood, self).__init__(problem) self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi) self._dl = np.ones(self._n_parameters + self.n_outputs) + self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, x, grad=None): """ @@ -161,8 +170,7 @@ def _evaluate(self, x, grad=None): def _evaluateS1(self, x, grad=None): """ - Calls the problem.evaluateS1 method and calculates - the log-likelihood + Calculates the log-likelihood and sensitivities. """ sigma = np.asarray(x[-self.n_outputs :]) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index f3ac9517..28fdff7f 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -39,6 +39,7 @@ def __init__(self, problem=None, sigma=None): self.bounds = None self.sigma0 = sigma self._minimising = True + self._fixed_problem = True if isinstance(self.problem, BaseProblem): self._target = problem._target self.parameters = problem.parameters @@ -82,6 +83,9 @@ def evaluate(self, x, grad=None): If an error occurs during the calculation of the cost. """ try: + if self._fixed_problem: + self._current_prediction = self.problem.evaluate(x) + if self._minimising: return self._evaluate(x, grad) else: # minimise the negative cost @@ -140,6 +144,11 @@ def evaluateS1(self, x): If an error occurs during the calculation of the cost or gradient. """ try: + if self._fixed_problem: + self._current_prediction, self._current_sensitivities = ( + self.problem.evaluateS1(x) + ) + if self._minimising: return self._evaluateS1(x) else: # minimise the negative cost @@ -173,3 +182,90 @@ def _evaluateS1(self, x): If the method has not been implemented by the subclass. """ raise NotImplementedError + + +class WeightedCost(BaseCost): + """ + A subclass for constructing a linear combination of cost functions as + a single weighted cost function. + + Inherits all parameters and attributes from ``BaseCost``. 
+ """ + + def __init__(self, cost_list, weights=None): + self.cost_list = cost_list + self.weights = weights + self._different_problems = False + + if not isinstance(self.cost_list, list): + raise TypeError("Expected a list of costs.") + if self.weights is None: + self.weights = np.ones(len(cost_list)) + elif isinstance(self.weights, list): + self.weights = np.array(self.weights) + if not isinstance(self.weights, np.ndarray): + raise TypeError( + "Expected a list or array of weights the same length as cost_list." + ) + if not len(self.weights) == len(self.cost_list): + raise ValueError( + "Expected a list or array of weights the same length as cost_list." + ) + + # Check if all costs depend on the same problem + for cost in self.cost_list: + if hasattr(cost, "problem") and ( + not cost._fixed_problem or cost.problem is not self.cost_list[0].problem + ): + self._different_problems = True + + if not self._different_problems: + super(WeightedCost, self).__init__(self.cost_list[0].problem) + self._fixed_problem = False + else: + super(WeightedCost, self).__init__() + + def _evaluate(self, x, grad=None): + """ + Calculate the weighted cost for a given set of parameters. + """ + e = np.empty_like(self.cost_list) + + if not self._different_problems: + current_prediction = self.problem.evaluate(x) + + for i, cost in enumerate(self.cost_list): + if self._different_problems: + cost._current_prediction = cost.problem.evaluate(x) + else: + cost._current_prediction = current_prediction + e[i] = cost._evaluate(x, grad) + + return np.dot(e, self.weights) + + def _evaluateS1(self, x): + """ + Compute the cost and its gradient with respect to the parameters. + """ + e = np.empty_like(self.cost_list) + de = np.empty((len(self.parameters), len(self.cost_list))) + + if not self._different_problems: + current_prediction, current_sensitivities = self.problem.evaluateS1(x) + + for i, cost in enumerate(self.cost_list): + if self._different_problems: + cost._current_prediction, cost._current_sensitivities = ( + cost.problem.evaluateS1(x) + ) + else: + cost._current_prediction, cost._current_sensitivities = ( + current_prediction, + current_sensitivities, + ) + e[i], de[:, i] = cost._evaluateS1(x) + + e = np.dot(e, self.weights) + de = np.dot(de, self.weights) + + return e, de diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index b02940bf..aff62191 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -98,6 +98,7 @@ class GravimetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super(GravimetricEnergyDensity, self).__init__(problem, update_capacity) + self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, x, grad=None): """ @@ -157,6 +158,7 @@ class VolumetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super(VolumetricEnergyDensity, self).__init__(problem, update_capacity) + self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, x, grad=None): """ diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 0665454b..24c78fff 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -41,15 +41,19 @@ def _evaluate(self, x, grad=None): The root mean square error. 
""" - prediction = self.problem.evaluate(x) - for key in self.signal: - if len(prediction.get(key, [])) != len(self._target.get(key, [])): + if len(self._current_prediction.get(key, [])) != len( + self._target.get(key, []) + ): return np.float64(np.inf) # prediction doesn't match target e = np.array( [ - np.sqrt(np.mean((prediction[signal] - self._target[signal]) ** 2)) + np.sqrt( + np.mean( + (self._current_prediction[signal] - self._target[signal]) ** 2 + ) + ) for signal in self.signal ] ) @@ -79,18 +83,24 @@ def _evaluateS1(self, x): ValueError If an error occurs during the calculation of the cost or gradient. """ - y, dy = self.problem.evaluateS1(x) - for key in self.signal: - if len(y.get(key, [])) != len(self._target.get(key, [])): + if len(self._current_prediction.get(key, [])) != len( + self._target.get(key, []) + ): e = np.float64(np.inf) de = self._de * np.ones(self.n_parameters) return e, de - r = np.array([y[signal] - self._target[signal] for signal in self.signal]) + r = np.array( + [ + self._current_prediction[signal] - self._target[signal] + for signal in self.signal + ] + ) e = np.sqrt(np.mean(r**2, axis=1)) - de = np.mean((r * dy.T), axis=2) / ( - np.sqrt(np.mean((r * dy.T) ** 2, axis=2)) + np.finfo(float).eps + de = np.mean((r * self._current_sensitivities.T), axis=2) / ( + np.sqrt(np.mean((r * self._current_sensitivities.T) ** 2, axis=2)) + + np.finfo(float).eps ) if self.n_outputs == 1: @@ -155,15 +165,15 @@ def _evaluate(self, x, grad=None): float The sum of squared errors. """ - prediction = self.problem.evaluate(x) - for key in self.signal: - if len(prediction.get(key, [])) != len(self._target.get(key, [])): + if len(self._current_prediction.get(key, [])) != len( + self._target.get(key, []) + ): return np.float64(np.inf) # prediction doesn't match target e = np.array( [ - np.sum(((prediction[signal] - self._target[signal]) ** 2)) + np.sum(((self._current_prediction[signal] - self._target[signal]) ** 2)) for signal in self.signal ] ) @@ -192,16 +202,22 @@ def _evaluateS1(self, x): ValueError If an error occurs during the calculation of the cost or gradient. 
""" - y, dy = self.problem.evaluateS1(x) for key in self.signal: - if len(y.get(key, [])) != len(self._target.get(key, [])): + if len(self._current_prediction.get(key, [])) != len( + self._target.get(key, []) + ): e = np.float64(np.inf) de = self._de * np.ones(self.n_parameters) return e, de - r = np.array([y[signal] - self._target[signal] for signal in self.signal]) + r = np.array( + [ + self._current_prediction[signal] - self._target[signal] + for signal in self.signal + ] + ) e = np.sum(np.sum(r**2, axis=0), axis=0) - de = 2 * np.sum(np.sum((r * dy.T), axis=2), axis=1) + de = 2 * np.sum(np.sum((r * self._current_sensitivities.T), axis=2), axis=1) return e, de @@ -235,6 +251,7 @@ class ObserverCost(BaseCost): def __init__(self, observer: Observer): super().__init__(problem=observer) self._observer = observer + self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, x, grad=None): """ From 7898e8868ae55e2a853728998fd3c1465b7c52e6 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Thu, 16 May 2024 19:06:48 +0100 Subject: [PATCH 02/35] Fix setting --- pybop/costs/base_cost.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 28fdff7f..b6fa4885 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -221,9 +221,9 @@ def __init__(self, cost_list, weights=None): if not self._different_problems: super(WeightedCost, self).__init__(self.cost_list[0].problem) - self._fixed_problem = False else: super(WeightedCost, self).__init__() + self._fixed_problem = False def _evaluate(self, x, grad=None): """ From 7a9bcc4f7d59be349c12630826bdaed56b62133f Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Thu, 16 May 2024 19:08:14 +0100 Subject: [PATCH 03/35] Add tests --- tests/unit/test_cost.py | 50 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 6ffeb58e..3ed5245e 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -1,3 +1,5 @@ +from copy import copy + import numpy as np import pytest @@ -227,3 +229,51 @@ def test_energy_density_costs( # Compute after updating nominal capacity cost = cost_class(problem, update_capacity=True) cost([0.4]) + + @pytest.mark.unit + def test_weighted_cost(self, problem, x0): + cost1 = pybop.SumSquaredError(problem) + cost2 = pybop.RootMeanSquaredError(problem) + + # Test with and without weights + weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2]) + np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) + weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1, 1]) + np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) + weighted_cost = pybop.WeightedCost( + cost_list=[cost1, cost2], weights=np.array([1, 1]) + ) + np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) + with pytest.raises( + TypeError, + match="Expected a list or array of weights the same length as cost_list.", + ): + weighted_cost = pybop.WeightedCost( + cost_list=[cost1, cost2], weights="Invalid string" + ) + with pytest.raises( + ValueError, + match="Expected a list or array of weights the same length as cost_list.", + ): + weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1]) + + # Test with and without different problems + weighted_cost_2 = pybop.WeightedCost(cost_list=[cost1, cost2], 
weights=[1, 100])
+        assert weighted_cost_2._different_problems is False
+        assert weighted_cost_2.problem is problem
+        assert weighted_cost_2(x0) >= 0
+
+        cost3 = pybop.RootMeanSquaredError(copy(problem))
+        weighted_cost_3 = pybop.WeightedCost(cost_list=[cost1, cost3], weights=[1, 100])
+        assert weighted_cost_3._different_problems is True
+        assert weighted_cost_3.problem is None
+        assert weighted_cost_3(x0) >= 0
+
+        np.testing.assert_allclose(
+            weighted_cost_2._evaluate(x0), weighted_cost_3._evaluate(x0), atol=1e-5
+        )
+        weighted_cost_3.parameters = problem.parameters
+        errors_2, sensitivities_2 = weighted_cost_2._evaluateS1(x0)
+        errors_3, sensitivities_3 = weighted_cost_3._evaluateS1(x0)
+        np.testing.assert_allclose(errors_2, errors_3, atol=1e-5)
+        np.testing.assert_allclose(sensitivities_2, sensitivities_3, atol=1e-5)

From a1c2ec6c15754e06a962864b234bcd83184fa399 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Thu, 23 May 2024 17:26:26 +0100
Subject: [PATCH 04/35] Update base_cost.py

---
 pybop/costs/base_cost.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py
index 9c2ec692..a734ccbd 100644
--- a/pybop/costs/base_cost.py
+++ b/pybop/costs/base_cost.py
@@ -38,7 +38,7 @@ def __init__(self, problem=None, sigma=None):
         self.x0 = None
         self.bounds = None
         self.sigma0 = sigma
-        self._fixed_problem = True
+        self._fixed_problem = False
         if isinstance(self.problem, BaseProblem):
             self._target = problem._target
             self.parameters = problem.parameters
@@ -48,6 +48,7 @@ def __init__(self, problem=None, sigma=None):
             self.signal = problem.signal
             self._n_parameters = problem.n_parameters
             self.sigma0 = sigma or problem.sigma0 or np.zeros(self._n_parameters)
+            self._fixed_problem = True

From 501064d37bd5b7c2a4c3444d78277707cf82a41d Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Fri, 24 May 2024 17:10:01 +0100
Subject: [PATCH 05/35] Update CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9e26270d..27e3b8ce 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@

 ## Features

+- [#327](https://github.com/pybop-team/PyBOP/issues/327) - Adds the `WeightedCost` subclass, defines when to evaluate a problem and adds the `spm_weighted_cost` example script.
 - [#236](https://github.com/pybop-team/PyBOP/issues/236) - Restructures the optimiser classes, adds a new optimisation API through direct construction and keyword arguments, and fixes the setting of `max_iterations`, and `_minimising`. Introduces `pybop.BaseOptimiser`, `pybop.BasePintsOptimiser`, and `pybop.BaseSciPyOptimiser` classes.
 - [#321](https://github.com/pybop-team/PyBOP/pull/321) - Updates Prior classes with BaseClass, adds a `problem.sample_initial_conditions` method to improve stability of SciPy.Minimize optimiser.
 - [#249](https://github.com/pybop-team/PyBOP/pull/249) - Add WeppnerHuggins model and GITT example.
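[Editorial sketch, not part of the patch series: the combination rule that `WeightedCost._evaluate` implements in PATCH 01 above is simply a dot product between the vector of sub-cost values and the weights. The snippet below reproduces just that reduction with stand-in cost functions on a residual vector; the lambdas and numbers are illustrative, not PyBOP API.]

import numpy as np

# Stand-in sub-costs over a residual vector; in the patch these roles are
# played by SumSquaredError and RootMeanSquaredError evaluated via a problem.
costs = [lambda r: np.sum(r**2), lambda r: np.sqrt(np.mean(r**2))]
weights = np.array([1.0, 100.0])

def weighted_cost(r):
    e = np.array([cost(r) for cost in costs])  # one value per sub-cost
    return np.dot(e, weights)                  # mirrors np.dot(e, self.weights)

print(weighted_cost(np.array([0.01, -0.02, 0.015])))  # 1 * SSE + 100 * RMSE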
From 5fa3db4e84b65641edc48cb1bdbc582331b106d5 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Mon, 10 Jun 2024 10:41:30 +0100 Subject: [PATCH 06/35] Update imports --- pybop/costs/base_cost.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index e6c2f540..558a9635 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,3 +1,5 @@ +import numpy as np + from pybop import BaseProblem From 23b10992a87894b3d99bd1078d6280d2e707807c Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Mon, 10 Jun 2024 11:19:42 +0100 Subject: [PATCH 07/35] Update x0 to [0.5] --- tests/unit/test_cost.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index fc643ef4..3f230fda 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -234,7 +234,7 @@ def test_design_costs( cost([0.4]) @pytest.mark.unit - def test_weighted_cost(self, problem, x0): + def test_weighted_cost(self, problem): cost1 = pybop.SumSquaredError(problem) cost2 = pybop.RootMeanSquaredError(problem) @@ -264,19 +264,19 @@ def test_weighted_cost(self, problem, x0): weighted_cost_2 = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1, 100]) assert weighted_cost_2._different_problems is False assert weighted_cost_2.problem is problem - assert weighted_cost_2(x0) >= 0 + assert weighted_cost_2([0.5]) >= 0 cost3 = pybop.RootMeanSquaredError(copy(problem)) weighted_cost_3 = pybop.WeightedCost(cost_list=[cost1, cost3], weights=[1, 100]) assert weighted_cost_3._different_problems is True assert weighted_cost_3.problem is None - assert weighted_cost_3(x0) >= 0 + assert weighted_cost_3([0.5]) >= 0 np.testing.assert_allclose( - weighted_cost_2._evaluate(x0), weighted_cost_3._evaluate(x0), atol=1e-5 + weighted_cost_2._evaluate([0.5]), weighted_cost_3._evaluate([0.5]), atol=1e-5 ) weighted_cost_3.parameters = problem.parameters - errors_2, sensitivities_2 = weighted_cost_2._evaluateS1(x0) - errors_3, sensitivities_3 = weighted_cost_3._evaluateS1(x0) + errors_2, sensitivities_2 = weighted_cost_2._evaluateS1([0.5]) + errors_3, sensitivities_3 = weighted_cost_3._evaluateS1([0.5]) np.testing.assert_allclose(errors_2, errors_3, atol=1e-5) np.testing.assert_allclose(sensitivities_2, sensitivities_3, atol=1e-5) From d6708d6e90a1e07998e257849c5ff481eb208b20 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 10 Jun 2024 10:20:01 +0000 Subject: [PATCH 08/35] style: pre-commit fixes --- tests/unit/test_cost.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 3f230fda..f1e1fc4b 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -273,7 +273,9 @@ def test_weighted_cost(self, problem): assert weighted_cost_3([0.5]) >= 0 np.testing.assert_allclose( - weighted_cost_2._evaluate([0.5]), weighted_cost_3._evaluate([0.5]), atol=1e-5 + weighted_cost_2._evaluate([0.5]), + weighted_cost_3._evaluate([0.5]), + atol=1e-5, ) weighted_cost_3.parameters = problem.parameters errors_2, sensitivities_2 = weighted_cost_2._evaluateS1([0.5]) From 37ac6e2e0898b14470c020c30a89e7574c65c54e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 10 Jun 2024 19:03:53 +0000 Subject: [PATCH 09/35] style: 
pre-commit fixes --- pybop/costs/fitting_costs.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 069875c2..c63784e6 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -98,7 +98,9 @@ def _evaluateS1(self, x): ] ) e = np.sqrt(np.mean(r**2, axis=1)) - de = np.mean((r * self._current_sensitivities.T), axis=2) / (e + np.finfo(float).eps) + de = np.mean((r * self._current_sensitivities.T), axis=2) / ( + e + np.finfo(float).eps + ) if self.n_outputs == 1: return e.item(), de.flatten() From 1c540d95514c5469e2c1bb5c5098461259ca4c87 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Tue, 11 Jun 2024 14:42:14 +0100 Subject: [PATCH 10/35] Update spm_weighted_cost.py --- examples/scripts/spm_weighted_cost.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/examples/scripts/spm_weighted_cost.py b/examples/scripts/spm_weighted_cost.py index 4bd8380e..14ba213d 100644 --- a/examples/scripts/spm_weighted_cost.py +++ b/examples/scripts/spm_weighted_cost.py @@ -7,18 +7,20 @@ model = pybop.lithium_ion.SPM(parameter_set=parameter_set) # Fitting parameters -parameters = [ +parameters = pybop.Parameters( pybop.Parameter( "Negative electrode active material volume fraction", prior=pybop.Gaussian(0.68, 0.05), bounds=[0.5, 0.8], + true_value=parameter_set["Negative electrode active material volume fraction"], ), pybop.Parameter( "Positive electrode active material volume fraction", prior=pybop.Gaussian(0.58, 0.05), bounds=[0.4, 0.7], + true_value=parameter_set["Positive electrode active material volume fraction"], ), -] +) # Generate data sigma = 0.001 @@ -42,18 +44,11 @@ weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1, 100]) for cost in [weighted_cost, cost1, cost2]: - optim = pybop.Optimisation(cost, optimiser=pybop.IRPropMin) - optim.set_max_iterations(60) + optim = pybop.IRPropMin(cost, max_iterations=60) # Run the optimisation x, final_cost = optim.run() - print( - "True parameters:", - [ - parameters[0].true_value, - parameters[1].true_value, - ], - ) + print("True parameters:", parameters.true_value()) print("Estimated parameters:", x) # Plot the timeseries output From 3a9e10a08700b3fe4f3778a4b8d15b80443204a9 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Tue, 11 Jun 2024 15:21:58 +0100 Subject: [PATCH 11/35] Update TypeError with test --- pybop/costs/base_cost.py | 4 +++- tests/unit/test_cost.py | 5 +++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 558a9635..c385a51f 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -179,7 +179,9 @@ def __init__(self, cost_list, weights=None): self._different_problems = False if not isinstance(self.cost_list, list): - raise TypeError("Expected a list of costs.") + raise TypeError( + f"Expected a list of costs. 
Received {type(self.cost_list)}" + ) if self.weights is None: self.weights = np.ones(len(cost_list)) elif isinstance(self.weights, list): diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index f1e1fc4b..97286255 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -247,6 +247,11 @@ def test_weighted_cost(self, problem): cost_list=[cost1, cost2], weights=np.array([1, 1]) ) np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) + with pytest.raises( + TypeError, + match=r"Expected a list of costs.", + ): + weighted_cost = pybop.WeightedCost(cost_list="Invalid string") with pytest.raises( TypeError, match="Expected a list or array of weights the same length as cost_list.", From be1c5662ec9d58d314ddcbfe21738d6ff654dc82 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Thu, 4 Jul 2024 21:42:35 +0100 Subject: [PATCH 12/35] Update spm_weighted_cost.py --- examples/scripts/spm_weighted_cost.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/scripts/spm_weighted_cost.py b/examples/scripts/spm_weighted_cost.py index 14ba213d..55743461 100644 --- a/examples/scripts/spm_weighted_cost.py +++ b/examples/scripts/spm_weighted_cost.py @@ -52,7 +52,7 @@ print("Estimated parameters:", x) # Plot the timeseries output - pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison") + pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) From f608aa722b078ae8ae6ec3c930a96db2ab41a083 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Fri, 5 Jul 2024 11:06:05 +0100 Subject: [PATCH 13/35] Update evaluate and _evaluate --- pybop/costs/base_cost.py | 66 +++++++++++++++++++++++++++--------- pybop/costs/fitting_costs.py | 8 ++++- tests/unit/test_cost.py | 27 ++++++++++----- 3 files changed, 76 insertions(+), 25 deletions(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 4fb0fd01..7fae9ee9 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -208,46 +208,80 @@ def __init__(self, cost_list, weights=None): else: super(WeightedCost, self).__init__() self._fixed_problem = False + for cost in self.cost_list: + self.parameters.join(cost.parameters) - def _evaluate(self, x, grad=None): + def _evaluate(self, inputs: Inputs, grad=None): """ Calculate the weighted cost for a given set of parameters. + + Parameters + ---------- + inputs : Inputs + The parameters for which to compute the cost. + grad : array-like, optional + An array to store the gradient of the cost function with respect + to the parameters. + + Returns + ------- + float + The weighted cost value. 
""" e = np.empty_like(self.cost_list) - if not self._different_problems: - current_prediction = self.problem.evaluate(x) + if not self._fixed_problem and self._different_problems: + self.parameters.update(values=list(inputs.values())) + elif not self._fixed_problem: + self._current_prediction = self.problem.evaluate(inputs) for i, cost in enumerate(self.cost_list): - if self._different_problems: - cost._current_prediction = cost.problem.evaluate(x) + if not self._fixed_problem and self._different_problems: + inputs = cost.parameters.as_dict() + cost._current_prediction = cost.problem.evaluate(inputs) else: - cost._current_prediction = current_prediction - e[i] = cost._evaluate(x, grad) + cost._current_prediction = self._current_prediction + e[i] = cost._evaluate(inputs, grad) return np.dot(e, self.weights) - def _evaluateS1(self, x): + def _evaluateS1(self, inputs: Inputs): """ - Compute the cost and its gradient with respect to the parameters. + Compute the weighted cost and its gradient with respect to the parameters. + + Parameters + ---------- + inputs : Inputs + The parameters for which to compute the cost and gradient. + + Returns + ------- + tuple + A tuple containing the cost and the gradient. The cost is a float, + and the gradient is an array-like of the same length as `x`. """ e = np.empty_like(self.cost_list) de = np.empty((len(self.parameters), len(self.cost_list))) - if not self._different_problems: - current_prediction, current_sensitivities = self.problem.evaluateS1(x) + if not self._fixed_problem and self._different_problems: + self.parameters.update(values=list(inputs.values())) + elif not self._fixed_problem: + self._current_prediction, self._current_sensitivities = ( + self.problem.evaluateS1(inputs) + ) for i, cost in enumerate(self.cost_list): - if self._different_problems: + if not self._fixed_problem and self._different_problems: + inputs = cost.parameters.as_dict() cost._current_prediction, cost._current_sensitivities = ( - cost.problem.evaluateS1(x) + cost.problem.evaluateS1(inputs) ) else: cost._current_prediction, cost._current_sensitivities = ( - current_prediction, - current_sensitivities, + self._current_prediction, + self._current_sensitivities, ) - e[i], de[:, i] = cost._evaluateS1(x) + e[i], de[:, i] = cost._evaluateS1(inputs) e = np.dot(e, self.weights) de = np.dot(de, self.weights) diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index d907c06f..08eef239 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -275,7 +275,7 @@ def _evaluate(self, inputs: Inputs, grad=None): ) return -log_likelihood - def evaluateS1(self, inputs: Inputs): + def _evaluateS1(self, inputs: Inputs): """ Compute the cost and its gradient with respect to the parameters. @@ -347,6 +347,9 @@ def _evaluate(self, inputs: Inputs, grad=None): float The maximum a posteriori cost. """ + if self._fixed_problem: + self.likelihood._current_prediction = self._current_prediction + log_likelihood = self.likelihood._evaluate(inputs) log_prior = sum( self.parameters[key].prior.logpdf(value) for key, value in inputs.items() @@ -376,6 +379,9 @@ def _evaluateS1(self, inputs: Inputs): ValueError If an error occurs during the calculation of the cost or gradient. 
""" + if self._fixed_problem: + self.likelihood._current_prediction = self._current_prediction + log_likelihood, dl = self.likelihood._evaluateS1(inputs) log_prior = sum( self.parameters[key].prior.logpdf(inputs[key]) for key in inputs.keys() diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 990682eb..cc45deed 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -285,24 +285,35 @@ def test_weighted_cost(self, problem): weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1]) # Test with and without different problems - weighted_cost_2 = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1, 100]) + weight = 100 + weighted_cost_2 = pybop.WeightedCost( + cost_list=[cost1, cost2], weights=[1, weight] + ) assert weighted_cost_2._different_problems is False + assert weighted_cost_2._fixed_problem is True assert weighted_cost_2.problem is problem assert weighted_cost_2([0.5]) >= 0 + np.testing.assert_allclose( + weighted_cost_2.evaluate([0.6]), + cost1([0.6]) + weight * cost2([0.6]), + atol=1e-5, + ) cost3 = pybop.RootMeanSquaredError(copy(problem)) - weighted_cost_3 = pybop.WeightedCost(cost_list=[cost1, cost3], weights=[1, 100]) + weighted_cost_3 = pybop.WeightedCost( + cost_list=[cost1, cost3], weights=[1, weight] + ) assert weighted_cost_3._different_problems is True + assert weighted_cost_3._fixed_problem is False assert weighted_cost_3.problem is None assert weighted_cost_3([0.5]) >= 0 - np.testing.assert_allclose( - weighted_cost_2._evaluate([0.5]), - weighted_cost_3._evaluate([0.5]), + weighted_cost_3.evaluate([0.6]), + cost1([0.6]) + weight * cost3([0.6]), atol=1e-5, ) - weighted_cost_3.parameters = problem.parameters - errors_2, sensitivities_2 = weighted_cost_2._evaluateS1([0.5]) - errors_3, sensitivities_3 = weighted_cost_3._evaluateS1([0.5]) + + errors_2, sensitivities_2 = weighted_cost_2.evaluateS1([0.5]) + errors_3, sensitivities_3 = weighted_cost_3.evaluateS1([0.5]) np.testing.assert_allclose(errors_2, errors_3, atol=1e-5) np.testing.assert_allclose(sensitivities_2, sensitivities_3, atol=1e-5) From 68b763d2e92f8dd7d0269d811cd7309d72dc3912 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Fri, 5 Jul 2024 11:23:12 +0100 Subject: [PATCH 14/35] Pass current_sensitivities to MAP --- pybop/costs/fitting_costs.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 08eef239..8634e16f 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -380,7 +380,10 @@ def _evaluateS1(self, inputs: Inputs): If an error occurs during the calculation of the cost or gradient. 
""" if self._fixed_problem: - self.likelihood._current_prediction = self._current_prediction + ( + self.likelihood._current_prediction, + self.likelihood._current_sensitivities, + ) = self._current_prediction, self._current_sensitivities log_likelihood, dl = self.likelihood._evaluateS1(inputs) log_prior = sum( From 8c2632dfd0b636488d2e6c8eac55002798914e08 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Fri, 5 Jul 2024 12:10:52 +0100 Subject: [PATCH 15/35] Add test_weighted_design_cost --- pybop/costs/base_cost.py | 6 ++++-- tests/unit/test_cost.py | 43 +++++++++++++++++++++++++--------------- 2 files changed, 31 insertions(+), 18 deletions(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 7fae9ee9..b285ef26 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -198,13 +198,15 @@ def __init__(self, cost_list, weights=None): # Check if all costs depend on the same problem for cost in self.cost_list: - if hasattr(cost, "problem") and ( - not cost._fixed_problem or cost.problem is not self.cost_list[0].problem + if ( + hasattr(cost, "problem") + and cost.problem is not self.cost_list[0].problem ): self._different_problems = True if not self._different_problems: super(WeightedCost, self).__init__(self.cost_list[0].problem) + self._fixed_problem = self.cost_list[0]._fixed_problem else: super(WeightedCost, self).__init__() self._fixed_problem = False diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index cc45deed..0db06b05 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -200,6 +200,12 @@ def test_costs(self, cost): # Test treatment of simulations that terminated early # by variation of the cut-off voltage. + @pytest.fixture + def design_problem(self, model, parameters, experiment, signal): + return pybop.DesignProblem( + model, parameters, experiment, signal=signal, init_soc=0.5 + ) + @pytest.mark.parametrize( "cost_class", [ @@ -209,21 +215,9 @@ def test_costs(self, cost): ], ) @pytest.mark.unit - def test_design_costs( - self, - cost_class, - model, - parameters, - experiment, - signal, - ): - # Construct Problem - problem = pybop.DesignProblem( - model, parameters, experiment, signal=signal, init_soc=0.5 - ) - + def test_design_costs(self, cost_class, design_problem): # Construct Cost - cost = cost_class(problem) + cost = cost_class(design_problem) if cost_class in [pybop.DesignCost]: with pytest.raises(NotImplementedError): @@ -249,11 +243,11 @@ def test_design_costs( cost(["StringInputShouldNotWork"]) # Compute after updating nominal capacity - cost = cost_class(problem, update_capacity=True) + cost = cost_class(design_problem, update_capacity=True) cost([0.4]) @pytest.mark.unit - def test_weighted_cost(self, problem): + def test_weighted_fitting_cost(self, problem): cost1 = pybop.SumSquaredError(problem) cost2 = pybop.RootMeanSquaredError(problem) @@ -317,3 +311,20 @@ def test_weighted_cost(self, problem): errors_3, sensitivities_3 = weighted_cost_3.evaluateS1([0.5]) np.testing.assert_allclose(errors_2, errors_3, atol=1e-5) np.testing.assert_allclose(sensitivities_2, sensitivities_3, atol=1e-5) + + @pytest.mark.unit + def test_weighted_design_cost(self, design_problem): + cost1 = pybop.GravimetricEnergyDensity(design_problem) + cost2 = pybop.RootMeanSquaredError(design_problem) + + # Test with and without weights + weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2]) + assert weighted_cost._different_problems is False + assert 
weighted_cost._fixed_problem is False + assert weighted_cost.problem is design_problem + assert weighted_cost([0.5]) >= 0 + np.testing.assert_allclose( + weighted_cost.evaluate([0.6]), + cost1([0.6]) + cost2([0.6]), + atol=1e-5, + ) From c189d7a2ccc3ae6ab3c847ecd6c209e797d2b963 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 5 Jul 2024 12:35:53 +0000 Subject: [PATCH 16/35] style: pre-commit fixes --- pybop/costs/_likelihoods.py | 43 ++++++++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index d05e0e9d..696ae01d 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -45,7 +45,8 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo Evaluates the Gaussian log-likelihood for the given parameters with known sigma. """ if any( - len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) for key in self.signal + len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) + for key in self.signal ): return -np.inf # prediction length doesn't match target @@ -53,7 +54,10 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo [ np.sum( self._offset - + self._multip * np.sum((self._target[signal] - self._current_prediction[signal]) ** 2.0) + + self._multip + * np.sum( + (self._target[signal] - self._current_prediction[signal]) ** 2.0 + ) ) for signal in self.signal ] @@ -66,14 +70,22 @@ def _evaluateS1(self, inputs: Inputs) -> Tuple[float, np.ndarray]: Calculates the log-likelihood and gradient. """ if any( - len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) for key in self.signal + len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) + for key in self.signal ): return -np.inf, -self._dl likelihood = self._evaluate(inputs) - r = np.asarray([self._target[signal] - self._current_prediction[signal] for signal in self.signal]) - dl = np.sum((np.sum((r * self._current_sensitivities.T), axis=2) / self.sigma2), axis=1) + r = np.asarray( + [ + self._target[signal] - self._current_prediction[signal] + for signal in self.signal + ] + ) + dl = np.sum( + (np.sum((r * self._current_sensitivities.T), axis=2) / self.sigma2), axis=1 + ) return likelihood, dl @@ -193,7 +205,8 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo return -np.inf if any( - len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) for key in self.signal + len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) + for key in self.signal ): return -np.inf # prediction length doesn't match target @@ -202,7 +215,9 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo np.sum( self._logpi - self.n_time_data * np.log(sigma) - - np.sum((self._target[signal] - self._current_prediction[signal]) ** 2.0) + - np.sum( + (self._target[signal] - self._current_prediction[signal]) ** 2.0 + ) / (2.0 * sigma**2.0) ) for signal in self.signal @@ -232,14 +247,22 @@ def _evaluateS1(self, inputs: Inputs) -> Tuple[float, np.ndarray]: return -np.inf, -self._dl if any( - len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) for key in self.signal + len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) + for key in self.signal ): return -np.inf, -self._dl likelihood = 
self._evaluate(inputs) - r = np.asarray([self._target[signal] - self._current_prediction[signal] for signal in self.signal]) - dl = np.sum((np.sum((r * self._current_sensitivities.T), axis=2) / (sigma**2.0)), axis=1) + r = np.asarray( + [ + self._target[signal] - self._current_prediction[signal] + for signal in self.signal + ] + ) + dl = np.sum( + (np.sum((r * self._current_sensitivities.T), axis=2) / (sigma**2.0)), axis=1 + ) dsigma = ( -self.n_time_data / sigma + np.sum(r**2.0, axis=1) / (sigma**3.0) ) / self._dsigma_scale From 2b1ae4c6af54fc719a3c3dabea9d3594663bbb24 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Fri, 5 Jul 2024 14:08:51 +0100 Subject: [PATCH 17/35] Add evaluate back into GaussianLogLikelihood --- pybop/costs/_likelihoods.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 696ae01d..8b867d4e 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -204,6 +204,9 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo if np.any(sigma <= 0): return -np.inf + self._current_prediction = self.problem.evaluate( + self.problem.parameters.as_dict() + ) if any( len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) for key in self.signal @@ -246,6 +249,9 @@ def _evaluateS1(self, inputs: Inputs) -> Tuple[float, np.ndarray]: if np.any(sigma <= 0): return -np.inf, -self._dl + self._current_prediction, self._current_sensitivities = self.problem.evaluateS1( + self.problem.parameters.as_dict() + ) if any( len(self._current_prediction.get(key, [])) != len(self._target.get(key, [])) for key in self.signal From 8c41327a302e6196074be3519f6673edee34ad57 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Thu, 11 Jul 2024 11:44:28 +0100 Subject: [PATCH 18/35] Update to super() --- pybop/costs/base_cost.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 322d082f..0bd38192 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -205,10 +205,10 @@ def __init__(self, cost_list, weights=None): self._different_problems = True if not self._different_problems: - super(WeightedCost, self).__init__(self.cost_list[0].problem) + super().__init__(self.cost_list[0].problem) self._fixed_problem = self.cost_list[0]._fixed_problem else: - super(WeightedCost, self).__init__() + super().__init__() self._fixed_problem = False for cost in self.cost_list: self.parameters.join(cost.parameters) From 80a803e4251a05a7d75239ea8d4f68e4814985f7 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 16:42:55 +0000 Subject: [PATCH 19/35] style: pre-commit fixes --- pybop/costs/_likelihoods.py | 2 +- pybop/costs/base_cost.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 30870415..43dc326a 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -324,7 +324,7 @@ def _evaluate(self, inputs: Inputs, grad=None) -> float: if self._fixed_problem: self.likelihood._current_prediction = self._current_prediction log_likelihood = self.likelihood._evaluate(inputs) - + posterior = log_likelihood + log_prior return posterior diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 4226ed78..eb2038b6 
100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,6 +1,7 @@ -import numpy as np from typing import Union +import numpy as np + from pybop import BaseProblem from pybop.parameters.parameter import Inputs, Parameters From f18552349c6b5447d48601bf1c64505ab157e450 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Mon, 15 Jul 2024 17:48:53 +0100 Subject: [PATCH 20/35] Update prediction to self._current_prediction --- pybop/costs/fitting_costs.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 2ff46586..93c50a75 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -239,7 +239,10 @@ def _evaluate(self, inputs: Inputs, grad=None): e = np.asarray( [ - np.sum(np.abs(prediction[signal] - self._target[signal]) ** self.p) + np.sum( + np.abs(self._current_prediction[signal] - self._target[signal]) + ** self.p + ) ** (1 / self.p) for signal in self.signal ] @@ -344,7 +347,10 @@ def _evaluate(self, inputs: Inputs, grad=None): e = np.asarray( [ - np.sum(np.abs(prediction[signal] - self._target[signal]) ** self.p) + np.sum( + np.abs(self._current_prediction[signal] - self._target[signal]) + ** self.p + ) for signal in self.signal ] ) From 23b77e895d9ee7aa23413ac1bad457192b4450f5 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Mon, 15 Jul 2024 17:52:01 +0100 Subject: [PATCH 21/35] Update y to self._current_prediction --- pybop/costs/fitting_costs.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 93c50a75..18cc752d 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -273,16 +273,24 @@ def _evaluateS1(self, inputs): if not self.verify_prediction(self._current_prediction): return np.inf, self._de * np.ones(self.n_parameters) - r = np.asarray([y[signal] - self._target[signal] for signal in self.signal]) + r = np.asarray( + [ + self._current_prediction[signal] - self._target[signal] + for signal in self.signal + ] + ) e = np.asarray( [ - np.sum(np.abs(y[signal] - self._target[signal]) ** self.p) + np.sum( + np.abs(self._current_prediction[signal] - self._target[signal]) + ** self.p + ) ** (1 / self.p) for signal in self.signal ] ) de = np.sum( - np.sum(r ** (self.p - 1) * dy.T, axis=2) + np.sum(r ** (self.p - 1) * self._current_sensitivities.T, axis=2) / (e ** (self.p - 1) + np.finfo(float).eps), axis=1, ) @@ -380,9 +388,16 @@ def _evaluateS1(self, inputs): if not self.verify_prediction(self._current_prediction): return np.inf, self._de * np.ones(self.n_parameters) - r = np.asarray([y[signal] - self._target[signal] for signal in self.signal]) + r = np.asarray( + [ + self._current_prediction[signal] - self._target[signal] + for signal in self.signal + ] + ) e = np.sum(np.sum(np.abs(r) ** self.p)) - de = self.p * np.sum(np.sum(r ** (self.p - 1) * dy.T, axis=2), axis=1) + de = self.p * np.sum( + np.sum(r ** (self.p - 1) * self._current_sensitivities.T, axis=2), axis=1 + ) return e, de From 5157a8d8c96390e202877bb81af984ac50dda78d Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Tue, 16 Jul 2024 17:10:20 +0100 Subject: [PATCH 22/35] Update cost_list to args --- examples/scripts/spm_weighted_cost.py | 2 +- pybop/costs/base_cost.py | 48 ++++++++++++++------------- 
tests/unit/test_cost.py | 32 +++++++----------- 3 files changed, 38 insertions(+), 44 deletions(-) diff --git a/examples/scripts/spm_weighted_cost.py b/examples/scripts/spm_weighted_cost.py index 55743461..74c43a33 100644 --- a/examples/scripts/spm_weighted_cost.py +++ b/examples/scripts/spm_weighted_cost.py @@ -41,7 +41,7 @@ problem = pybop.FittingProblem(model, parameters, dataset) cost1 = pybop.SumSquaredError(problem) cost2 = pybop.RootMeanSquaredError(problem) -weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1, 100]) +weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 100]) for cost in [weighted_cost, cost1, cost2]: optim = pybop.IRPropMin(cost, max_iterations=60) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index eb2038b6..9a359053 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -212,45 +212,47 @@ class WeightedCost(BaseCost): a single weighted cost function. Inherits all parameters and attributes from ``BaseCost``. + + Additional Attributes + --------------------- + costs : List[pybop.BaseCost] + A list of PyBOP cost objects. """ - def __init__(self, cost_list, weights=None): - self.cost_list = cost_list + def __init__(self, *args, weights=None): + self.costs = [] + for cost in args: + if not isinstance(cost, BaseCost): + raise TypeError(f"Received {type(cost)} instead of cost object.") + self.costs.append(cost) self.weights = weights self._different_problems = False - if not isinstance(self.cost_list, list): - raise TypeError( - f"Expected a list of costs. Received {type(self.cost_list)}" - ) if self.weights is None: - self.weights = np.ones(len(cost_list)) + self.weights = np.ones(len(self.costs)) elif isinstance(self.weights, list): self.weights = np.array(self.weights) if not isinstance(self.weights, np.ndarray): raise TypeError( - "Expected a list or array of weights the same length as cost_list." + "Expected a list or array of weights the same length as costs." ) - if not len(self.weights) == len(self.cost_list): + if not len(self.weights) == len(self.costs): raise ValueError( - "Expected a list or array of weights the same length as cost_list." + "Expected a list or array of weights the same length as costs." ) # Check if all costs depend on the same problem - for cost in self.cost_list: - if ( - hasattr(cost, "problem") - and cost.problem is not self.cost_list[0].problem - ): + for cost in self.costs: + if hasattr(cost, "problem") and cost.problem is not self.costs[0].problem: self._different_problems = True if not self._different_problems: - super().__init__(self.cost_list[0].problem) - self._fixed_problem = self.cost_list[0]._fixed_problem + super().__init__(self.costs[0].problem) + self._fixed_problem = self.costs[0]._fixed_problem else: super().__init__() self._fixed_problem = False - for cost in self.cost_list: + for cost in self.costs: self.parameters.join(cost.parameters) def _evaluate(self, inputs: Inputs, grad=None): @@ -270,14 +272,14 @@ def _evaluate(self, inputs: Inputs, grad=None): float The weighted cost value. 
""" - e = np.empty_like(self.cost_list) + e = np.empty_like(self.costs) if not self._fixed_problem and self._different_problems: self.parameters.update(values=list(inputs.values())) elif not self._fixed_problem: self._current_prediction = self.problem.evaluate(inputs) - for i, cost in enumerate(self.cost_list): + for i, cost in enumerate(self.costs): if not self._fixed_problem and self._different_problems: inputs = cost.parameters.as_dict() cost._current_prediction = cost.problem.evaluate(inputs) @@ -302,8 +304,8 @@ def _evaluateS1(self, inputs: Inputs): A tuple containing the cost and the gradient. The cost is a float, and the gradient is an array-like of the same length as `x`. """ - e = np.empty_like(self.cost_list) - de = np.empty((len(self.parameters), len(self.cost_list))) + e = np.empty_like(self.costs) + de = np.empty((len(self.parameters), len(self.costs))) if not self._fixed_problem and self._different_problems: self.parameters.update(values=list(inputs.values())) @@ -312,7 +314,7 @@ def _evaluateS1(self, inputs: Inputs): self.problem.evaluateS1(inputs) ) - for i, cost in enumerate(self.cost_list): + for i, cost in enumerate(self.costs): if not self._fixed_problem and self._different_problems: inputs = cost.parameters.as_dict() cost._current_prediction, cost._current_sensitivities = ( diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 2c9c2d5e..ac6e5877 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -304,37 +304,31 @@ def test_weighted_fitting_cost(self, problem): cost2 = pybop.RootMeanSquaredError(problem) # Test with and without weights - weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2]) + weighted_cost = pybop.WeightedCost(cost1, cost2) np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) - weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1, 1]) + weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 1]) np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) - weighted_cost = pybop.WeightedCost( - cost_list=[cost1, cost2], weights=np.array([1, 1]) - ) + weighted_cost = pybop.WeightedCost(cost1, cost2, weights=np.array([1, 1])) np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) with pytest.raises( TypeError, - match=r"Expected a list of costs.", + match=r"Received instead of cost object.", ): - weighted_cost = pybop.WeightedCost(cost_list="Invalid string") + weighted_cost = pybop.WeightedCost("Invalid string") with pytest.raises( TypeError, - match="Expected a list or array of weights the same length as cost_list.", + match="Expected a list or array of weights the same length as costs.", ): - weighted_cost = pybop.WeightedCost( - cost_list=[cost1, cost2], weights="Invalid string" - ) + weighted_cost = pybop.WeightedCost(cost1, cost2, weights="Invalid string") with pytest.raises( ValueError, - match="Expected a list or array of weights the same length as cost_list.", + match="Expected a list or array of weights the same length as costs.", ): - weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2], weights=[1]) + weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1]) # Test with and without different problems weight = 100 - weighted_cost_2 = pybop.WeightedCost( - cost_list=[cost1, cost2], weights=[1, weight] - ) + weighted_cost_2 = pybop.WeightedCost(cost1, cost2, weights=[1, weight]) assert weighted_cost_2._different_problems is False assert weighted_cost_2._fixed_problem is True assert weighted_cost_2.problem is problem @@ -346,9 +340,7 @@ def 
test_weighted_fitting_cost(self, problem): ) cost3 = pybop.RootMeanSquaredError(copy(problem)) - weighted_cost_3 = pybop.WeightedCost( - cost_list=[cost1, cost3], weights=[1, weight] - ) + weighted_cost_3 = pybop.WeightedCost(cost1, cost3, weights=[1, weight]) assert weighted_cost_3._different_problems is True assert weighted_cost_3._fixed_problem is False assert weighted_cost_3.problem is None @@ -370,7 +362,7 @@ def test_weighted_design_cost(self, design_problem): cost2 = pybop.RootMeanSquaredError(design_problem) # Test with and without weights - weighted_cost = pybop.WeightedCost(cost_list=[cost1, cost2]) + weighted_cost = pybop.WeightedCost(cost1, cost2) assert weighted_cost._different_problems is False assert weighted_cost._fixed_problem is False assert weighted_cost.problem is design_problem From 3e220b16ccc670ce071f1f4019be2ab1de0a594e Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Tue, 16 Jul 2024 18:04:58 +0100 Subject: [PATCH 23/35] Remove repeated line --- pybop/costs/_likelihoods.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 43dc326a..1f96e2fb 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -241,7 +241,6 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: self._current_prediction, self._current_sensitivities = self.problem.evaluateS1( self.problem.parameters.as_dict() ) - y, dy = self.problem.evaluateS1(self.problem.parameters.as_dict()) if not self.verify_prediction(self._current_prediction): return -np.inf, -self._de * np.ones(self.n_parameters) From 95600be95bd5994630023efbb2d6581c9a3ea9d2 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Tue, 16 Jul 2024 18:05:15 +0100 Subject: [PATCH 24/35] Add descriptions --- pybop/costs/base_cost.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 9a359053..b3cf7ae1 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,4 +1,4 @@ -from typing import Union +from typing import Optional, Union import numpy as np @@ -24,9 +24,15 @@ class BaseCost: An array containing the target data to fit. n_outputs : int The number of outputs in the model. + + Additional Attributes + --------------------- + _fixed_problem : bool + If True, the problem does not need to be rebuilt before the cost is + calculated (default: False). """ - def __init__(self, problem=None): + def __init__(self, problem: Optional[BaseProblem] = None): self.parameters = Parameters() self.problem = problem self._fixed_problem = False @@ -215,11 +221,16 @@ class WeightedCost(BaseCost): Additional Attributes --------------------- - costs : List[pybop.BaseCost] + costs : list[pybop.BaseCost] A list of PyBOP cost objects. + weights : list[float] + A list of values with which to weight the cost values. + _different_problems : bool + If True, the problem for each cost is evaluated independently during + each evaluation of the cost (default: False). 
""" - def __init__(self, *args, weights=None): + def __init__(self, *args, weights: Optional[list[float]] = None): self.costs = [] for cost in args: if not isinstance(cost, BaseCost): From 8c7dddb76d87c23432a3fd1bd45e1f2b91015325 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 17 Jul 2024 17:49:17 +0100 Subject: [PATCH 25/35] refactor: Integrates DesignProblems into weighted_cost structure, adds error checking for different problem class, general refactor for improved clarity --- examples/scripts/spm_weighted_cost.py | 27 +++-- pybop/costs/_likelihoods.py | 55 +++------- pybop/costs/base_cost.py | 146 +++++++++++++------------- pybop/costs/design_costs.py | 114 ++++++++++---------- pybop/costs/fitting_costs.py | 73 ++++--------- pybop/optimisers/base_optimiser.py | 11 +- tests/unit/test_cost.py | 46 +++++--- 7 files changed, 228 insertions(+), 244 deletions(-) diff --git a/examples/scripts/spm_weighted_cost.py b/examples/scripts/spm_weighted_cost.py index 74c43a33..43c5cd00 100644 --- a/examples/scripts/spm_weighted_cost.py +++ b/examples/scripts/spm_weighted_cost.py @@ -24,24 +24,37 @@ # Generate data sigma = 0.001 -t_eval = np.arange(0, 900, 3) -values = model.predict(t_eval=t_eval) -corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval)) +init_soc = 0.5 +experiment = pybop.Experiment( + [ + ( + "Discharge at 0.5C for 3 minutes (3 second period)", + "Charge at 0.5C for 3 minutes (3 second period)", + ), + ] + * 2 +) +values = model.predict(experiment=experiment, init_soc=init_soc) + + +def noise(sigma): + return np.random.normal(0, sigma, len(values["Voltage [V]"].data)) + # Form dataset dataset = pybop.Dataset( { - "Time [s]": t_eval, + "Time [s]": values["Time [s]"].data, "Current function [A]": values["Current [A]"].data, - "Voltage [V]": corrupt_values, + "Voltage [V]": values["Voltage [V]"].data + noise(sigma), } ) # Generate problem, cost function, and optimisation class -problem = pybop.FittingProblem(model, parameters, dataset) +problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc) cost1 = pybop.SumSquaredError(problem) cost2 = pybop.RootMeanSquaredError(problem) -weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 100]) +weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[0.1, 1]) for cost in [weighted_cost, cost1, cost2]: optim = pybop.IRPropMin(cost, max_iterations=60) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 1f96e2fb..0751b433 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -43,7 +43,7 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo """ Evaluates the Gaussian log-likelihood for the given parameters with known sigma. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return -np.inf e = np.asarray( @@ -51,9 +51,7 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo np.sum( self._offset + self._multip - * np.sum( - (self._target[signal] - self._current_prediction[signal]) ** 2.0 - ) + * np.sum((self._target[signal] - self.y[signal]) ** 2.0) ) for signal in self.signal ] @@ -65,20 +63,15 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: """ Calculates the log-likelihood and gradient. 
""" - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return -np.inf, -self._de * np.ones(self.n_parameters) likelihood = self._evaluate(inputs) r = np.asarray( - [ - self._target[signal] - self._current_prediction[signal] - for signal in self.signal - ] - ) - dl = np.sum( - (np.sum((r * self._current_sensitivities.T), axis=2) / self.sigma2), axis=1 + [self._target[signal] - self.y[signal] for signal in self.signal] ) + dl = np.sum((np.sum((r * self.dy.T), axis=2) / self.sigma2), axis=1) return likelihood, dl @@ -123,7 +116,7 @@ def __init__( super().__init__(problem) self._dsigma_scale = dsigma_scale self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi) - self._fixed_problem = False # keep problem evaluation within _evaluate + self._predict = False # keep problem evaluation within _evaluate self.sigma = Parameters() self._add_sigma_parameters(sigma0) @@ -196,10 +189,8 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo if np.any(sigma <= 0): return -np.inf - self._current_prediction = self.problem.evaluate( - self.problem.parameters.as_dict() - ) - if not self.verify_prediction(self._current_prediction): + self.y = self.problem.evaluate(self.problem.parameters.as_dict()) + if not self.verify_prediction(self.y): return -np.inf e = np.asarray( @@ -207,9 +198,7 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo np.sum( self._logpi - self.n_time_data * np.log(sigma) - - np.sum( - (self._target[signal] - self._current_prediction[signal]) ** 2.0 - ) + - np.sum((self._target[signal] - self.y[signal]) ** 2.0) / (2.0 * sigma**2.0) ) for signal in self.signal @@ -238,23 +227,16 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: if np.any(sigma <= 0): return -np.inf, -self._de * np.ones(self.n_parameters) - self._current_prediction, self._current_sensitivities = self.problem.evaluateS1( - self.problem.parameters.as_dict() - ) - if not self.verify_prediction(self._current_prediction): + self.y, self.dy = self.problem.evaluateS1(self.problem.parameters.as_dict()) + if not self.verify_prediction(self.y): return -np.inf, -self._de * np.ones(self.n_parameters) likelihood = self._evaluate(inputs) r = np.asarray( - [ - self._target[signal] - self._current_prediction[signal] - for signal in self.signal - ] - ) - dl = np.sum( - (np.sum((r * self._current_sensitivities.T), axis=2) / (sigma**2.0)), axis=1 + [self._target[signal] - self.y[signal] for signal in self.signal] ) + dl = np.sum((np.sum((r * self.dy.T), axis=2) / (sigma**2.0)), axis=1) dsigma = ( -self.n_time_data / sigma + np.sum(r**2.0, axis=1) / (sigma**3.0) ) / self._dsigma_scale @@ -320,9 +302,7 @@ def _evaluate(self, inputs: Inputs, grad=None) -> float: if not np.isfinite(log_prior).any(): return -np.inf - if self._fixed_problem: - self.likelihood._current_prediction = self._current_prediction - log_likelihood = self.likelihood._evaluate(inputs) + log_likelihood = self.likelihood.evaluate(inputs) posterior = log_likelihood + log_prior return posterior @@ -354,12 +334,7 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: if not np.isfinite(log_prior).any(): return -np.inf, -self._de * np.ones(self.n_parameters) - if self._fixed_problem: - ( - self.likelihood._current_prediction, - self.likelihood._current_sensitivities, - ) = self._current_prediction, self._current_sensitivities - log_likelihood, dl = self.likelihood._evaluateS1(inputs) + log_likelihood, dl = self.likelihood.evaluateS1(inputs) 
# Compute a finite difference approximation of the gradient of the log prior delta = self.parameters.initial_value() * self.gradient_step diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index b3cf7ae1..974227f9 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,8 +1,10 @@ +import copy +import warnings from typing import Optional, Union import numpy as np -from pybop import BaseProblem +from pybop import BaseProblem, DesignProblem from pybop.parameters.parameter import Inputs, Parameters @@ -24,25 +26,25 @@ class BaseCost: An array containing the target data to fit. n_outputs : int The number of outputs in the model. - - Additional Attributes - --------------------- - _fixed_problem : bool - If True, the problem does not need to be rebuilt before the cost is - calculated (default: False). + _predict : bool + If False, the problem will be evaluated outside the self.evaluate() method + before the cost is calculated (default: False). """ def __init__(self, problem: Optional[BaseProblem] = None): self.parameters = Parameters() self.problem = problem - self._fixed_problem = False + self.verbose = False + self._predict = False + self.y = None + self.dy = None self.set_fail_gradient() if isinstance(self.problem, BaseProblem): self._target = self.problem._target self.parameters.join(self.problem.parameters) self.n_outputs = self.problem.n_outputs self.signal = self.problem.signal - self._fixed_problem = True + self._predict = True @property def n_parameters(self): @@ -79,8 +81,8 @@ def evaluate(self, inputs: Union[Inputs, list], grad=None): inputs = self.parameters.verify(inputs) try: - if self._fixed_problem: - self._current_prediction = self.problem.evaluate(inputs) + if self._predict: + self.y = self.problem.evaluate(inputs) return self._evaluate(inputs, grad) @@ -139,10 +141,8 @@ def evaluateS1(self, inputs: Union[Inputs, list]): inputs = self.parameters.verify(inputs) try: - if self._fixed_problem: - self._current_prediction, self._current_sensitivities = ( - self.problem.evaluateS1(inputs) - ) + if self._predict: + self.y, self.dy = self.problem.evaluateS1(inputs) return self._evaluateS1(inputs) @@ -219,52 +219,59 @@ class WeightedCost(BaseCost): Inherits all parameters and attributes from ``BaseCost``. - Additional Attributes + Attributes --------------------- costs : list[pybop.BaseCost] A list of PyBOP cost objects. weights : list[float] A list of values with which to weight the cost values. - _different_problems : bool + _has_different_problems : bool If True, the problem for each cost is evaluated independently during each evaluation of the cost (default: False). 
""" - def __init__(self, *args, weights: Optional[list[float]] = None): - self.costs = [] - for cost in args: - if not isinstance(cost, BaseCost): - raise TypeError(f"Received {type(cost)} instead of cost object.") - self.costs.append(cost) - self.weights = weights - self._different_problems = False - - if self.weights is None: + def __init__(self, *costs, weights: Optional[list[float]] = None): + if not all(isinstance(cost, BaseCost) for cost in costs): + raise TypeError("All costs must be instances of BaseCost.") + self.costs = [copy.copy(cost) for cost in costs] + self._has_different_problems = False + self.minimising = not any( + isinstance(cost.problem, DesignProblem) for cost in self.costs + ) + if len(set(type(cost.problem) for cost in self.costs)) > 1: + raise TypeError("All problems must be of the same class type.") + + # Check if weights are provided + if weights is not None: + try: + self.weights = np.asarray(weights, dtype=float) + except ValueError: + raise ValueError("Weights must be numeric values.") from None + + if self.weights.size != len(self.costs): + raise ValueError("Number of weights must match number of costs.") + else: self.weights = np.ones(len(self.costs)) - elif isinstance(self.weights, list): - self.weights = np.array(self.weights) - if not isinstance(self.weights, np.ndarray): - raise TypeError( - "Expected a list or array of weights the same length as costs." - ) - if not len(self.weights) == len(self.costs): - raise ValueError( - "Expected a list or array of weights the same length as costs." - ) # Check if all costs depend on the same problem - for cost in self.costs: - if hasattr(cost, "problem") and cost.problem is not self.costs[0].problem: - self._different_problems = True + self._has_different_problems = any( + hasattr(cost, "problem") and cost.problem is not self.costs[0].problem + for cost in self.costs[1:] + ) - if not self._different_problems: - super().__init__(self.costs[0].problem) - self._fixed_problem = self.costs[0]._fixed_problem - else: + if self._has_different_problems: super().__init__() - self._fixed_problem = False for cost in self.costs: self.parameters.join(cost.parameters) + else: + super().__init__(self.costs[0].problem) + self._predict = False + for cost in self.costs: + cost._predict = False + + # Catch UserWarnings as exceptions + if not self.minimising: + warnings.filterwarnings("error", category=UserWarning) def _evaluate(self, inputs: Inputs, grad=None): """ @@ -285,18 +292,22 @@ def _evaluate(self, inputs: Inputs, grad=None): """ e = np.empty_like(self.costs) - if not self._fixed_problem and self._different_problems: - self.parameters.update(values=list(inputs.values())) - elif not self._fixed_problem: - self._current_prediction = self.problem.evaluate(inputs) + if not self._predict: + if self._has_different_problems: + self.parameters.update(values=list(inputs.values())) + else: + try: + with warnings.catch_warnings(): + self.y = self.problem.evaluate(inputs) + except UserWarning as e: + if self.verbose: + print(f"Ignoring this sample due to: {e}") + return -np.inf for i, cost in enumerate(self.costs): - if not self._fixed_problem and self._different_problems: - inputs = cost.parameters.as_dict() - cost._current_prediction = cost.problem.evaluate(inputs) - else: - cost._current_prediction = self._current_prediction - e[i] = cost._evaluate(inputs, grad) + if not self._has_different_problems: + cost.y = self.y + e[i] = cost.evaluate(inputs) return np.dot(e, self.weights) @@ -318,25 +329,16 @@ def _evaluateS1(self, inputs: 
Inputs): e = np.empty_like(self.costs) de = np.empty((len(self.parameters), len(self.costs))) - if not self._fixed_problem and self._different_problems: - self.parameters.update(values=list(inputs.values())) - elif not self._fixed_problem: - self._current_prediction, self._current_sensitivities = ( - self.problem.evaluateS1(inputs) - ) + if not self._predict: + if self._has_different_problems: + self.parameters.update(values=list(inputs.values())) + else: + self.y, self.dy = self.problem.evaluateS1(inputs) for i, cost in enumerate(self.costs): - if not self._fixed_problem and self._different_problems: - inputs = cost.parameters.as_dict() - cost._current_prediction, cost._current_sensitivities = ( - cost.problem.evaluateS1(inputs) - ) - else: - cost._current_prediction, cost._current_sensitivities = ( - self._current_prediction, - self._current_sensitivities, - ) - e[i], de[:, i] = cost._evaluateS1(inputs) + if not self._has_different_problems: + cost.y, cost.dy = (self.y, self.dy) + e[i], de[:, i] = cost.evaluateS1(inputs) e = np.dot(e, self.weights) de = np.dot(de, self.weights) diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index 738dfe61..06649cb6 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -1,4 +1,5 @@ import warnings +from typing import Union import numpy as np @@ -65,25 +66,56 @@ def update_simulation_data(self, inputs: Inputs): self.problem._target = {key: solution[key] for key in self.problem.signal} self.dt = solution["Time [s]"][1] - solution["Time [s]"][0] - def _evaluate(self, inputs: Inputs, grad=None): + def evaluate(self, inputs: Union[Inputs, list], grad=None): """ - Computes the value of the cost function. - - This method must be implemented by subclasses. + Call the evaluate function for a given set of parameters. Parameters ---------- - inputs : Inputs - The parameters for which to compute the cost. - grad : array, optional - Gradient information, not used in this method. + inputs : Inputs or array-like + The parameters for which to compute the cost and gradient. + grad : array-like, optional + An array to store the gradient of the cost function with respect + to the parameters. + + Returns + ------- + float + The calculated cost function value. Raises ------ - NotImplementedError - If the method has not been implemented by the subclass. + ValueError + If an error occurs during the calculation of the cost. 
""" - raise NotImplementedError + inputs = self.parameters.verify(inputs) + + try: + with warnings.catch_warnings(): + # Convert UserWarning to an exception + warnings.filterwarnings("error", category=UserWarning) + if self._predict: + if self.update_capacity: + self.problem.model.approximate_capacity(inputs) + self.y = self.problem.evaluate(inputs) + + return self._evaluate(inputs, grad) + + # Catch NotImplementedError and raise it + except NotImplementedError as e: + raise e + + # Catch infeasible solutions and return infinity + except UserWarning as e: + if self.verbose: + print(f"Ignoring this sample due to: {e}") + return -np.inf + + # Catch any other exception and return infinity + except Exception as e: + if self.verbose: + print(f"An error occurred during the evaluation: {e}") + return -np.inf class GravimetricEnergyDensity(DesignCost): @@ -98,7 +130,6 @@ class GravimetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super().__init__(problem, update_capacity) - self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, inputs: Inputs, grad=None): """ @@ -116,31 +147,12 @@ def _evaluate(self, inputs: Inputs, grad=None): float The gravimetric energy density or -infinity in case of infeasible parameters. """ - try: - with warnings.catch_warnings(): - # Convert UserWarning to an exception - warnings.filterwarnings("error", category=UserWarning) - - if self.update_capacity: - self.problem.model.approximate_capacity(inputs) - solution = self.problem.evaluate(inputs) - - voltage, current = solution["Voltage [V]"], solution["Current [A]"] - energy_density = np.trapz(voltage * current, dx=self.dt) / ( - 3600 * self.problem.model.cell_mass(self.parameter_set) - ) + voltage, current = self.y["Voltage [V]"], self.y["Current [A]"] + energy_density = np.trapz(voltage * current, dx=self.dt) / ( + 3600 * self.problem.model.cell_mass(self.parameter_set) + ) - return energy_density - - # Catch infeasible solutions and return infinity - except UserWarning as e: - print(f"Ignoring this sample due to: {e}") - return -np.inf - - # Catch any other exception and return infinity - except Exception as e: - print(f"An error occurred during the evaluation: {e}") - return -np.inf + return energy_density class VolumetricEnergyDensity(DesignCost): @@ -155,7 +167,6 @@ class VolumetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super().__init__(problem, update_capacity) - self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, inputs: Inputs, grad=None): """ @@ -173,28 +184,9 @@ def _evaluate(self, inputs: Inputs, grad=None): float The volumetric energy density or -infinity in case of infeasible parameters. 
""" - try: - with warnings.catch_warnings(): - # Convert UserWarning to an exception - warnings.filterwarnings("error", category=UserWarning) - - if self.update_capacity: - self.problem.model.approximate_capacity(inputs) - solution = self.problem.evaluate(inputs) + voltage, current = self.y["Voltage [V]"], self.y["Current [A]"] + energy_density = np.trapz(voltage * current, dx=self.dt) / ( + 3600 * self.problem.model.cell_volume(self.parameter_set) + ) - voltage, current = solution["Voltage [V]"], solution["Current [A]"] - energy_density = np.trapz(voltage * current, dx=self.dt) / ( - 3600 * self.problem.model.cell_volume(self.parameter_set) - ) - - return energy_density - - # Catch infeasible solutions and return infinity - except UserWarning as e: - print(f"Ignoring this sample due to: {e}") - return -np.inf - - # Catch any other exception and return infinity - except Exception as e: - print(f"An error occurred during the evaluation: {e}") - return -np.inf + return energy_density diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 18cc752d..cfcc32ef 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -38,16 +38,12 @@ def _evaluate(self, inputs: Inputs, grad=None): The root mean square error. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf e = np.asarray( [ - np.sqrt( - np.mean( - (self._current_prediction[signal] - self._target[signal]) ** 2 - ) - ) + np.sqrt(np.mean((self.y[signal] - self._target[signal]) ** 2)) for signal in self.signal ] ) @@ -74,19 +70,14 @@ def _evaluateS1(self, inputs: Inputs): ValueError If an error occurs during the calculation of the cost or gradient. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf, self._de * np.ones(self.n_parameters) r = np.asarray( - [ - self._current_prediction[signal] - self._target[signal] - for signal in self.signal - ] + [self.y[signal] - self._target[signal] for signal in self.signal] ) e = np.sqrt(np.mean(r**2, axis=1)) - de = np.mean((r * self._current_sensitivities.T), axis=2) / ( - e + np.finfo(float).eps - ) + de = np.mean((r * self.dy.T), axis=2) / (e + np.finfo(float).eps) if self.n_outputs == 1: return e.item(), de.flatten() @@ -132,12 +123,12 @@ def _evaluate(self, inputs: Inputs, grad=None): float The Sum of Squared Error. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf e = np.asarray( [ - np.sum((self._current_prediction[signal] - self._target[signal]) ** 2) + np.sum((self.y[signal] - self._target[signal]) ** 2) for signal in self.signal ] ) @@ -164,17 +155,14 @@ def _evaluateS1(self, inputs: Inputs): ValueError If an error occurs during the calculation of the cost or gradient. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf, self._de * np.ones(self.n_parameters) r = np.asarray( - [ - self._current_prediction[signal] - self._target[signal] - for signal in self.signal - ] + [self.y[signal] - self._target[signal] for signal in self.signal] ) e = np.sum(np.sum(r**2, axis=0), axis=0) - de = 2 * np.sum(np.sum((r * self._current_sensitivities.T), axis=2), axis=1) + de = 2 * np.sum(np.sum((r * self.dy.T), axis=2), axis=1) return e, de @@ -234,15 +222,12 @@ def _evaluate(self, inputs: Inputs, grad=None): float The Minkowski cost. 
""" - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf e = np.asarray( [ - np.sum( - np.abs(self._current_prediction[signal] - self._target[signal]) - ** self.p - ) + np.sum(np.abs(self.y[signal] - self._target[signal]) ** self.p) ** (1 / self.p) for signal in self.signal ] @@ -270,27 +255,21 @@ def _evaluateS1(self, inputs): ValueError If an error occurs during the calculation of the cost or gradient. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf, self._de * np.ones(self.n_parameters) r = np.asarray( - [ - self._current_prediction[signal] - self._target[signal] - for signal in self.signal - ] + [self.y[signal] - self._target[signal] for signal in self.signal] ) e = np.asarray( [ - np.sum( - np.abs(self._current_prediction[signal] - self._target[signal]) - ** self.p - ) + np.sum(np.abs(self.y[signal] - self._target[signal]) ** self.p) ** (1 / self.p) for signal in self.signal ] ) de = np.sum( - np.sum(r ** (self.p - 1) * self._current_sensitivities.T, axis=2) + np.sum(r ** (self.p - 1) * self.dy.T, axis=2) / (e ** (self.p - 1) + np.finfo(float).eps), axis=1, ) @@ -350,15 +329,12 @@ def _evaluate(self, inputs: Inputs, grad=None): float The Sum of Power cost. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf e = np.asarray( [ - np.sum( - np.abs(self._current_prediction[signal] - self._target[signal]) - ** self.p - ) + np.sum(np.abs(self.y[signal] - self._target[signal]) ** self.p) for signal in self.signal ] ) @@ -385,19 +361,14 @@ def _evaluateS1(self, inputs): ValueError If an error occurs during the calculation of the cost or gradient. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf, self._de * np.ones(self.n_parameters) r = np.asarray( - [ - self._current_prediction[signal] - self._target[signal] - for signal in self.signal - ] + [self.y[signal] - self._target[signal] for signal in self.signal] ) e = np.sum(np.sum(np.abs(r) ** self.p)) - de = self.p * np.sum( - np.sum(r ** (self.p - 1) * self._current_sensitivities.T, axis=2), axis=1 - ) + de = self.p * np.sum(np.sum(r ** (self.p - 1) * self.dy.T, axis=2), axis=1) return e, de @@ -416,7 +387,7 @@ class ObserverCost(BaseCost): def __init__(self, observer: Observer): super().__init__(problem=observer) self._observer = observer - self._fixed_problem = False # keep problem evaluation within _evaluate + self._predict = False # evaluate problem within cost._evaluate() def _evaluate(self, inputs: Inputs, grad=None): """ diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py index 2b00b234..b1bbb0ab 100644 --- a/pybop/optimisers/base_optimiser.py +++ b/pybop/optimisers/base_optimiser.py @@ -3,7 +3,14 @@ import numpy as np -from pybop import BaseCost, BaseLikelihood, DesignCost, Parameter, Parameters +from pybop import ( + BaseCost, + BaseLikelihood, + DesignCost, + Parameter, + Parameters, + WeightedCost, +) class BaseOptimiser: @@ -67,6 +74,8 @@ def __init__( self.cost = cost self.parameters.join(cost.parameters) self.set_allow_infeasible_solutions() + if isinstance(cost, WeightedCost): + self.minimising = cost.minimising if isinstance(cost, (BaseLikelihood, DesignCost)): self.minimising = False diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index ac6e5877..b44376b4 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py 
@@ -312,25 +312,25 @@ def test_weighted_fitting_cost(self, problem): np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) with pytest.raises( TypeError, - match=r"Received instead of cost object.", + match="All costs must be instances of BaseCost.", ): - weighted_cost = pybop.WeightedCost("Invalid string") + weighted_cost = pybop.WeightedCost(cost1.problem) with pytest.raises( - TypeError, - match="Expected a list or array of weights the same length as costs.", + ValueError, + match="Weights must be numeric values.", ): weighted_cost = pybop.WeightedCost(cost1, cost2, weights="Invalid string") with pytest.raises( ValueError, - match="Expected a list or array of weights the same length as costs.", + match="Number of weights must match number of costs.", ): weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1]) # Test with and without different problems weight = 100 weighted_cost_2 = pybop.WeightedCost(cost1, cost2, weights=[1, weight]) - assert weighted_cost_2._different_problems is False - assert weighted_cost_2._fixed_problem is True + assert weighted_cost_2._has_different_problems is False + assert weighted_cost_2._predict is False assert weighted_cost_2.problem is problem assert weighted_cost_2([0.5]) >= 0 np.testing.assert_allclose( @@ -341,8 +341,8 @@ def test_weighted_fitting_cost(self, problem): cost3 = pybop.RootMeanSquaredError(copy(problem)) weighted_cost_3 = pybop.WeightedCost(cost1, cost3, weights=[1, weight]) - assert weighted_cost_3._different_problems is True - assert weighted_cost_3._fixed_problem is False + assert weighted_cost_3._has_different_problems is True + assert weighted_cost_3._predict is False assert weighted_cost_3.problem is None assert weighted_cost_3([0.5]) >= 0 np.testing.assert_allclose( @@ -356,15 +356,27 @@ def test_weighted_fitting_cost(self, problem): np.testing.assert_allclose(errors_2, errors_3, atol=1e-5) np.testing.assert_allclose(sensitivities_2, sensitivities_3, atol=1e-5) + # Test MAP explicitly + cost4 = pybop.MAP(problem, pybop.GaussianLogLikelihoodKnownSigma) + weighted_cost_4 = pybop.WeightedCost(cost1, cost4, weights=[1, weight]) + assert weighted_cost_4._has_different_problems is False + assert weighted_cost_4._predict is False + assert weighted_cost_4([0.5]) <= 0 + np.testing.assert_allclose( + weighted_cost_4.evaluate([0.6]), + cost1([0.6]) + weight * cost4([0.6]), + atol=1e-5, + ) + @pytest.mark.unit def test_weighted_design_cost(self, design_problem): cost1 = pybop.GravimetricEnergyDensity(design_problem) - cost2 = pybop.RootMeanSquaredError(design_problem) + cost2 = pybop.VolumetricEnergyDensity(design_problem) # Test with and without weights weighted_cost = pybop.WeightedCost(cost1, cost2) - assert weighted_cost._different_problems is False - assert weighted_cost._fixed_problem is False + assert weighted_cost._has_different_problems is False + assert weighted_cost._predict is False assert weighted_cost.problem is design_problem assert weighted_cost([0.5]) >= 0 np.testing.assert_allclose( @@ -372,3 +384,13 @@ def test_weighted_design_cost(self, design_problem): cost1([0.6]) + cost2([0.6]), atol=1e-5, ) + + @pytest.mark.unit + def test_mixed_problem_classes(self, problem, design_problem): + cost1 = pybop.SumSquaredError(problem) + cost2 = pybop.GravimetricEnergyDensity(design_problem) + with pytest.raises( + TypeError, + match="All problems must be of the same class type.", + ): + pybop.WeightedCost(cost1, cost2) From 132f83c0e378cf14343b7a5ee52b0baf581a4a0a Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 18 
Jul 2024 13:34:33 +0100 Subject: [PATCH 26/35] feat: adds support for single DesignProblem optimisation, fixes for minimising, adds integration tests --- examples/scripts/spme_max_energy.py | 10 +- pybop/__init__.py | 3 +- pybop/costs/_weighted_cost.py | 137 ++++++++++++++++++ pybop/costs/base_cost.py | 141 +------------------ pybop/costs/design_costs.py | 31 ++--- pybop/problems/base_problem.py | 1 + pybop/problems/design_problem.py | 51 ++++--- pybop/problems/fitting_problem.py | 2 +- tests/integration/test_weighted_cost.py | 176 ++++++++++++++++++++++++ 9 files changed, 371 insertions(+), 181 deletions(-) create mode 100644 pybop/costs/_weighted_cost.py create mode 100644 tests/integration/test_weighted_cost.py diff --git a/examples/scripts/spme_max_energy.py b/examples/scripts/spme_max_energy.py index f5b7c827..db521b68 100644 --- a/examples/scripts/spme_max_energy.py +++ b/examples/scripts/spme_max_energy.py @@ -45,13 +45,15 @@ model, parameters, experiment, signal=signal, init_soc=init_soc ) -# Generate cost function and optimisation class: -cost = pybop.GravimetricEnergyDensity(problem) +# Generate multiple cost functions and combine them. +cost1 = pybop.GravimetricEnergyDensity(problem, update_capacity=True) +cost2 = pybop.VolumetricEnergyDensity(problem, update_capacity=True) +cost = pybop.WeightedCost(cost1, cost2, weights=[1, 1]) + +# Run optimisation optim = pybop.PSO( cost, verbose=True, allow_infeasible_solutions=False, max_iterations=15 ) - -# Run optimisation x, final_cost = optim.run() print("Estimated parameters:", x) print(f"Initial gravimetric energy density: {cost(optim.x0):.2f} Wh.kg-1") diff --git a/pybop/__init__.py b/pybop/__init__.py index 4a99ebc4..922a6480 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -81,7 +81,7 @@ # # Cost function class # -from .costs.base_cost import BaseCost, WeightedCost +from .costs.base_cost import BaseCost from .costs.fitting_costs import ( RootMeanSquaredError, SumSquaredError, @@ -100,6 +100,7 @@ GaussianLogLikelihoodKnownSigma, MAP, ) +from .costs._weighted_cost import WeightedCost # # Optimiser class diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py new file mode 100644 index 00000000..9555ece8 --- /dev/null +++ b/pybop/costs/_weighted_cost.py @@ -0,0 +1,137 @@ +import copy +from typing import Optional + +import numpy as np + +from pybop import BaseCost, BaseLikelihood, DesignCost +from pybop.parameters.parameter import Inputs + + +class WeightedCost(BaseCost): + """ + A subclass for constructing a linear combination of cost functions as + a single weighted cost function. + + Inherits all parameters and attributes from ``BaseCost``. + + Attributes + --------------------- + costs : list[pybop.BaseCost] + A list of PyBOP cost objects. + weights : list[float] + A list of values with which to weight the cost values. + _has_different_problems : bool + If True, the problem for each cost is evaluated independently during + each evaluation of the cost (default: False). 
+ """ + + def __init__(self, *costs, weights: Optional[list[float]] = None): + if not all(isinstance(cost, BaseCost) for cost in costs): + raise TypeError("All costs must be instances of BaseCost.") + self.costs = [copy.copy(cost) for cost in costs] + self._has_different_problems = False + self.minimising = not any( + isinstance(cost, (BaseLikelihood, DesignCost)) for cost in self.costs + ) + if len(set(type(cost.problem) for cost in self.costs)) > 1: + raise TypeError("All problems must be of the same class type.") + + # Check if weights are provided + if weights is not None: + try: + self.weights = np.asarray(weights, dtype=float) + except ValueError: + raise ValueError("Weights must be numeric values.") from None + + if self.weights.size != len(self.costs): + raise ValueError("Number of weights must match number of costs.") + else: + self.weights = np.ones(len(self.costs)) + + # Check if all costs depend on the same problem + self._has_different_problems = any( + hasattr(cost, "problem") and cost.problem is not self.costs[0].problem + for cost in self.costs[1:] + ) + + if self._has_different_problems: + super().__init__() + for cost in self.costs: + self.parameters.join(cost.parameters) + else: + super().__init__(self.costs[0].problem) + self._predict = False + for cost in self.costs: + cost._predict = False + + # Check if any cost function requires capacity update + if any(cost.update_capacity for cost in self.costs): + self.update_capacity = True + + def _evaluate(self, inputs: Inputs, grad=None): + """ + Calculate the weighted cost for a given set of parameters. + + Parameters + ---------- + inputs : Inputs + The parameters for which to compute the cost. + grad : array-like, optional + An array to store the gradient of the cost function with respect + to the parameters. + + Returns + ------- + float + The weighted cost value. + """ + e = np.empty_like(self.costs) + + if not self._predict: + if self._has_different_problems: + self.parameters.update(values=list(inputs.values())) + else: + self.y = self.problem.evaluate( + inputs, update_capacity=self.update_capacity + ) + + for i, cost in enumerate(self.costs): + if not self._has_different_problems: + cost.y = self.y + e[i] = cost.evaluate(inputs) + + return np.dot(e, self.weights) + + def _evaluateS1(self, inputs: Inputs): + """ + Compute the weighted cost and its gradient with respect to the parameters. + + Parameters + ---------- + inputs : Inputs + The parameters for which to compute the cost and gradient. + + Returns + ------- + tuple + A tuple containing the cost and the gradient. The cost is a float, + and the gradient is an array-like of the same length as `x`. 
+ """ + e = np.empty_like(self.costs) + de = np.empty((len(self.parameters), len(self.costs))) + + if not self._predict: + if self._has_different_problems: + self.parameters.update(values=list(inputs.values())) + else: + self.y, self.dy = self.problem.evaluateS1(inputs) + + for i, cost in enumerate(self.costs): + if not self._has_different_problems: + cost.y, cost.dy = (self.y, self.dy) + e[i], de[:, i] = cost.evaluateS1(inputs) + + e = np.dot(e, self.weights) + de = np.dot(de, self.weights) + + return e, de diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 974227f9..3ba818f5 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,10 +1,6 @@ -import copy -import warnings from typing import Optional, Union -import numpy as np - -from pybop import BaseProblem, DesignProblem +from pybop import BaseProblem from pybop.parameters.parameter import Inputs, Parameters @@ -36,6 +32,7 @@ def __init__(self, problem: Optional[BaseProblem] = None): self.problem = problem self.verbose = False self._predict = False + self.update_capacity = False self.y = None self.dy = None self.set_fail_gradient() @@ -210,137 +207,3 @@ def verify_prediction(self, y): return False return True - - -class WeightedCost(BaseCost): - """ - A subclass for constructing a linear combination of cost functions as - a single weighted cost function. - - Inherits all parameters and attributes from ``BaseCost``. - - Attributes - --------------------- - costs : list[pybop.BaseCost] - A list of PyBOP cost objects. - weights : list[float] - A list of values with which to weight the cost values. - _has_different_problems : bool - If True, the problem for each cost is evaluated independently during - each evaluation of the cost (default: False). - """ - - def __init__(self, *costs, weights: Optional[list[float]] = None): - if not all(isinstance(cost, BaseCost) for cost in costs): - raise TypeError("All costs must be instances of BaseCost.") - self.costs = [copy.copy(cost) for cost in costs] - self._has_different_problems = False - self.minimising = not any( - isinstance(cost.problem, DesignProblem) for cost in self.costs - ) - if len(set(type(cost.problem) for cost in self.costs)) > 1: - raise TypeError("All problems must be of the same class type.") - - # Check if weights are provided - if weights is not None: - try: - self.weights = np.asarray(weights, dtype=float) - except ValueError: - raise ValueError("Weights must be numeric values.") from None - - if self.weights.size != len(self.costs): - raise ValueError("Number of weights must match number of costs.") - else: - self.weights = np.ones(len(self.costs)) - - # Check if all costs depend on the same problem - self._has_different_problems = any( - hasattr(cost, "problem") and cost.problem is not self.costs[0].problem - for cost in self.costs[1:] - ) - - if self._has_different_problems: - super().__init__() - for cost in self.costs: - self.parameters.join(cost.parameters) - else: - super().__init__(self.costs[0].problem) - self._predict = False - for cost in self.costs: - cost._predict = False - - # Catch UserWarnings as exceptions - if not self.minimising: - warnings.filterwarnings("error", category=UserWarning) - - def _evaluate(self, inputs: Inputs, grad=None): - """ - Calculate the weighted cost for a given set of parameters. - - Parameters - ---------- - inputs : Inputs - The parameters for which to compute the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. 
- - Returns - ------- - float - The weighted cost value. - """ - e = np.empty_like(self.costs) - - if not self._predict: - if self._has_different_problems: - self.parameters.update(values=list(inputs.values())) - else: - try: - with warnings.catch_warnings(): - self.y = self.problem.evaluate(inputs) - except UserWarning as e: - if self.verbose: - print(f"Ignoring this sample due to: {e}") - return -np.inf - - for i, cost in enumerate(self.costs): - if not self._has_different_problems: - cost.y = self.y - e[i] = cost.evaluate(inputs) - - return np.dot(e, self.weights) - - def _evaluateS1(self, inputs: Inputs): - """ - Compute the weighted cost and its gradient with respect to the parameters. - - Parameters - ---------- - inputs : Inputs - The parameters for which to compute the cost and gradient. - - Returns - ------- - tuple - A tuple containing the cost and the gradient. The cost is a float, - and the gradient is an array-like of the same length as `x`. - """ - e = np.empty_like(self.costs) - de = np.empty((len(self.parameters), len(self.costs))) - - if not self._predict: - if self._has_different_problems: - self.parameters.update(values=list(inputs.values())) - else: - self.y, self.dy = self.problem.evaluateS1(inputs) - - for i, cost in enumerate(self.costs): - if not self._has_different_problems: - cost.y, cost.dy = (self.y, self.dy) - e[i], de[:, i] = cost.evaluateS1(inputs) - - e = np.dot(e, self.weights) - de = np.dot(de, self.weights) - - return e, de diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index 06649cb6..290f1efb 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -91,32 +91,17 @@ def evaluate(self, inputs: Union[Inputs, list], grad=None): inputs = self.parameters.verify(inputs) try: - with warnings.catch_warnings(): - # Convert UserWarning to an exception - warnings.filterwarnings("error", category=UserWarning) - if self._predict: - if self.update_capacity: - self.problem.model.approximate_capacity(inputs) - self.y = self.problem.evaluate(inputs) + if self._predict: + self.y = self.problem.evaluate( + inputs, update_capacity=self.update_capacity + ) - return self._evaluate(inputs, grad) + return self._evaluate(inputs, grad) # Catch NotImplementedError and raise it except NotImplementedError as e: raise e - # Catch infeasible solutions and return infinity - except UserWarning as e: - if self.verbose: - print(f"Ignoring this sample due to: {e}") - return -np.inf - - # Catch any other exception and return infinity - except Exception as e: - if self.verbose: - print(f"An error occurred during the evaluation: {e}") - return -np.inf - class GravimetricEnergyDensity(DesignCost): """ @@ -147,6 +132,9 @@ def _evaluate(self, inputs: Inputs, grad=None): float The gravimetric energy density or -infinity in case of infeasible parameters. """ + if not any(np.isfinite(self.y[signal][0]) for signal in self.signal): + return -np.inf + voltage, current = self.y["Voltage [V]"], self.y["Current [A]"] energy_density = np.trapz(voltage * current, dx=self.dt) / ( 3600 * self.problem.model.cell_mass(self.parameter_set) @@ -184,6 +172,9 @@ def _evaluate(self, inputs: Inputs, grad=None): float The volumetric energy density or -infinity in case of infeasible parameters. 
""" + if not any(np.isfinite(self.y[signal][0]) for signal in self.signal): + return -np.inf + voltage, current = self.y["Voltage [V]"], self.y["Current [A]"] energy_density = np.trapz(voltage * current, dx=self.dt) / ( 3600 * self.problem.model.cell_volume(self.parameter_set) diff --git a/pybop/problems/base_problem.py b/pybop/problems/base_problem.py index ee36a6bb..cb3bc597 100644 --- a/pybop/problems/base_problem.py +++ b/pybop/problems/base_problem.py @@ -64,6 +64,7 @@ def __init__( self.n_outputs = len(self.signal) self._time_data = None self._target = None + self.verbose = False if isinstance(model, BaseModel): self.additional_variables = additional_variables diff --git a/pybop/problems/design_problem.py b/pybop/problems/design_problem.py index 30e2d9c3..74b8ae71 100644 --- a/pybop/problems/design_problem.py +++ b/pybop/problems/design_problem.py @@ -1,3 +1,5 @@ +import warnings + import numpy as np from pybop import BaseProblem @@ -45,7 +47,10 @@ def __init__( signal = ["Voltage [V]"] additional_variables.extend(["Time [s]", "Current [A]"]) additional_variables = list(set(additional_variables)) - + self.warning_patterns = [ + "Ah is greater than", + "Non-physical point encountered", + ] super().__init__( parameters, model, @@ -75,7 +80,7 @@ def __init__( self._target = {key: sol[key] for key in self.signal} self._dataset = None - def evaluate(self, inputs: Inputs): + def evaluate(self, inputs: Inputs, update_capacity=False): """ Evaluate the model with the given parameters and return the signal. @@ -90,19 +95,33 @@ def evaluate(self, inputs: Inputs): The model output y(t) simulated with inputs. """ inputs = self.parameters.verify(inputs) - - sol = self._model.predict( - inputs=inputs, - experiment=self.experiment, - init_soc=self.init_soc, - ) - - if sol == [np.inf]: - return sol - - else: - predictions = {} - for signal in self.signal + self.additional_variables: - predictions[signal] = sol[signal].data + if update_capacity: + self.model.approximate_capacity(inputs) + + try: + with warnings.catch_warnings(): + for pattern in self.warning_patterns: + warnings.filterwarnings( + "error", category=UserWarning, message=pattern + ) + + sol = self._model.predict( + inputs=inputs, + experiment=self.experiment, + init_soc=self.init_soc, + ) + + # Catch infeasible solutions and return infinity + except (UserWarning, Exception) as e: + if self.verbose: + print(f"Ignoring this sample due to: {e}") + return { + signal: np.asarray(np.ones(2) * -np.inf) + for signal in [*self.signal, *self.additional_variables] + } + + predictions = {} + for signal in self.signal + self.additional_variables: + predictions[signal] = sol[signal].data return predictions diff --git a/pybop/problems/fitting_problem.py b/pybop/problems/fitting_problem.py index 58d59e9e..0ad636ce 100644 --- a/pybop/problems/fitting_problem.py +++ b/pybop/problems/fitting_problem.py @@ -79,7 +79,7 @@ def __init__( init_soc=self.init_soc, ) - def evaluate(self, inputs: Inputs): + def evaluate(self, inputs: Inputs, update_capacity=False): """ Evaluate the model with the given parameters and return the signal. diff --git a/tests/integration/test_weighted_cost.py b/tests/integration/test_weighted_cost.py new file mode 100644 index 00000000..a533bd98 --- /dev/null +++ b/tests/integration/test_weighted_cost.py @@ -0,0 +1,176 @@ +import numpy as np +import pytest + +import pybop + + +class TestWeightedCost: + """ + A class to test the weighted cost function. 
+ """ + + @pytest.fixture(autouse=True) + def setup(self): + self.sigma0 = 0.002 + self.ground_truth = np.asarray([0.55, 0.55]) + np.random.normal( + loc=0.0, scale=0.05, size=2 + ) + + @pytest.fixture + def model(self): + parameter_set = pybop.ParameterSet.pybamm("Chen2020") + return pybop.lithium_ion.SPM(parameter_set=parameter_set) + + @pytest.fixture + def parameters(self): + return pybop.Parameters( + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Uniform(0.4, 0.75), + bounds=[0.375, 0.75], + ), + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Uniform(0.4, 0.75), + # no bounds + ), + ) + + @pytest.fixture(params=[0.4]) + def init_soc(self, request): + return request.param + + @pytest.fixture( + params=[ + ( + pybop.GaussianLogLikelihoodKnownSigma, + pybop.RootMeanSquaredError, + pybop.SumSquaredError, + pybop.MAP, + ) + ] + ) + def cost_class(self, request): + return request.param + + def noise(self, sigma, values): + return np.random.normal(0, sigma, values) + + @pytest.fixture + def weighted_fitting_cost(self, model, parameters, cost_class, init_soc): + # Form dataset + solution = self.get_data(model, parameters, self.ground_truth, init_soc) + dataset = pybop.Dataset( + { + "Time [s]": solution["Time [s]"].data, + "Current function [A]": solution["Current [A]"].data, + "Voltage [V]": solution["Voltage [V]"].data + + self.noise(self.sigma0, len(solution["Time [s]"].data)), + } + ) + + # Define the cost to optimise + problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc) + costs = [] + for cost in cost_class: + if issubclass(cost, pybop.MAP): + costs.append( + cost( + problem, + pybop.GaussianLogLikelihoodKnownSigma, + sigma0=self.sigma0, + ) + ) + elif issubclass(cost, pybop.BaseLikelihood): + costs.append(cost(problem, sigma0=self.sigma0)) + else: + costs.append(cost(problem)) + + return pybop.WeightedCost(*costs, weights=[0.1, 1, 0.5, 0.6]) + + @pytest.mark.integration + def test_fitting_costs(self, weighted_fitting_cost): + x0 = weighted_fitting_cost.parameters.initial_value() + optim = pybop.CuckooSearch( + cost=weighted_fitting_cost, + sigma0=0.03, + max_iterations=250, + max_unchanged_iterations=35, + ) + + initial_cost = optim.cost(optim.parameters.initial_value()) + x, final_cost = optim.run() + + # Assertions + if not np.allclose(x0, self.ground_truth, atol=1e-5): + if optim.minimising: + assert initial_cost > final_cost + else: + assert initial_cost < final_cost + np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + + @pytest.fixture( + params=[ + ( + pybop.GravimetricEnergyDensity, + pybop.VolumetricEnergyDensity, + ) + ] + ) + def design_cost(self, request): + return request.param + + @pytest.fixture + def weighted_design_cost(self, model, design_cost): + init_soc = 1.0 + parameters = pybop.Parameters( + pybop.Parameter( + "Positive electrode thickness [m]", + prior=pybop.Gaussian(5e-05, 0.1e-05), + bounds=[50e-06, 10e-05], + ), + pybop.Parameter( + "Positive particle radius [m]", + prior=pybop.Gaussian(5.22e-06, 0.1e-06), + bounds=[2e-06, 9e-06], + ), + ) + experiment = pybop.Experiment( + ["Discharge at 1C until 3.3 V (5 seconds period)"], + ) + + problem = pybop.DesignProblem( + model, parameters, experiment=experiment, init_soc=init_soc + ) + costs = [] + for cost in design_cost: + costs.append(cost(problem, update_capacity=True)) + + return pybop.WeightedCost(*costs, weights=[100, 1]) + + @pytest.mark.integration + def test_design_costs(self, 
weighted_design_cost):
+        optim = pybop.CuckooSearch(
+            weighted_design_cost,
+            max_iterations=15,
+            allow_infeasible_solutions=False,
+        )
+
+        initial_cost = optim.cost(optim.parameters.initial_value())
+        _, final_cost = optim.run()
+
+        # Assertions
+        assert initial_cost < final_cost
+
+    def get_data(self, model, parameters, x, init_soc):
+        model.classify_and_update_parameters(parameters)
+        experiment = pybop.Experiment(
+            [
+                (
+                    "Discharge at 0.5C for 3 minutes (4 second period)",
+                    "Charge at 0.5C for 3 minutes (4 second period)",
+                ),
+            ]
+        )
+        sim = model.predict(init_soc=init_soc, experiment=experiment, inputs=x)
+        return sim

From d739642faee723cb872384753fa733877922fc32 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Thu, 18 Jul 2024 13:43:11 +0100
Subject: [PATCH 27/35] refactor: move WeightedCost into separate file

---
 pybop/__init__.py | 3 +-
 pybop/costs/_weighted_cost.py | 138 ++++++++++++++++++++++++++++++++++
 pybop/costs/base_cost.py | 134 ---------------------------------
 3 files changed, 140 insertions(+), 135 deletions(-)
 create mode 100644 pybop/costs/_weighted_cost.py

diff --git a/pybop/__init__.py b/pybop/__init__.py
index 4a99ebc4..922a6480 100644
--- a/pybop/__init__.py
+++ b/pybop/__init__.py
@@ -81,7 +81,7 @@
 #
 # Cost function class
 #
-from .costs.base_cost import BaseCost, WeightedCost
+from .costs.base_cost import BaseCost
 from .costs.fitting_costs import (
     RootMeanSquaredError,
     SumSquaredError,
@@ -100,6 +100,7 @@
     GaussianLogLikelihoodKnownSigma,
     MAP,
 )
+from .costs._weighted_cost import WeightedCost

 #
 # Optimiser class
diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py
new file mode 100644
index 00000000..effa5a51
--- /dev/null
+++ b/pybop/costs/_weighted_cost.py
@@ -0,0 +1,138 @@
+from typing import Optional
+
+import numpy as np
+
+from pybop import BaseCost
+from pybop.parameters.parameter import Inputs
+
+
+class WeightedCost(BaseCost):
+    """
+    A subclass for constructing a linear combination of cost functions as
+    a single weighted cost function.
+
+    Inherits all parameters and attributes from ``BaseCost``.
+
+    Additional Attributes
+    ---------------------
+    costs : list[pybop.BaseCost]
+        A list of PyBOP cost objects.
+    weights : list[float]
+        A list of values with which to weight the cost values.
+    _different_problems : bool
+        If True, the problem for each cost is evaluated independently during
+        each evaluation of the cost (default: False).
+    """
+
+    def __init__(self, *args, weights: Optional[list[float]] = None):
+        self.costs = []
+        for cost in args:
+            if not isinstance(cost, BaseCost):
+                raise TypeError(f"Received {type(cost)} instead of cost object.")
+            self.costs.append(cost)
+        self.weights = weights
+        self._different_problems = False
+
+        if self.weights is None:
+            self.weights = np.ones(len(self.costs))
+        elif isinstance(self.weights, list):
+            self.weights = np.array(self.weights)
+        if not isinstance(self.weights, np.ndarray):
+            raise TypeError(
+                "Expected a list or array of weights the same length as costs."
+            )
+        if not len(self.weights) == len(self.costs):
+            raise ValueError(
+                "Expected a list or array of weights the same length as costs."
+ ) + + # Check if all costs depend on the same problem + for cost in self.costs: + if hasattr(cost, "problem") and cost.problem is not self.costs[0].problem: + self._different_problems = True + + if not self._different_problems: + super().__init__(self.costs[0].problem) + self._fixed_problem = self.costs[0]._fixed_problem + else: + super().__init__() + self._fixed_problem = False + for cost in self.costs: + self.parameters.join(cost.parameters) + + def _evaluate(self, inputs: Inputs, grad=None): + """ + Calculate the weighted cost for a given set of parameters. + + Parameters + ---------- + inputs : Inputs + The parameters for which to compute the cost. + grad : array-like, optional + An array to store the gradient of the cost function with respect + to the parameters. + + Returns + ------- + float + The weighted cost value. + """ + e = np.empty_like(self.costs) + + if not self._fixed_problem and self._different_problems: + self.parameters.update(values=list(inputs.values())) + elif not self._fixed_problem: + self._current_prediction = self.problem.evaluate(inputs) + + for i, cost in enumerate(self.costs): + if not self._fixed_problem and self._different_problems: + inputs = cost.parameters.as_dict() + cost._current_prediction = cost.problem.evaluate(inputs) + else: + cost._current_prediction = self._current_prediction + e[i] = cost._evaluate(inputs, grad) + + return np.dot(e, self.weights) + + def _evaluateS1(self, inputs: Inputs): + """ + Compute the weighted cost and its gradient with respect to the parameters. + + Parameters + ---------- + inputs : Inputs + The parameters for which to compute the cost and gradient. + + Returns + ------- + tuple + A tuple containing the cost and the gradient. The cost is a float, + and the gradient is an array-like of the same length as `x`. + """ + e = np.empty_like(self.costs) + de = np.empty((len(self.parameters), len(self.costs))) + + if not self._fixed_problem and self._different_problems: + self.parameters.update(values=list(inputs.values())) + elif not self._fixed_problem: + self._current_prediction, self._current_sensitivities = ( + self.problem.evaluateS1(inputs) + ) + + for i, cost in enumerate(self.costs): + if not self._fixed_problem and self._different_problems: + inputs = cost.parameters.as_dict() + cost._current_prediction, cost._current_sensitivities = ( + cost.problem.evaluateS1(inputs) + ) + else: + cost._current_prediction, cost._current_sensitivities = ( + self._current_prediction, + self._current_sensitivities, + ) + e[i], de[:, i] = cost._evaluateS1(inputs) + + e = np.dot(e, self.weights) + de = np.dot(de, self.weights) + + return e, de diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index b3cf7ae1..eedbbc2c 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,7 +1,5 @@ from typing import Optional, Union -import numpy as np - from pybop import BaseProblem from pybop.parameters.parameter import Inputs, Parameters @@ -210,135 +208,3 @@ def verify_prediction(self, y): return False return True - - -class WeightedCost(BaseCost): - """ - A subclass for constructing a linear combination of cost functions as - a single weighted cost function. - - Inherits all parameters and attributes from ``BaseCost``. - - Additional Attributes - --------------------- - costs : list[pybop.BaseCost] - A list of PyBOP cost objects. - weights : list[float] - A list of values with which to weight the cost values. 
- _different_problems : bool - If True, the problem for each cost is evaluated independently during - each evaluation of the cost (default: False). - """ - - def __init__(self, *args, weights: Optional[list[float]] = None): - self.costs = [] - for cost in args: - if not isinstance(cost, BaseCost): - raise TypeError(f"Received {type(cost)} instead of cost object.") - self.costs.append(cost) - self.weights = weights - self._different_problems = False - - if self.weights is None: - self.weights = np.ones(len(self.costs)) - elif isinstance(self.weights, list): - self.weights = np.array(self.weights) - if not isinstance(self.weights, np.ndarray): - raise TypeError( - "Expected a list or array of weights the same length as costs." - ) - if not len(self.weights) == len(self.costs): - raise ValueError( - "Expected a list or array of weights the same length as costs." - ) - - # Check if all costs depend on the same problem - for cost in self.costs: - if hasattr(cost, "problem") and cost.problem is not self.costs[0].problem: - self._different_problems = True - - if not self._different_problems: - super().__init__(self.costs[0].problem) - self._fixed_problem = self.costs[0]._fixed_problem - else: - super().__init__() - self._fixed_problem = False - for cost in self.costs: - self.parameters.join(cost.parameters) - - def _evaluate(self, inputs: Inputs, grad=None): - """ - Calculate the weighted cost for a given set of parameters. - - Parameters - ---------- - inputs : Inputs - The parameters for which to compute the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. - - Returns - ------- - float - The weighted cost value. - """ - e = np.empty_like(self.costs) - - if not self._fixed_problem and self._different_problems: - self.parameters.update(values=list(inputs.values())) - elif not self._fixed_problem: - self._current_prediction = self.problem.evaluate(inputs) - - for i, cost in enumerate(self.costs): - if not self._fixed_problem and self._different_problems: - inputs = cost.parameters.as_dict() - cost._current_prediction = cost.problem.evaluate(inputs) - else: - cost._current_prediction = self._current_prediction - e[i] = cost._evaluate(inputs, grad) - - return np.dot(e, self.weights) - - def _evaluateS1(self, inputs: Inputs): - """ - Compute the weighted cost and its gradient with respect to the parameters. - - Parameters - ---------- - inputs : Inputs - The parameters for which to compute the cost and gradient. - - Returns - ------- - tuple - A tuple containing the cost and the gradient. The cost is a float, - and the gradient is an array-like of the same length as `x`. 
-        """
-        e = np.empty_like(self.costs)
-        de = np.empty((len(self.parameters), len(self.costs)))
-
-        if not self._fixed_problem and self._different_problems:
-            self.parameters.update(values=list(inputs.values()))
-        elif not self._fixed_problem:
-            self._current_prediction, self._current_sensitivities = (
-                self.problem.evaluateS1(inputs)
-            )
-
-        for i, cost in enumerate(self.costs):
-            if not self._fixed_problem and self._different_problems:
-                inputs = cost.parameters.as_dict()
-                cost._current_prediction, cost._current_sensitivities = (
-                    cost.problem.evaluateS1(inputs)
-                )
-            else:
-                cost._current_prediction, cost._current_sensitivities = (
-                    self._current_prediction,
-                    self._current_sensitivities,
-                )
-            e[i], de[:, i] = cost._evaluateS1(inputs)
-
-        e = np.dot(e, self.weights)
-        de = np.dot(de, self.weights)
-
-        return e, de

From df0e0178e49e5942a5d8f8d2b746e9a3a7f63bb4 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Mon, 22 Jul 2024 09:04:29 +0100
Subject: [PATCH 28/35] fix: add MAP catches for WeightedCost implementation,
 update .gitignore

---
 .gitignore | 3 +++
 pybop/costs/_likelihoods.py | 4 ++++
 2 files changed, 7 insertions(+)

diff --git a/.gitignore b/.gitignore
index 3c3bb708..2bafcca6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -314,3 +314,6 @@ $RECYCLE.BIN/
 # Airspeed Velocity
 *.asv/
 results/
+
+# PyCharm
+*.idea/
diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py
index 0751b433..638b4bf9 100644
--- a/pybop/costs/_likelihoods.py
+++ b/pybop/costs/_likelihoods.py
@@ -302,6 +302,8 @@ def _evaluate(self, inputs: Inputs, grad=None) -> float:
         if not np.isfinite(log_prior).any():
             return -np.inf

+        if not self._predict:
+            self.likelihood.y = self.y
         log_likelihood = self.likelihood.evaluate(inputs)

         posterior = log_likelihood + log_prior
@@ -334,6 +336,8 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]:
         if not np.isfinite(log_prior).any():
             return -np.inf, -self._de * np.ones(self.n_parameters)

+        if not self._predict:
+            self.likelihood.y, self.likelihood.dy = (self.y, self.dy)
         log_likelihood, dl = self.likelihood.evaluateS1(inputs)

         # Compute a finite difference approximation of the gradient of the log prior

From 589a5f6bcff5181a1d113a250eeb01172e202bd7 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Tue, 23 Jul 2024 16:58:33 +0100
Subject: [PATCH 29/35] Clarify evaluation sequence

---
 examples/standalone/problem.py | 2 +-
 pybop/costs/_likelihoods.py | 9 ++--
 pybop/costs/_weighted_cost.py | 80 ++++++++++++++++++----------------
 pybop/costs/base_cost.py | 18 ++++----
 pybop/costs/design_costs.py | 39 -----------------
 pybop/costs/fitting_costs.py | 2 +-
 tests/unit/test_cost.py | 25 ++++++-----
 7 files changed, 73 insertions(+), 102 deletions(-)

diff --git a/examples/standalone/problem.py b/examples/standalone/problem.py
index 18bf1f7d..5fd33e0f 100644
--- a/examples/standalone/problem.py
+++ b/examples/standalone/problem.py
@@ -42,7 +42,7 @@ def __init__(
         )
         self._target = {signal: self._dataset[signal] for signal in self.signal}

-    def evaluate(self, inputs):
+    def evaluate(self, inputs, **kwargs):
         """
         Evaluate the model with the given parameters and return the signal.
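[Illustrative aside: not part of any patch in this series.] The `**kwargs` accepted by `evaluate` above lets callers pass options such as `update_capacity` through to any problem class, while the `_has_separable_problem` flag introduced in the diffs below encodes the evaluation sequence this patch clarifies: when a cost's problem is separable, the caller simulates the problem first and caches the prediction on the cost before `_evaluate()` runs. A minimal sketch of that pattern, using hypothetical `ToyProblem`/`ToyCost` stand-ins rather than the real pybop classes:

import numpy as np

class ToyProblem:
    """Stand-in for a pybop.BaseProblem: maps inputs to a prediction."""

    def evaluate(self, inputs, **kwargs):
        # Extra keyword arguments (e.g. update_capacity) are accepted but
        # ignored here, mirroring the signature change in the hunk above.
        return {"Voltage [V]": inputs["gain"] * np.ones(3)}

class ToyCost:
    """Stand-in for a pybop cost whose problem is separable."""

    def __init__(self, problem):
        self.problem = problem
        self._has_separable_problem = True
        self.y = None  # prediction cache, filled before _evaluate()

    def __call__(self, inputs):
        if self._has_separable_problem:
            # Simulate the problem once, ahead of the cost computation.
            self.y = self.problem.evaluate(inputs)
        return self._evaluate(inputs)

    def _evaluate(self, inputs):
        target = np.full(3, 2.0)  # hypothetical measured data
        return float(np.sum((self.y["Voltage [V]"] - target) ** 2))

print(ToyCost(ToyProblem())({"gain": 1.5}))  # 3 * (2.0 - 1.5)**2 = 0.75

This pre-evaluation step is what allows the WeightedCost changes below to simulate a shared problem once and hand the same prediction to every sub-cost.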
diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 638b4bf9..728e8095 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -116,7 +116,7 @@ def __init__( super().__init__(problem) self._dsigma_scale = dsigma_scale self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi) - self._predict = False # keep problem evaluation within _evaluate + self._has_separable_problem = False self.sigma = Parameters() self._add_sigma_parameters(sigma0) @@ -278,6 +278,9 @@ def __init__(self, problem, likelihood, sigma0=None, gradient_step=1e-3): ): raise ValueError(f"{self.likelihood} must be a subclass of BaseLikelihood") + self.parameters = self.likelihood.parameters + self._has_separable_problem = self.likelihood._has_separable_problem + def _evaluate(self, inputs: Inputs, grad=None) -> float: """ Calculate the maximum a posteriori cost for a given set of parameters. @@ -302,7 +305,7 @@ def _evaluate(self, inputs: Inputs, grad=None) -> float: if not np.isfinite(log_prior).any(): return -np.inf - if not self._predict: + if self._has_separable_problem: self.likelihood.y = self.y log_likelihood = self.likelihood.evaluate(inputs) @@ -336,7 +339,7 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: if not np.isfinite(log_prior).any(): return -np.inf, -self._de * np.ones(self.n_parameters) - if not self._predict: + if self._has_separable_problem: self.likelihood.y, self.likelihood.dy = (self.y, self.dy) log_likelihood, dl = self.likelihood.evaluateS1(inputs) diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py index 9555ece8..a3f85053 100644 --- a/pybop/costs/_weighted_cost.py +++ b/pybop/costs/_weighted_cost.py @@ -1,4 +1,3 @@ -import copy from typing import Optional import numpy as np @@ -20,21 +19,20 @@ class WeightedCost(BaseCost): A list of PyBOP cost objects. weights : list[float] A list of values with which to weight the cost values. - _has_different_problems : bool - If True, the problem for each cost is evaluated independently during - each evaluation of the cost (default: False). + _has_identical_problems : bool + If True, the shared problem will be evaluated once and saved before the + self._evaluate() method of each cost is called (default: False). 
""" def __init__(self, *costs, weights: Optional[list[float]] = None): if not all(isinstance(cost, BaseCost) for cost in costs): raise TypeError("All costs must be instances of BaseCost.") - self.costs = [copy.copy(cost) for cost in costs] - self._has_different_problems = False + self.costs = [cost for cost in costs] + if len(set(type(cost.problem) for cost in self.costs)) > 1: + raise TypeError("All problems must be of the same class type.") self.minimising = not any( isinstance(cost, (BaseLikelihood, DesignCost)) for cost in self.costs ) - if len(set(type(cost.problem) for cost in self.costs)) > 1: - raise TypeError("All problems must be of the same class type.") # Check if weights are provided if weights is not None: @@ -49,24 +47,26 @@ def __init__(self, *costs, weights: Optional[list[float]] = None): self.weights = np.ones(len(self.costs)) # Check if all costs depend on the same problem - self._has_different_problems = any( - hasattr(cost, "problem") and cost.problem is not self.costs[0].problem - for cost in self.costs[1:] + self._has_identical_problems = all( + cost._has_separable_problem and cost.problem is self.costs[0].problem + for cost in self.costs ) - if self._has_different_problems: + # Check if any cost function requires capacity update + self.update_capacity = False + if any(cost.update_capacity for cost in self.costs): + self.update_capacity = True + + if self._has_identical_problems: + super().__init__(self.costs[0].problem) + else: super().__init__() for cost in self.costs: self.parameters.join(cost.parameters) - else: - super().__init__(self.costs[0].problem) - self._predict = False - for cost in self.costs: - cost._predict = False + cost.update_capacity = self.update_capacity - # Check if any cost function requires capacity update - if any(cost.update_capacity for cost in self.costs): - self.update_capacity = True + # Weighted costs do not use this functionality + self._has_separable_problem = False def _evaluate(self, inputs: Inputs, grad=None): """ @@ -85,20 +85,22 @@ def _evaluate(self, inputs: Inputs, grad=None): float The weighted cost value. """ - e = np.empty_like(self.costs) + self.parameters.update(values=list(inputs.values())) - if not self._predict: - if self._has_different_problems: - self.parameters.update(values=list(inputs.values())) - else: - self.y = self.problem.evaluate( - inputs, update_capacity=self.update_capacity - ) + if self._has_identical_problems: + self.y = self.problem.evaluate(inputs, update_capacity=self.update_capacity) + + e = np.empty_like(self.costs) for i, cost in enumerate(self.costs): - if not self._has_different_problems: + inputs = cost.parameters.as_dict() + if self._has_identical_problems: cost.y = self.y - e[i] = cost.evaluate(inputs) + elif cost._has_separable_problem: + cost.y = cost.problem.evaluate( + inputs, update_capacity=self.update_capacity + ) + e[i] = cost._evaluate(inputs) return np.dot(e, self.weights) @@ -117,19 +119,21 @@ def _evaluateS1(self, inputs: Inputs): A tuple containing the cost and the gradient. The cost is a float, and the gradient is an array-like of the same length as `x`. 
""" + self.parameters.update(values=list(inputs.values())) + + if self._has_identical_problems: + self.y, self.dy = self.problem.evaluateS1(inputs) + e = np.empty_like(self.costs) de = np.empty((len(self.parameters), len(self.costs))) - if not self._predict: - if self._has_different_problems: - self.parameters.update(values=list(inputs.values())) - else: - self.y, self.dy = self.problem.evaluateS1(inputs) - for i, cost in enumerate(self.costs): - if not self._has_different_problems: + inputs = cost.parameters.as_dict() + if self._has_identical_problems: cost.y, cost.dy = (self.y, self.dy) - e[i], de[:, i] = cost.evaluateS1(inputs) + elif cost._has_separable_problem: + cost.y, cost.dy = cost.problem.evaluateS1(inputs) + e[i], de[:, i] = cost._evaluateS1(inputs) e = np.dot(e, self.weights) de = np.dot(de, self.weights) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 3ba818f5..a03b4a26 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -22,16 +22,16 @@ class BaseCost: An array containing the target data to fit. n_outputs : int The number of outputs in the model. - _predict : bool - If False, the problem will be evaluated outside the self.evaluate() method - before the cost is calculated (default: False). + _has_separable_problem : bool + If True, the problem is independent from the cost function and will be + evaluated in advance of the call to self._evaluate() (default: False). """ def __init__(self, problem: Optional[BaseProblem] = None): self.parameters = Parameters() self.problem = problem self.verbose = False - self._predict = False + self._has_separable_problem = False self.update_capacity = False self.y = None self.dy = None @@ -41,7 +41,7 @@ def __init__(self, problem: Optional[BaseProblem] = None): self.parameters.join(self.problem.parameters) self.n_outputs = self.problem.n_outputs self.signal = self.problem.signal - self._predict = True + self._has_separable_problem = True @property def n_parameters(self): @@ -78,8 +78,10 @@ def evaluate(self, inputs: Union[Inputs, list], grad=None): inputs = self.parameters.verify(inputs) try: - if self._predict: - self.y = self.problem.evaluate(inputs) + if self._has_separable_problem: + self.y = self.problem.evaluate( + inputs, update_capacity=self.update_capacity + ) return self._evaluate(inputs, grad) @@ -138,7 +140,7 @@ def evaluateS1(self, inputs: Union[Inputs, list]): inputs = self.parameters.verify(inputs) try: - if self._predict: + if self._has_separable_problem: self.y, self.dy = self.problem.evaluateS1(inputs) return self._evaluateS1(inputs) diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index a01af603..d0bc8fc8 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -1,5 +1,4 @@ import warnings -from typing import Union import numpy as np @@ -66,42 +65,6 @@ def update_simulation_data(self, inputs: Inputs): self.problem._target = {key: solution[key] for key in self.problem.signal} self.dt = solution["Time [s]"][1] - solution["Time [s]"][0] - def evaluate(self, inputs: Union[Inputs, list], grad=None): - """ - Call the evaluate function for a given set of parameters. - - Parameters - ---------- - inputs : Inputs or array-like - The parameters for which to compute the cost and gradient. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. - - Returns - ------- - float - The calculated cost function value. 
- - Raises - ------ - ValueError - If an error occurs during the calculation of the cost. - """ - inputs = self.parameters.verify(inputs) - - try: - if self._predict: - self.y = self.problem.evaluate( - inputs, update_capacity=self.update_capacity - ) - - return self._evaluate(inputs, grad) - - # Catch NotImplementedError and raise it - except NotImplementedError as e: - raise e - class GravimetricEnergyDensity(DesignCost): """ @@ -115,7 +78,6 @@ class GravimetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super().__init__(problem, update_capacity) - self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, inputs: Inputs, grad=None): """ @@ -156,7 +118,6 @@ class VolumetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super().__init__(problem, update_capacity) - self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, inputs: Inputs, grad=None): """ diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index cfcc32ef..15922d8c 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -387,7 +387,7 @@ class ObserverCost(BaseCost): def __init__(self, observer: Observer): super().__init__(problem=observer) self._observer = observer - self._predict = False # evaluate problem within cost._evaluate() + self._has_separable_problem = False def _evaluate(self, inputs: Inputs, grad=None): """ diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index a104b21e..db9679c3 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -329,8 +329,8 @@ def test_weighted_fitting_cost(self, problem): # Test with and without different problems weight = 100 weighted_cost_2 = pybop.WeightedCost(cost1, cost2, weights=[1, weight]) - assert weighted_cost_2._has_different_problems is False - assert weighted_cost_2._predict is False + assert weighted_cost_2._has_identical_problems is True + assert weighted_cost_2._has_separable_problem is False assert weighted_cost_2.problem is problem assert weighted_cost_2([0.5]) >= 0 np.testing.assert_allclose( @@ -341,8 +341,8 @@ def test_weighted_fitting_cost(self, problem): cost3 = pybop.RootMeanSquaredError(copy(problem)) weighted_cost_3 = pybop.WeightedCost(cost1, cost3, weights=[1, weight]) - assert weighted_cost_3._has_different_problems is True - assert weighted_cost_3._predict is False + assert weighted_cost_3._has_identical_problems is False + assert weighted_cost_3._has_separable_problem is False assert weighted_cost_3.problem is None assert weighted_cost_3([0.5]) >= 0 np.testing.assert_allclose( @@ -357,14 +357,15 @@ def test_weighted_fitting_cost(self, problem): np.testing.assert_allclose(sensitivities_2, sensitivities_3, atol=1e-5) # Test MAP explicitly - cost4 = pybop.MAP(problem, pybop.GaussianLogLikelihoodKnownSigma) + cost4 = pybop.MAP(problem, pybop.GaussianLogLikelihood) weighted_cost_4 = pybop.WeightedCost(cost1, cost4, weights=[1, weight]) - assert weighted_cost_4._has_different_problems is False - assert weighted_cost_4._predict is False - assert weighted_cost_4([0.5]) <= 0 + assert weighted_cost_4._has_identical_problems is False + assert weighted_cost_4._has_separable_problem is False + sigma = 0.05 + assert weighted_cost_4([0.5, sigma]) <= 0 np.testing.assert_allclose( - weighted_cost_4.evaluate([0.6]), - cost1([0.6]) + weight * cost4([0.6]), + weighted_cost_4.evaluate([0.6, sigma]), + cost1([0.6, sigma]) + weight * cost4([0.6, sigma]), atol=1e-5, ) @@ -375,8 
+376,8 @@ def test_weighted_design_cost(self, design_problem): # Test with and without weights weighted_cost = pybop.WeightedCost(cost1, cost2) - assert weighted_cost._has_different_problems is False - assert weighted_cost._predict is False + assert weighted_cost._has_identical_problems is True + assert weighted_cost._has_separable_problem is False assert weighted_cost.problem is design_problem assert weighted_cost([0.5]) >= 0 np.testing.assert_allclose( From deb4ebe551f0a98a96cf652e59524e598705f8c2 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Tue, 23 Jul 2024 17:47:30 +0100 Subject: [PATCH 30/35] Update spme_max_energy output --- examples/scripts/spme_max_energy.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/examples/scripts/spme_max_energy.py b/examples/scripts/spme_max_energy.py index db521b68..8542b4ad 100644 --- a/examples/scripts/spme_max_energy.py +++ b/examples/scripts/spme_max_energy.py @@ -9,12 +9,6 @@ # electrode widths, particle radii, volume fractions and # separator width. -# NOTE: This script can be easily adjusted to consider the volumetric -# (instead of gravimetric) energy density by changing the line which -# defines the cost and changing the output to: -# print(f"Initial volumetric energy density: {cost(optim.x0):.2f} Wh.m-3") -# print(f"Optimised volumetric energy density: {final_cost:.2f} Wh.m-3") - # Define parameter set and model parameter_set = pybop.ParameterSet.pybamm("Chen2020", formation_concentrations=True) model = pybop.lithium_ion.SPMe(parameter_set=parameter_set) @@ -56,8 +50,10 @@ ) x, final_cost = optim.run() print("Estimated parameters:", x) -print(f"Initial gravimetric energy density: {cost(optim.x0):.2f} Wh.kg-1") -print(f"Optimised gravimetric energy density: {final_cost:.2f} Wh.kg-1") +print(f"Initial gravimetric energy density: {cost1(optim.x0):.2f} Wh.kg-1") +print(f"Optimised gravimetric energy density: {cost1(x):.2f} Wh.kg-1") +print(f"Initial volumetric energy density: {cost2(optim.x0):.2f} Wh.m-3") +print(f"Optimised volumetric energy density: {cost2(x):.2f} Wh.m-3") # Plot the timeseries output if cost.update_capacity: From f2bcfecf4b645189c1134c7b129acaf6db129759 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 31 Jul 2024 16:45:47 +0100 Subject: [PATCH 31/35] fix: catch update_capacity overwrite on super(), add UserWarning for global update_capacity() usage, add tests for side-effects on underlying objects --- pybop/costs/_weighted_cost.py | 19 ++++++---- tests/integration/test_weighted_cost.py | 35 ++++++++++++------- tests/unit/test_cost.py | 46 +++++++++++++++++++++++-- 3 files changed, 79 insertions(+), 21 deletions(-) diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py index a3f85053..f9088b7f 100644 --- a/pybop/costs/_weighted_cost.py +++ b/pybop/costs/_weighted_cost.py @@ -1,3 +1,4 @@ +import warnings from typing import Optional import numpy as np @@ -52,18 +53,24 @@ def __init__(self, *costs, weights: Optional[list[float]] = None): for cost in self.costs ) - # Check if any cost function requires capacity update - self.update_capacity = False - if any(cost.update_capacity for cost in self.costs): - self.update_capacity = True - if self._has_identical_problems: super().__init__(self.costs[0].problem) else: super().__init__() for cost in self.costs: self.parameters.join(cost.parameters) - cost.update_capacity = self.update_capacity + + # Check if any cost function requires capacity update + self.update_capacity = 
False + if any(cost.update_capacity for cost in self.costs): + self.update_capacity = True + + warnings.warn( + "WeightedCost doesn't currently support DesignCosts with different `update_capacity` attributes,\n" + f"Using global `DesignCost.update_capacity` attribute as: {self.update_capacity}", + UserWarning, + stacklevel=2, + ) # Weighted costs do not use this functionality self._has_separable_problem = False diff --git a/tests/integration/test_weighted_cost.py b/tests/integration/test_weighted_cost.py index a533bd98..e964e970 100644 --- a/tests/integration/test_weighted_cost.py +++ b/tests/integration/test_weighted_cost.py @@ -126,41 +126,50 @@ def weighted_design_cost(self, model, design_cost): parameters = pybop.Parameters( pybop.Parameter( "Positive electrode thickness [m]", - prior=pybop.Gaussian(5e-05, 0.1e-05), - bounds=[50e-06, 10e-05], + prior=pybop.Gaussian(5e-05, 5e-06), + bounds=[2e-06, 10e-05], ), pybop.Parameter( - "Positive particle radius [m]", - prior=pybop.Gaussian(5.22e-06, 0.1e-06), - bounds=[2e-06, 9e-06], + "Negative electrode thickness [m]", + prior=pybop.Gaussian(5e-05, 5e-06), + bounds=[2e-06, 10e-05], ), ) experiment = pybop.Experiment( - ["Discharge at 1C until 3.3 V (5 seconds period)"], + ["Discharge at 1C until 3.5 V (5 seconds period)"], ) problem = pybop.DesignProblem( model, parameters, experiment=experiment, init_soc=init_soc ) + costs_update_capacity = [] costs = [] for cost in design_cost: - costs.append(cost(problem, update_capacity=True)) + costs_update_capacity.append(cost(problem, update_capacity=True)) + costs.append(cost(problem)) - return pybop.WeightedCost(*costs, weights=[100, 1]) + return [ + pybop.WeightedCost(*costs, weights=[1.0, 0.1]), + pybop.WeightedCost(*costs_update_capacity, weights=[0.1, 1.0]), + ] @pytest.mark.integration - def test_design_costs(self, weighted_design_cost): + @pytest.mark.parametrize("cost_index", [0, 1]) + def test_design_costs(self, weighted_design_cost, cost_index): + cost = weighted_design_cost[cost_index] optim = pybop.CuckooSearch( - weighted_design_cost, + cost, max_iterations=15, allow_infeasible_solutions=False, ) - - initial_cost = optim.cost(optim.parameters.initial_value()) - _, final_cost = optim.run() + initial_values = optim.parameters.initial_value() + initial_cost = optim.cost(initial_values) + x, final_cost = optim.run() # Assertions assert initial_cost < final_cost + for i, _ in enumerate(x): + assert x[i] > initial_values[i] def get_data(self, model, parameters, x, init_soc): model.classify_and_update_parameters(parameters) diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index db9679c3..562094d0 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -326,7 +326,7 @@ def test_weighted_fitting_cost(self, problem): ): weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1]) - # Test with and without different problems + # Test with identical problems weight = 100 weighted_cost_2 = pybop.WeightedCost(cost1, cost2, weights=[1, weight]) assert weighted_cost_2._has_identical_problems is True @@ -339,6 +339,7 @@ def test_weighted_fitting_cost(self, problem): atol=1e-5, ) + # Test with different problems cost3 = pybop.RootMeanSquaredError(copy(problem)) weighted_cost_3 = pybop.WeightedCost(cost1, cost3, weights=[1, weight]) assert weighted_cost_3._has_identical_problems is False @@ -374,7 +375,7 @@ def test_weighted_design_cost(self, design_problem): cost1 = pybop.GravimetricEnergyDensity(design_problem) cost2 = pybop.VolumetricEnergyDensity(design_problem) - # Test 
with and without weights + # Test DesignCosts with identical problems weighted_cost = pybop.WeightedCost(cost1, cost2) assert weighted_cost._has_identical_problems is True assert weighted_cost._has_separable_problem is False @@ -386,6 +387,47 @@ def test_weighted_design_cost(self, design_problem): atol=1e-5, ) + # Test DesignCosts with different problems + cost3 = pybop.VolumetricEnergyDensity(copy(design_problem)) + weighted_cost = pybop.WeightedCost(cost1, cost3) + assert weighted_cost._has_identical_problems is False + assert weighted_cost._has_separable_problem is False + for i, _ in enumerate(weighted_cost.costs): + assert isinstance(weighted_cost.costs[i].problem, pybop.DesignProblem) + + # Ensure attributes are set correctly and not modified via side-effects + assert weighted_cost.update_capacity is False + assert weighted_cost.costs[0].update_capacity is False + assert weighted_cost.costs[1].update_capacity is False + + assert weighted_cost([0.5]) >= 0 + np.testing.assert_allclose( + weighted_cost.evaluate([0.6]), + cost1([0.6]) + cost2([0.6]), + atol=1e-5, + ) + + @pytest.mark.unit + def test_weighted_design_cost_with_update_capacity(self, design_problem): + cost1 = pybop.GravimetricEnergyDensity(design_problem, update_capacity=True) + cost2 = pybop.VolumetricEnergyDensity(design_problem) + weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 1]) + + # Ensure attributes are set correctly and not modified via side-effects + assert weighted_cost.update_capacity is True + assert weighted_cost.costs[0].update_capacity is True + assert weighted_cost.costs[1].update_capacity is False + + assert weighted_cost._has_identical_problems is True + assert weighted_cost._has_separable_problem is False + assert weighted_cost.problem is design_problem + assert weighted_cost([0.5]) >= 0 + np.testing.assert_allclose( + weighted_cost.evaluate([0.6]), + cost1([0.6]) + cost2([0.6]), + atol=1e-5, + ) + @pytest.mark.unit def test_mixed_problem_classes(self, problem, design_problem): cost1 = pybop.SumSquaredError(problem) From 1899c0b9fc4841a85f5bf91981c9d2677b864711 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 31 Jul 2024 16:52:37 +0100 Subject: [PATCH 32/35] fix: update warning location --- pybop/costs/_weighted_cost.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py index f9088b7f..32d48404 100644 --- a/pybop/costs/_weighted_cost.py +++ b/pybop/costs/_weighted_cost.py @@ -65,12 +65,12 @@ def __init__(self, *costs, weights: Optional[list[float]] = None): if any(cost.update_capacity for cost in self.costs): self.update_capacity = True - warnings.warn( - "WeightedCost doesn't currently support DesignCosts with different `update_capacity` attributes,\n" - f"Using global `DesignCost.update_capacity` attribute as: {self.update_capacity}", - UserWarning, - stacklevel=2, - ) + warnings.warn( + "WeightedCost doesn't currently support DesignCosts with different `update_capacity` attributes,\n" + f"Using global `DesignCost.update_capacity` attribute as: {self.update_capacity}", + UserWarning, + stacklevel=2, + ) # Weighted costs do not use this functionality self._has_separable_problem = False From 6f461643971af4946b4241218dee53899efeed4d Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 1 Aug 2024 07:51:20 +0100 Subject: [PATCH 33/35] feat: adds @property for new WeightedCost attributes, adds to docstring --- pybop/costs/_weighted_cost.py | 8 ++++++++ pybop/costs/base_cost.py | 4 ++++ 
tests/unit/test_cost.py | 24 ++++++++++++------------ 3 files changed, 24 insertions(+), 12 deletions(-) diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py index 32d48404..ca5ae461 100644 --- a/pybop/costs/_weighted_cost.py +++ b/pybop/costs/_weighted_cost.py @@ -23,6 +23,10 @@ class WeightedCost(BaseCost): _has_identical_problems : bool If True, the shared problem will be evaluated once and saved before the self._evaluate() method of each cost is called (default: False). + _has_seperable_problem: bool + If True, the shared problem is seperable from the cost function and + will be evaluated for each problem before the cost evaluation is + called (default: False). """ def __init__(self, *costs, weights: Optional[list[float]] = None): @@ -146,3 +150,7 @@ def _evaluateS1(self, inputs: Inputs): de = np.dot(de, self.weights) return e, de + + @property + def has_identical_problems(self): + return self._has_identical_problems diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index a03b4a26..813e27be 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -47,6 +47,10 @@ def __init__(self, problem: Optional[BaseProblem] = None): def n_parameters(self): return len(self.parameters) + @property + def has_separable_problem(self): + return self._has_separable_problem + def __call__(self, inputs: Union[Inputs, list], grad=None): """ Call the evaluate function for a given set of parameters. diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 562094d0..5a8bc630 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -329,8 +329,8 @@ def test_weighted_fitting_cost(self, problem): # Test with identical problems weight = 100 weighted_cost_2 = pybop.WeightedCost(cost1, cost2, weights=[1, weight]) - assert weighted_cost_2._has_identical_problems is True - assert weighted_cost_2._has_separable_problem is False + assert weighted_cost_2.has_identical_problems is True + assert weighted_cost_2.has_separable_problem is False assert weighted_cost_2.problem is problem assert weighted_cost_2([0.5]) >= 0 np.testing.assert_allclose( @@ -342,8 +342,8 @@ def test_weighted_fitting_cost(self, problem): # Test with different problems cost3 = pybop.RootMeanSquaredError(copy(problem)) weighted_cost_3 = pybop.WeightedCost(cost1, cost3, weights=[1, weight]) - assert weighted_cost_3._has_identical_problems is False - assert weighted_cost_3._has_separable_problem is False + assert weighted_cost_3.has_identical_problems is False + assert weighted_cost_3.has_separable_problem is False assert weighted_cost_3.problem is None assert weighted_cost_3([0.5]) >= 0 np.testing.assert_allclose( @@ -360,8 +360,8 @@ def test_weighted_fitting_cost(self, problem): # Test MAP explicitly cost4 = pybop.MAP(problem, pybop.GaussianLogLikelihood) weighted_cost_4 = pybop.WeightedCost(cost1, cost4, weights=[1, weight]) - assert weighted_cost_4._has_identical_problems is False - assert weighted_cost_4._has_separable_problem is False + assert weighted_cost_4.has_identical_problems is False + assert weighted_cost_4.has_separable_problem is False sigma = 0.05 assert weighted_cost_4([0.5, sigma]) <= 0 np.testing.assert_allclose( @@ -377,8 +377,8 @@ def test_weighted_design_cost(self, design_problem): # Test DesignCosts with identical problems weighted_cost = pybop.WeightedCost(cost1, cost2) - assert weighted_cost._has_identical_problems is True - assert weighted_cost._has_separable_problem is False + assert weighted_cost.has_identical_problems is True + assert 
weighted_cost.has_separable_problem is False
         assert weighted_cost.problem is design_problem
         assert weighted_cost([0.5]) >= 0
         np.testing.assert_allclose(
@@ -390,8 +390,8 @@ def test_weighted_design_cost(self, design_problem):
         # Test DesignCosts with different problems
         cost3 = pybop.VolumetricEnergyDensity(copy(design_problem))
         weighted_cost = pybop.WeightedCost(cost1, cost3)
-        assert weighted_cost._has_identical_problems is False
-        assert weighted_cost._has_separable_problem is False
+        assert weighted_cost.has_identical_problems is False
+        assert weighted_cost.has_separable_problem is False
         for i, _ in enumerate(weighted_cost.costs):
             assert isinstance(weighted_cost.costs[i].problem, pybop.DesignProblem)
@@ -418,8 +418,8 @@ def test_weighted_design_cost_with_update_capacity(self, design_problem):
         assert weighted_cost.costs[0].update_capacity is True
         assert weighted_cost.costs[1].update_capacity is False

-        assert weighted_cost._has_identical_problems is True
-        assert weighted_cost._has_separable_problem is False
+        assert weighted_cost.has_identical_problems is True
+        assert weighted_cost.has_separable_problem is False
         assert weighted_cost.problem is design_problem
         assert weighted_cost([0.5]) >= 0
         np.testing.assert_allclose(

From 0e503ae6210fea5878466e451e1fedf8505cef3a Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Thu, 1 Aug 2024 10:28:25 +0100
Subject: [PATCH 34/35] Suggestions from review

---
 pybop/costs/_weighted_cost.py | 5 +++--
 pybop/costs/base_cost.py | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py
index ca5ae461..a435c002 100644
--- a/pybop/costs/_weighted_cost.py
+++ b/pybop/costs/_weighted_cost.py
@@ -23,10 +23,11 @@ class WeightedCost(BaseCost):
     _has_identical_problems : bool
         If True, the shared problem will be evaluated once and saved before the
         self._evaluate() method of each cost is called (default: False).
-    _has_seperable_problem: bool
+    _has_separable_problem: bool
         If True, the shared problem is seperable from the cost function and
         will be evaluated for each problem before the cost evaluation is
-        called (default: False).
+        called (default: False). This attribute is used for sub-cost objects;
+        however, the top-level WeightedCost attribute is not used (i.e. == False).
     """

     def __init__(self, *costs, weights: Optional[list[float]] = None):
diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py
index 813e27be..e853ceba 100644
--- a/pybop/costs/base_cost.py
+++ b/pybop/costs/base_cost.py
@@ -23,7 +23,7 @@ class BaseCost:
     n_outputs : int
         The number of outputs in the model.
     _has_separable_problem : bool
-        If True, the problem is independent from the cost function and will be
+        If True, the problem is separable from the cost function and will be
         evaluated in advance of the call to self._evaluate() (default: False).
     """

From 1bedcd722d5ed6476ae3ee6a25807e3815a7b6bc Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Thu, 1 Aug 2024 17:13:52 +0100
Subject: [PATCH 35/35] Add changelog entry

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 567f88c8..f2ba3f2b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@

 ## Features

+- [#413](https://github.com/pybop-team/PyBOP/pull/413) - Adds `DesignCost` functionality to `WeightedCost` class with additional tests.
- [#357](https://github.com/pybop-team/PyBOP/pull/357) - Adds `Transformation()` class with `LogTransformation()`, `IdentityTransformation()`, and `ScaledTransformation()`, `ComposedTransformation()` implementations with corresponding examples and tests. - [#427](https://github.com/pybop-team/PyBOP/issues/427) - Adds the nbstripout pre-commit hook to remove unnecessary metadata from notebooks. - [#327](https://github.com/pybop-team/PyBOP/issues/327) - Adds the `WeightedCost` subclass, defines when to evaluate a problem and adds the `spm_weighted_cost` example script.
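[Illustrative aside: not part of any patch in this series.] Throughout the refactor, the weighted combination itself remains a plain linear map: the sub-cost values are stacked into a vector `e`, their gradients into the columns of a matrix `de`, and both are contracted with the weights via `np.dot`, exactly as in the `_evaluateS1` implementations above. A standalone numpy sketch with hypothetical numbers:

import numpy as np

weights = np.array([1.0, 100.0])   # as in WeightedCost(cost1, cost2, weights=[1, 100])
e = np.array([0.5, 0.01])          # sub-cost values (hypothetical)
de = np.array([[1.0, 0.02],        # de[i, j] = d(cost_j)/d(theta_i),
               [-0.3, 0.05]])      # shape (n_parameters, n_costs)

combined_cost = np.dot(e, weights)   # 0.5 + 100 * 0.01 = 1.5
combined_grad = np.dot(de, weights)  # [1.0 + 2.0, -0.3 + 5.0] = [3.0, 4.7]
print(combined_cost, combined_grad)

Because the combination is linear, the gradient of the weighted cost is the same weighted sum of the sub-cost gradients, so no extra chain-rule bookkeeping is needed.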