From 3a3f8283643fa64574f05a5145887cbccf33e408 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 14:41:02 +0100 Subject: [PATCH 01/19] New SolverMetrics dataclass (linopy/constants.py) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 7 optional fields: solver_name, solve_time, objective_value, best_bound, mip_gap, node_count, iteration_count — all default to None - Custom __repr__ that only shows non-None fields - Added as metrics field on Result (backward-compatible — defaults to None) Solver-specific metric extraction (linopy/solvers.py) - Base Solver class: _extract_metrics() returns solver_name + objective_value - Gurobi: extracts Runtime, ObjBound, MIPGap, NodeCount, IterCount - HiGHS: extracts getRunTime(), mip_node_count, simplex_iteration_count, mip_gap, mip_objective_bound - SCIP: extracts getSolvingTime(), getDualbound(), getGap(), getNNodes(), getNLPIterations() - CBC: uses already-parsed mip_gap and runtime from log output - All other solvers (GLPK, Cplex, Xpress, Mosek, COPT, MindOpt, cuPDLPx): use base class default - All 12 return Result(...) 
sites updated to pass metrics - Every attribute access is wrapped in try/except so extraction never breaks the solve Model integration (linopy/model.py) - _solver_metrics slot, initialized to None - solver_metrics property - Stored from result.metrics after solve() - Set to basic metrics in _mock_solve() - Reset to None in reset_solution() Package export (linopy/__init__.py) - SolverMetrics added to imports and __all__ Tests (test/test_solver_metrics.py) - 13 tests covering: dataclass defaults, partial values, repr, Result backward compat, Model integration (before/after solve, reset), parametrized solver-specific tests for both direct and file-IO solvers --- linopy/__init__.py | 3 +- linopy/constants.py | 33 +++++++- linopy/model.py | 14 ++++ linopy/solvers.py | 148 +++++++++++++++++++++++++++++--- test/test_solver_metrics.py | 162 ++++++++++++++++++++++++++++++++++++ 5 files changed, 346 insertions(+), 14 deletions(-) create mode 100644 test/test_solver_metrics.py diff --git a/linopy/__init__.py b/linopy/__init__.py index 3efc297a..bccd5c26 100644 --- a/linopy/__init__.py +++ b/linopy/__init__.py @@ -14,7 +14,7 @@ import linopy.monkey_patch_xarray # noqa: F401 from linopy.common import align from linopy.config import options -from linopy.constants import EQUAL, GREATER_EQUAL, LESS_EQUAL +from linopy.constants import EQUAL, GREATER_EQUAL, LESS_EQUAL, SolverMetrics from linopy.constraints import Constraint, Constraints from linopy.expressions import LinearExpression, QuadraticExpression, merge from linopy.io import read_netcdf @@ -34,6 +34,7 @@ "OetcHandler", "QuadraticExpression", "RemoteHandler", + "SolverMetrics", "Variable", "Variables", "available_solvers", diff --git a/linopy/constants.py b/linopy/constants.py index 021a9a10..1726e04e 100644 --- a/linopy/constants.py +++ b/linopy/constants.py @@ -211,6 +211,32 @@ class Solution: objective: float = field(default=np.nan) +@dataclass +class SolverMetrics: + """ + Unified solver performance metrics. 
+ + All fields default to None and are populated by solvers on a best-effort + basis. Fields that a particular solver cannot provide remain None. + """ + + solver_name: str | None = None + solve_time: float | None = None + objective_value: float | None = None + best_bound: float | None = None + mip_gap: float | None = None + node_count: float | None = None + iteration_count: float | None = None + + def __repr__(self) -> str: + fields = [] + for f in self.__dataclass_fields__: + val = getattr(self, f) + if val is not None: + fields.append(f"{f}={val!r}") + return f"SolverMetrics({', '.join(fields)})" + + @dataclass class Result: """ @@ -220,6 +246,7 @@ class Result: status: Status solution: Solution | None = None solver_model: Any = None + metrics: SolverMetrics | None = None def __repr__(self) -> str: solver_model_string = ( @@ -232,12 +259,16 @@ def __repr__(self) -> str: ) else: solution_string = "Solution: None\n" + metrics_string = "" + if self.metrics is not None: + metrics_string = f"Solver metrics: {self.metrics}\n" return ( f"Status: {self.status.status.value}\n" f"Termination condition: {self.status.termination_condition.value}\n" + solution_string + f"Solver model: {solver_model_string}\n" - f"Solver message: {self.status.legacy_status}" + + metrics_string + + f"Solver message: {self.status.legacy_status}" ) def info(self) -> None: diff --git a/linopy/model.py b/linopy/model.py index 871945ba..25adde49 100644 --- a/linopy/model.py +++ b/linopy/model.py @@ -41,6 +41,7 @@ LESS_EQUAL, TERM_DIM, ModelStatus, + SolverMetrics, TerminationCondition, ) from linopy.constraints import AnonymousScalarConstraint, Constraint, Constraints @@ -97,6 +98,7 @@ class Model: solver_model: Any solver_name: str + _solver_metrics: SolverMetrics | None _variables: Variables _constraints: Constraints _objective: Objective @@ -137,6 +139,7 @@ class Model: "_force_dim_names", "_auto_mask", "_solver_dir", + "_solver_metrics", "solver_model", "solver_name", "matrices", @@ -197,6 
+200,14 @@ def __init__( ) self.matrices: MatrixAccessor = MatrixAccessor(self) + self._solver_metrics: SolverMetrics | None = None + + @property + def solver_metrics(self) -> SolverMetrics | None: + """ + Solver performance metrics from the last solve, or None if not yet solved. + """ + return self._solver_metrics @property def variables(self) -> Variables: @@ -1413,6 +1424,7 @@ def solve( self.termination_condition = result.status.termination_condition.value self.solver_model = result.solver_model self.solver_name = solver_name + self._solver_metrics = result.metrics if not result.status.is_ok: return result.status.status.value, result.status.termination_condition.value @@ -1470,6 +1482,7 @@ def _mock_solve( self.termination_condition = TerminationCondition.optimal.value self.solver_model = None self.solver_name = solver_name + self._solver_metrics = SolverMetrics(solver_name="mock", objective_value=0.0) for name, var in self.variables.items(): var.solution = xr.DataArray(0.0, var.coords) @@ -1712,6 +1725,7 @@ def reset_solution(self) -> None: """ self.variables.reset_solution() self.constraints.reset_dual() + self._solver_metrics = None to_netcdf = to_netcdf diff --git a/linopy/solvers.py b/linopy/solvers.py index fe516b47..eaff2ec3 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -29,6 +29,7 @@ from linopy.constants import ( Result, Solution, + SolverMetrics, SolverStatus, Status, TerminationCondition, @@ -410,6 +411,19 @@ def solve_problem( msg = "No problem file or model specified." raise ValueError(msg) + def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + """ + Extract solver performance metrics. + + Default implementation returns basic metrics. Subclasses override + to provide solver-specific metrics. 
+ """ + objective = None if np.isnan(solution.objective) else solution.objective + return SolverMetrics( + solver_name=self.solver_name.value, + objective_value=objective, + ) + @property def solver_name(self) -> SolverName: return SolverName[self.__class__.__name__] @@ -598,7 +612,15 @@ def get_solver_solution() -> Solution: runtime = float(m.group(1)) CbcModel = namedtuple("CbcModel", ["mip_gap", "runtime"]) - return Result(status, solution, CbcModel(mip_gap, runtime)) + solver_model = CbcModel(mip_gap, runtime) + objective = None if np.isnan(solution.objective) else solution.objective + metrics = SolverMetrics( + solver_name=self.solver_name.value, + solve_time=runtime, + objective_value=objective, + mip_gap=mip_gap, + ) + return Result(status, solution, solver_model, metrics) class GLPK(Solver[None]): @@ -728,7 +750,8 @@ def solve_problem_from_file( if not os.path.exists(solution_fn): status = Status(SolverStatus.warning, TerminationCondition.unknown) - return Result(status, Solution()) + metrics = SolverMetrics(solver_name=self.solver_name.value) + return Result(status, Solution(), metrics=metrics) f = open(solution_fn) @@ -768,7 +791,8 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) - return Result(status, solution) + metrics = self._extract_metrics(None, solution) + return Result(status, solution, metrics=metrics) class Highs(Solver[None]): @@ -911,6 +935,39 @@ def solve_problem_from_file( sense=read_sense_from_problem_file(problem_fn), ) + def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + h = solver_model + objective = None if np.isnan(solution.objective) else solution.objective + metrics = SolverMetrics( + solver_name=self.solver_name.value, + objective_value=objective, + ) + try: + metrics.solve_time = h.getRunTime() + except Exception: + pass + try: + metrics.node_count = 
float(h.getInfoValue("mip_node_count")[1]) + except Exception: + pass + try: + metrics.iteration_count = float( + h.getInfoValue("simplex_iteration_count")[1] + ) + except Exception: + pass + try: + metrics.mip_gap = h.getInfoValue("mip_gap")[1] + except Exception: + pass + try: + obj_bound = h.getInfoValue("mip_objective_bound") + if obj_bound[0] == 0: # HighsStatus.kOk + metrics.best_bound = obj_bound[1] + except Exception: + pass + return metrics + def _set_solver_params( self, highs_solver: highspy.Highs, @@ -1019,7 +1076,8 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) - return Result(status, solution, h) + metrics = self._extract_metrics(h, solution) + return Result(status, solution, h, metrics) class Gurobi(Solver["gurobipy.Env | dict[str, Any] | None"]): @@ -1153,6 +1211,35 @@ def solve_problem_from_file( sense=sense, ) + def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + m = solver_model + objective = None if np.isnan(solution.objective) else solution.objective + metrics = SolverMetrics( + solver_name=self.solver_name.value, + objective_value=objective, + ) + try: + metrics.solve_time = m.Runtime + except Exception: + pass + try: + metrics.best_bound = m.ObjBound + except Exception: + pass + try: + metrics.mip_gap = m.MIPGap + except Exception: + pass + try: + metrics.node_count = float(m.NodeCount) + except Exception: + pass + try: + metrics.iteration_count = float(m.IterCount) + except Exception: + pass + return metrics + def _solve( self, m: gurobipy.Model, @@ -1254,7 +1341,8 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = solution = maybe_adjust_objective_sign(solution, io_api, sense) - return Result(status, solution, m) + metrics = self._extract_metrics(m, solution) + return Result(status, solution, m, 
metrics) class Cplex(Solver[None]): @@ -1410,7 +1498,8 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) - return Result(status, solution, m) + metrics = self._extract_metrics(m, solution) + return Result(status, solution, m, metrics) class SCIP(Solver[None]): @@ -1429,6 +1518,35 @@ def __init__( ) -> None: super().__init__(**solver_options) + def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + m = solver_model + objective = None if np.isnan(solution.objective) else solution.objective + metrics = SolverMetrics( + solver_name=self.solver_name.value, + objective_value=objective, + ) + try: + metrics.solve_time = m.getSolvingTime() + except Exception: + pass + try: + metrics.best_bound = m.getDualbound() + except Exception: + pass + try: + metrics.mip_gap = m.getGap() + except Exception: + pass + try: + metrics.node_count = float(m.getNNodes()) + except Exception: + pass + try: + metrics.iteration_count = float(m.getNLPIterations()) + except Exception: + pass + return metrics + def solve_problem_from_model( self, model: Model, @@ -1563,7 +1681,8 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) - return Result(status, solution, m) + metrics = self._extract_metrics(m, solution) + return Result(status, solution, m, metrics) class Xpress(Solver[None]): @@ -1733,7 +1852,8 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) - return Result(status, solution, m) + metrics = self._extract_metrics(m, solution) + return Result(status, solution, m, metrics) mosek_bas_re = re.compile(r" (XL|XU)\s+([^ \t]+)\s+([^ \t]+)| (LL|UL|BS)\s+([^ \t]+)") @@ -2075,7 +2195,8 
@@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) - return Result(status, solution) + metrics = self._extract_metrics(None, solution) + return Result(status, solution, metrics=metrics) class COPT(Solver[None]): @@ -2216,7 +2337,8 @@ def get_solver_solution() -> Solution: env_.close() - return Result(status, solution, m) + metrics = self._extract_metrics(m, solution) + return Result(status, solution, m, metrics) class MindOpt(Solver[None]): @@ -2360,7 +2482,8 @@ def get_solver_solution() -> Solution: m.dispose() env_.dispose() - return Result(status, solution, m) + metrics = self._extract_metrics(m, solution) + return Result(status, solution, m, metrics) class PIPS(Solver[None]): @@ -2609,7 +2732,8 @@ def get_solver_solution() -> Solution: solution = maybe_adjust_objective_sign(solution, io_api, sense) # see https://github.com/MIT-Lu-Lab/cuPDLPx/tree/main/python#solution-attributes - return Result(status, solution, cu_model) + metrics = self._extract_metrics(cu_model, solution) + return Result(status, solution, cu_model, metrics) def _set_solver_params(self, cu_model: cupdlpx.Model) -> None: """ diff --git a/test/test_solver_metrics.py b/test/test_solver_metrics.py new file mode 100644 index 00000000..decde7f6 --- /dev/null +++ b/test/test_solver_metrics.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 +""" +Tests for the SolverMetrics feature. 
+""" + +from __future__ import annotations + +import numpy as np +import pytest +import xarray as xr + +from linopy import Model, available_solvers +from linopy.constants import Result, Solution, SolverMetrics, Status +from linopy.solver_capabilities import SolverFeature, get_available_solvers_with_feature + +# --------------------------------------------------------------------------- +# SolverMetrics dataclass tests +# --------------------------------------------------------------------------- + + +def test_solver_metrics_defaults() -> None: + m = SolverMetrics() + assert m.solver_name is None + assert m.solve_time is None + assert m.objective_value is None + assert m.best_bound is None + assert m.mip_gap is None + assert m.node_count is None + assert m.iteration_count is None + + +def test_solver_metrics_partial() -> None: + m = SolverMetrics(solver_name="highs", solve_time=1.5) + assert m.solver_name == "highs" + assert m.solve_time == 1.5 + assert m.objective_value is None + + +def test_solver_metrics_repr_only_non_none() -> None: + m = SolverMetrics(solver_name="gurobi", solve_time=2.3) + r = repr(m) + assert "solver_name='gurobi'" in r + assert "solve_time=2.3" in r + assert "objective_value" not in r + assert "best_bound" not in r + + +def test_solver_metrics_repr_empty() -> None: + m = SolverMetrics() + assert repr(m) == "SolverMetrics()" + + +# --------------------------------------------------------------------------- +# Result backward compatibility tests +# --------------------------------------------------------------------------- + + +def test_result_without_metrics() -> None: + """Result without metrics should still work (backward compatible).""" + status = Status.from_termination_condition("optimal") + result = Result(status=status, solution=Solution()) + assert result.metrics is None + # repr should not crash + repr(result) + + +def test_result_with_metrics() -> None: + status = Status.from_termination_condition("optimal") + metrics = 
SolverMetrics(solver_name="test", solve_time=1.0) + result = Result(status=status, solution=Solution(), metrics=metrics) + assert result.metrics is not None + assert result.metrics.solver_name == "test" + r = repr(result) + assert "Solver metrics:" in r + + +# --------------------------------------------------------------------------- +# Model integration tests +# --------------------------------------------------------------------------- + + +def test_model_metrics_none_before_solve() -> None: + m = Model() + assert m.solver_metrics is None + + +def test_model_metrics_populated_after_mock_solve() -> None: + m = Model() + x = m.add_variables( + lower=xr.DataArray(np.zeros(5), dims=["i"]), + upper=xr.DataArray(np.ones(5), dims=["i"]), + name="x", + ) + m.add_objective(x.sum()) + m.solve(mock_solve=True) + assert m.solver_metrics is not None + assert m.solver_metrics.solver_name == "mock" + assert m.solver_metrics.objective_value == 0.0 + + +def test_model_metrics_reset() -> None: + m = Model() + x = m.add_variables( + lower=xr.DataArray(np.zeros(5), dims=["i"]), + upper=xr.DataArray(np.ones(5), dims=["i"]), + name="x", + ) + m.add_objective(x.sum()) + m.solve(mock_solve=True) + assert m.solver_metrics is not None + m.reset_solution() + assert m.solver_metrics is None + + +# --------------------------------------------------------------------------- +# Solver-specific integration tests (parametrized over available solvers) +# --------------------------------------------------------------------------- + +direct_solvers = get_available_solvers_with_feature( + SolverFeature.DIRECT_API, available_solvers +) +file_io_solvers = get_available_solvers_with_feature( + SolverFeature.READ_MODEL_FROM_FILE, available_solvers +) + + +def _make_simple_model() -> Model: + m = Model() + x = m.add_variables( + lower=xr.DataArray(np.zeros(3), dims=["i"]), + upper=xr.DataArray(np.ones(3), dims=["i"]), + name="x", + ) + m.add_constraints(x.sum() >= 1, name="con") + 
m.add_objective(x.sum()) + return m + + +@pytest.mark.parametrize("solver", direct_solvers) +def test_solver_metrics_direct(solver: str) -> None: + m = _make_simple_model() + m.solve(solver_name=solver, io_api="direct") + metrics = m.solver_metrics + assert metrics is not None + assert metrics.solver_name == solver + assert metrics.objective_value is not None + assert metrics.objective_value == pytest.approx(1.0) + # Direct API solvers should generally report solve_time + if solver in ("gurobi", "highs"): + assert metrics.solve_time is not None + assert metrics.solve_time >= 0 + + +@pytest.mark.parametrize("solver", file_io_solvers) +def test_solver_metrics_file_io(solver: str) -> None: + m = _make_simple_model() + m.solve(solver_name=solver, io_api="lp") + metrics = m.solver_metrics + assert metrics is not None + assert metrics.solver_name == solver + assert metrics.objective_value is not None + assert metrics.objective_value == pytest.approx(1.0) From b42d2d4ac4376816145d388b6cfd184e87f6c868 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 14:43:51 +0100 Subject: [PATCH 02/19] Move extraction into method --- linopy/solvers.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/linopy/solvers.py b/linopy/solvers.py index eaff2ec3..c9e2b963 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -445,6 +445,22 @@ def __init__( ) -> None: super().__init__(**solver_options) + def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + objective = None if np.isnan(solution.objective) else solution.objective + metrics = SolverMetrics( + solver_name=self.solver_name.value, + objective_value=objective, + ) + try: + metrics.solve_time = solver_model.runtime + except Exception: + pass + try: + metrics.mip_gap = solver_model.mip_gap + except Exception: + pass + return metrics + def solve_problem_from_model( self, model: Model, @@ -613,13 +629,7 @@ 
def get_solver_solution() -> Solution: CbcModel = namedtuple("CbcModel", ["mip_gap", "runtime"]) solver_model = CbcModel(mip_gap, runtime) - objective = None if np.isnan(solution.objective) else solution.objective - metrics = SolverMetrics( - solver_name=self.solver_name.value, - solve_time=runtime, - objective_value=objective, - mip_gap=mip_gap, - ) + metrics = self._extract_metrics(solver_model, solution) return Result(status, solution, solver_model, metrics) From c5322c420fae8e4d9d48495a954901dfa74e895b Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 14:45:06 +0100 Subject: [PATCH 03/19] Remove some metrics --- linopy/constants.py | 2 -- linopy/solvers.py | 26 -------------------------- test/test_solver_metrics.py | 2 -- 3 files changed, 30 deletions(-) diff --git a/linopy/constants.py b/linopy/constants.py index 1726e04e..d7b0595b 100644 --- a/linopy/constants.py +++ b/linopy/constants.py @@ -225,8 +225,6 @@ class SolverMetrics: objective_value: float | None = None best_bound: float | None = None mip_gap: float | None = None - node_count: float | None = None - iteration_count: float | None = None def __repr__(self) -> str: fields = [] diff --git a/linopy/solvers.py b/linopy/solvers.py index c9e2b963..eeedffc5 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -956,16 +956,6 @@ def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetri metrics.solve_time = h.getRunTime() except Exception: pass - try: - metrics.node_count = float(h.getInfoValue("mip_node_count")[1]) - except Exception: - pass - try: - metrics.iteration_count = float( - h.getInfoValue("simplex_iteration_count")[1] - ) - except Exception: - pass try: metrics.mip_gap = h.getInfoValue("mip_gap")[1] except Exception: @@ -1240,14 +1230,6 @@ def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetri metrics.mip_gap = m.MIPGap except Exception: pass - try: - metrics.node_count = 
float(m.NodeCount) - except Exception: - pass - try: - metrics.iteration_count = float(m.IterCount) - except Exception: - pass return metrics def _solve( @@ -1547,14 +1529,6 @@ def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetri metrics.mip_gap = m.getGap() except Exception: pass - try: - metrics.node_count = float(m.getNNodes()) - except Exception: - pass - try: - metrics.iteration_count = float(m.getNLPIterations()) - except Exception: - pass return metrics def solve_problem_from_model( diff --git a/test/test_solver_metrics.py b/test/test_solver_metrics.py index decde7f6..6928778f 100644 --- a/test/test_solver_metrics.py +++ b/test/test_solver_metrics.py @@ -25,8 +25,6 @@ def test_solver_metrics_defaults() -> None: assert m.objective_value is None assert m.best_bound is None assert m.mip_gap is None - assert m.node_count is None - assert m.iteration_count is None def test_solver_metrics_partial() -> None: From 5e2afdb65553d46ecd994a97c388ed6e295205b6 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 14:48:55 +0100 Subject: [PATCH 04/19] Extract safe get --- linopy/solvers.py | 107 +++++++++++++++++++--------------------------- 1 file changed, 43 insertions(+), 64 deletions(-) diff --git a/linopy/solvers.py b/linopy/solvers.py index eeedffc5..51eb164b 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -217,6 +217,14 @@ class xpress_Namespaces: # type: ignore[no-redef] logger = logging.getLogger(__name__) +def _safe_get(func: Callable[[], Any]) -> Any: + """Call *func* and return its result, or None if it raises.""" + try: + return func() + except Exception: + return None + + io_structure = dict( lp_file={ "gurobi", @@ -418,10 +426,11 @@ def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetri Default implementation returns basic metrics. Subclasses override to provide solver-specific metrics. 
""" - objective = None if np.isnan(solution.objective) else solution.objective return SolverMetrics( solver_name=self.solver_name.value, - objective_value=objective, + objective_value=_safe_get( + lambda: solution.objective if not np.isnan(solution.objective) else None + ), ) @property @@ -446,20 +455,14 @@ def __init__( super().__init__(**solver_options) def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: - objective = None if np.isnan(solution.objective) else solution.objective - metrics = SolverMetrics( + return SolverMetrics( solver_name=self.solver_name.value, - objective_value=objective, + objective_value=_safe_get( + lambda: solution.objective if not np.isnan(solution.objective) else None + ), + solve_time=_safe_get(lambda: solver_model.runtime), + mip_gap=_safe_get(lambda: solver_model.mip_gap), ) - try: - metrics.solve_time = solver_model.runtime - except Exception: - pass - try: - metrics.mip_gap = solver_model.mip_gap - except Exception: - pass - return metrics def solve_problem_from_model( self, @@ -947,26 +950,20 @@ def solve_problem_from_file( def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: h = solver_model - objective = None if np.isnan(solution.objective) else solution.objective - metrics = SolverMetrics( + + def _highs_best_bound() -> float | None: + status, val = h.getInfoValue("mip_objective_bound") + return val if status == 0 else None # 0 = HighsStatus.kOk + + return SolverMetrics( solver_name=self.solver_name.value, - objective_value=objective, + objective_value=_safe_get( + lambda: solution.objective if not np.isnan(solution.objective) else None + ), + solve_time=_safe_get(lambda: h.getRunTime()), + mip_gap=_safe_get(lambda: h.getInfoValue("mip_gap")[1]), + best_bound=_safe_get(_highs_best_bound), ) - try: - metrics.solve_time = h.getRunTime() - except Exception: - pass - try: - metrics.mip_gap = h.getInfoValue("mip_gap")[1] - except Exception: - pass - try: - obj_bound = 
h.getInfoValue("mip_objective_bound") - if obj_bound[0] == 0: # HighsStatus.kOk - metrics.best_bound = obj_bound[1] - except Exception: - pass - return metrics def _set_solver_params( self, @@ -1213,24 +1210,15 @@ def solve_problem_from_file( def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: m = solver_model - objective = None if np.isnan(solution.objective) else solution.objective - metrics = SolverMetrics( + return SolverMetrics( solver_name=self.solver_name.value, - objective_value=objective, + objective_value=_safe_get( + lambda: solution.objective if not np.isnan(solution.objective) else None + ), + solve_time=_safe_get(lambda: m.Runtime), + best_bound=_safe_get(lambda: m.ObjBound), + mip_gap=_safe_get(lambda: m.MIPGap), ) - try: - metrics.solve_time = m.Runtime - except Exception: - pass - try: - metrics.best_bound = m.ObjBound - except Exception: - pass - try: - metrics.mip_gap = m.MIPGap - except Exception: - pass - return metrics def _solve( self, @@ -1512,24 +1500,15 @@ def __init__( def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: m = solver_model - objective = None if np.isnan(solution.objective) else solution.objective - metrics = SolverMetrics( + return SolverMetrics( solver_name=self.solver_name.value, - objective_value=objective, + objective_value=_safe_get( + lambda: solution.objective if not np.isnan(solution.objective) else None + ), + solve_time=_safe_get(lambda: m.getSolvingTime()), + best_bound=_safe_get(lambda: m.getDualbound()), + mip_gap=_safe_get(lambda: m.getGap()), ) - try: - metrics.solve_time = m.getSolvingTime() - except Exception: - pass - try: - metrics.best_bound = m.getDualbound() - except Exception: - pass - try: - metrics.mip_gap = m.getGap() - except Exception: - pass - return metrics def solve_problem_from_model( self, From aa811d447273fb42be6cacffec66804f2361aac8 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 
11 Feb 2026 14:54:26 +0100 Subject: [PATCH 05/19] Add base population --- linopy/solvers.py | 65 +++++++++++++++++++---------------------------- 1 file changed, 26 insertions(+), 39 deletions(-) diff --git a/linopy/solvers.py b/linopy/solvers.py index 51eb164b..30ddbe0d 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -423,8 +423,9 @@ def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetri """ Extract solver performance metrics. - Default implementation returns basic metrics. Subclasses override - to provide solver-specific metrics. + Base implementation populates solver_name and objective_value. + Subclasses should call super(), then set solver-specific fields + on the returned object. """ return SolverMetrics( solver_name=self.solver_name.value, @@ -455,14 +456,10 @@ def __init__( super().__init__(**solver_options) def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: - return SolverMetrics( - solver_name=self.solver_name.value, - objective_value=_safe_get( - lambda: solution.objective if not np.isnan(solution.objective) else None - ), - solve_time=_safe_get(lambda: solver_model.runtime), - mip_gap=_safe_get(lambda: solver_model.mip_gap), - ) + metrics = super()._extract_metrics(solver_model, solution) + metrics.solve_time = _safe_get(lambda: solver_model.runtime) + metrics.mip_gap = _safe_get(lambda: solver_model.mip_gap) + return metrics def solve_problem_from_model( self, @@ -763,8 +760,10 @@ def solve_problem_from_file( if not os.path.exists(solution_fn): status = Status(SolverStatus.warning, TerminationCondition.unknown) - metrics = SolverMetrics(solver_name=self.solver_name.value) - return Result(status, Solution(), metrics=metrics) + solution = Solution() + return Result( + status, solution, metrics=self._extract_metrics(None, solution) + ) f = open(solution_fn) @@ -950,20 +949,16 @@ def solve_problem_from_file( def _extract_metrics(self, solver_model: Any, solution: Solution) -> 
SolverMetrics: h = solver_model + metrics = super()._extract_metrics(solver_model, solution) + metrics.solve_time = _safe_get(lambda: h.getRunTime()) + metrics.mip_gap = _safe_get(lambda: h.getInfoValue("mip_gap")[1]) def _highs_best_bound() -> float | None: status, val = h.getInfoValue("mip_objective_bound") return val if status == 0 else None # 0 = HighsStatus.kOk - return SolverMetrics( - solver_name=self.solver_name.value, - objective_value=_safe_get( - lambda: solution.objective if not np.isnan(solution.objective) else None - ), - solve_time=_safe_get(lambda: h.getRunTime()), - mip_gap=_safe_get(lambda: h.getInfoValue("mip_gap")[1]), - best_bound=_safe_get(_highs_best_bound), - ) + metrics.best_bound = _safe_get(_highs_best_bound) + return metrics def _set_solver_params( self, @@ -1210,15 +1205,11 @@ def solve_problem_from_file( def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: m = solver_model - return SolverMetrics( - solver_name=self.solver_name.value, - objective_value=_safe_get( - lambda: solution.objective if not np.isnan(solution.objective) else None - ), - solve_time=_safe_get(lambda: m.Runtime), - best_bound=_safe_get(lambda: m.ObjBound), - mip_gap=_safe_get(lambda: m.MIPGap), - ) + metrics = super()._extract_metrics(solver_model, solution) + metrics.solve_time = _safe_get(lambda: m.Runtime) + metrics.best_bound = _safe_get(lambda: m.ObjBound) + metrics.mip_gap = _safe_get(lambda: m.MIPGap) + return metrics def _solve( self, @@ -1500,15 +1491,11 @@ def __init__( def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: m = solver_model - return SolverMetrics( - solver_name=self.solver_name.value, - objective_value=_safe_get( - lambda: solution.objective if not np.isnan(solution.objective) else None - ), - solve_time=_safe_get(lambda: m.getSolvingTime()), - best_bound=_safe_get(lambda: m.getDualbound()), - mip_gap=_safe_get(lambda: m.getGap()), - ) + metrics = 
super()._extract_metrics(solver_model, solution) + metrics.solve_time = _safe_get(lambda: m.getSolvingTime()) + metrics.best_bound = _safe_get(lambda: m.getDualbound()) + metrics.mip_gap = _safe_get(lambda: m.getGap()) + return metrics def solve_problem_from_model( self, From c614b6ba32f100613ab547046b5e918d247733a0 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 15:39:37 +0100 Subject: [PATCH 06/19] Improve docstrings --- linopy/constants.py | 5 +++-- linopy/model.py | 18 +++++++++++++++++- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/linopy/constants.py b/linopy/constants.py index d7b0595b..c3fc3f6d 100644 --- a/linopy/constants.py +++ b/linopy/constants.py @@ -216,8 +216,9 @@ class SolverMetrics: """ Unified solver performance metrics. - All fields default to None and are populated by solvers on a best-effort - basis. Fields that a particular solver cannot provide remain None. + All fields default to ``None``. Solvers populate what they can; + unsupported fields remain ``None``. Access via + :attr:`Model.solver_metrics` after calling :meth:`Model.solve`. """ solver_name: str | None = None diff --git a/linopy/model.py b/linopy/model.py index 25adde49..c71b9ad2 100644 --- a/linopy/model.py +++ b/linopy/model.py @@ -205,7 +205,23 @@ def __init__( @property def solver_metrics(self) -> SolverMetrics | None: """ - Solver performance metrics from the last solve, or None if not yet solved. + Solver performance metrics from the last solve, or ``None`` + if the model has not been solved yet. + + Returns a :class:`~linopy.constants.SolverMetrics` with fields + ``solver_name``, ``solve_time``, ``objective_value``, + ``best_bound``, and ``mip_gap``. Fields the solver cannot + provide remain ``None``. + + Reset to ``None`` by :meth:`reset_solution`. 
+ + Examples + -------- + >>> m.solve(solver_name="highs") # doctest: +SKIP + >>> m.solver_metrics.solve_time # doctest: +SKIP + 0.003 + >>> m.solver_metrics.objective_value # doctest: +SKIP + 0.0 """ return self._solver_metrics From 3b349c11ec78a9e0aa21fd198571a35b3ad0d77e Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 15:52:24 +0100 Subject: [PATCH 07/19] Update CHangelog --- doc/release_notes.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 60926055..28bfd655 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -4,6 +4,8 @@ Release Notes Upcoming Version ---------------- +* Add unified ``SolverMetrics`` dataclass accessible via ``Model.solver_metrics`` after solving. Provides ``solver_name``, ``solve_time``, ``objective_value``, ``best_bound``, and ``mip_gap`` in a solver-independent way. Gurobi, HiGHS, SCIP, and CBC populate solver-specific fields; other solvers provide the baseline. + Version 0.6.3 -------------- From 5e815a7f04d2b26647a2562ff75e955355af3804 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 16:14:09 +0100 Subject: [PATCH 08/19] Make dataclass frozen and add some more solvers --- linopy/constants.py | 9 ++-- linopy/solvers.py | 100 +++++++++++++++++++++++++++++------- test/test_solver_metrics.py | 6 +++ 3 files changed, 93 insertions(+), 22 deletions(-) diff --git a/linopy/constants.py b/linopy/constants.py index c3fc3f6d..44e13654 100644 --- a/linopy/constants.py +++ b/linopy/constants.py @@ -3,6 +3,7 @@ Linopy module for defining constant values used within the package. """ +import dataclasses import logging from dataclasses import dataclass, field from enum import Enum @@ -211,7 +212,7 @@ class Solution: objective: float = field(default=np.nan) -@dataclass +@dataclass(frozen=True) class SolverMetrics: """ Unified solver performance metrics. 
@@ -229,10 +230,10 @@ class SolverMetrics: def __repr__(self) -> str: fields = [] - for f in self.__dataclass_fields__: - val = getattr(self, f) + for f in dataclasses.fields(self): + val = getattr(self, f.name) if val is not None: - fields.append(f"{f}={val!r}") + fields.append(f"{f.name}={val!r}") return f"SolverMetrics({', '.join(fields)})" diff --git a/linopy/solvers.py b/linopy/solvers.py index 30ddbe0d..51e4bf00 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -6,6 +6,7 @@ from __future__ import annotations import contextlib +import dataclasses import enum import io import logging @@ -222,6 +223,7 @@ def _safe_get(func: Callable[[], Any]) -> Any: try: return func() except Exception: + logger.debug("Failed to extract solver metric", exc_info=True) return None @@ -457,9 +459,11 @@ def __init__( def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: metrics = super()._extract_metrics(solver_model, solution) - metrics.solve_time = _safe_get(lambda: solver_model.runtime) - metrics.mip_gap = _safe_get(lambda: solver_model.mip_gap) - return metrics + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: solver_model.runtime), + mip_gap=_safe_get(lambda: solver_model.mip_gap), + ) def solve_problem_from_model( self, @@ -950,15 +954,17 @@ def solve_problem_from_file( def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: h = solver_model metrics = super()._extract_metrics(solver_model, solution) - metrics.solve_time = _safe_get(lambda: h.getRunTime()) - metrics.mip_gap = _safe_get(lambda: h.getInfoValue("mip_gap")[1]) def _highs_best_bound() -> float | None: status, val = h.getInfoValue("mip_objective_bound") return val if status == 0 else None # 0 = HighsStatus.kOk - metrics.best_bound = _safe_get(_highs_best_bound) - return metrics + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: h.getRunTime()), + mip_gap=_safe_get(lambda: h.getInfoValue("mip_gap")[1]), + 
best_bound=_safe_get(_highs_best_bound), + ) def _set_solver_params( self, @@ -1206,10 +1212,12 @@ def solve_problem_from_file( def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: m = solver_model metrics = super()._extract_metrics(solver_model, solution) - metrics.solve_time = _safe_get(lambda: m.Runtime) - metrics.best_bound = _safe_get(lambda: m.ObjBound) - metrics.mip_gap = _safe_get(lambda: m.MIPGap) - return metrics + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: m.Runtime), + best_bound=_safe_get(lambda: m.ObjBound), + mip_gap=_safe_get(lambda: m.MIPGap), + ) def _solve( self, @@ -1336,6 +1344,16 @@ def __init__( ) -> None: super().__init__(**solver_options) + def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + m = solver_model + metrics = super()._extract_metrics(solver_model, solution) + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: m.solution.progress.get_time()), + best_bound=_safe_get(lambda: m.solution.MIP.get_best_objective()), + mip_gap=_safe_get(lambda: m.solution.MIP.get_mip_relative_gap()), + ) + def solve_problem_from_model( self, model: Model, @@ -1492,10 +1510,12 @@ def __init__( def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: m = solver_model metrics = super()._extract_metrics(solver_model, solution) - metrics.solve_time = _safe_get(lambda: m.getSolvingTime()) - metrics.best_bound = _safe_get(lambda: m.getDualbound()) - metrics.mip_gap = _safe_get(lambda: m.getGap()) - return metrics + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: m.getSolvingTime()), + best_bound=_safe_get(lambda: m.getDualbound()), + mip_gap=_safe_get(lambda: m.getGap()), + ) def solve_problem_from_model( self, @@ -1654,6 +1674,16 @@ def __init__( ) -> None: super().__init__(**solver_options) + def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + m = solver_model + 
metrics = super()._extract_metrics(solver_model, solution) + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: m.attributes.time), + best_bound=_safe_get(lambda: m.attributes.bestbound), + mip_gap=_safe_get(lambda: m.attributes.miprelgap), + ) + def solve_problem_from_model( self, model: Model, @@ -1835,6 +1865,14 @@ def __init__( ) -> None: super().__init__(**solver_options) + def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + m = solver_model + metrics = super()._extract_metrics(solver_model, solution) + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: m.getdouinf(mosek.dinfitem.optimizer_time)), + ) + def solve_problem_from_model( self, model: Model, @@ -2145,7 +2183,7 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) - metrics = self._extract_metrics(None, solution) + metrics = self._extract_metrics(m, solution) return Result(status, solution, metrics=metrics) @@ -2170,6 +2208,16 @@ def __init( ) -> None: super().__init__(**solver_options) + def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + m = solver_model + metrics = super()._extract_metrics(solver_model, solution) + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: m.SolvingTime), + best_bound=_safe_get(lambda: m.BestBnd), + mip_gap=_safe_get(lambda: m.BestGap), + ) + def solve_problem_from_model( self, model: Model, @@ -2285,9 +2333,9 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) + metrics = self._extract_metrics(m, solution) env_.close() - metrics = self._extract_metrics(m, solution) return Result(status, solution, m, metrics) @@ -2312,6 +2360,14 @@ def __init( ) -> None: super().__init__(**solver_options) + def 
_extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + m = solver_model + metrics = super()._extract_metrics(solver_model, solution) + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: m.getAttr("SolvingTime")), + ) + def solve_problem_from_model( self, model: Model, @@ -2429,10 +2485,11 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) + metrics = self._extract_metrics(m, solution) + m.dispose() env_.dispose() - metrics = self._extract_metrics(m, solution) return Result(status, solution, m, metrics) @@ -2476,6 +2533,13 @@ def __init__( ) -> None: super().__init__(**solver_options) + def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + metrics = super()._extract_metrics(solver_model, solution) + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: solver_model.SolveTime), + ) + def solve_problem_from_file( self, problem_fn: Path, diff --git a/test/test_solver_metrics.py b/test/test_solver_metrics.py index 6928778f..f57e7492 100644 --- a/test/test_solver_metrics.py +++ b/test/test_solver_metrics.py @@ -48,6 +48,12 @@ def test_solver_metrics_repr_empty() -> None: assert repr(m) == "SolverMetrics()" +def test_solver_metrics_frozen() -> None: + m = SolverMetrics(solver_name="test") + with pytest.raises(AttributeError): + m.solver_name = "other" # type: ignore[misc] + + # --------------------------------------------------------------------------- # Result backward compatibility tests # --------------------------------------------------------------------------- From cbc5a31152ef9a8b864ca68281412e798eeb0d7d Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 16:40:04 +0100 Subject: [PATCH 09/19] =?UTF-8?q?=20=20-=20Added=20test=5Fsolver=5Fmetrics?= 
=?UTF-8?q?=5Ffrozen=20=E2=80=94=20verifies=20frozen=20dataclass=20behavio?= =?UTF-8?q?r=20=20=20-=20Added=20mock-based=20unit=20tests=20for=20all=201?= =?UTF-8?q?0=20solver=20overrides=20(CBC,=20Highs,=20Gurobi,=20SCIP,=20Cpl?= =?UTF-8?q?ex,=20Xpress,=20Mosek,=20COPT,=20MindOpt,=20cuPDLPx)=20=20=20-?= =?UTF-8?q?=20Added=20test=5Fextract=5Fmetrics=5Fgraceful=5Fon=5Fmissing?= =?UTF-8?q?=5Fattr=20=E2=80=94=20verifies=20=5Fsafe=5Fget=20degrades=20gra?= =?UTF-8?q?cefully=20=20=20-=20Tests=20skip=20for=20unavailable=20solvers?= =?UTF-8?q?=20using=20@pytest.mark.skipif?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- test/test_solver_metrics.py | 142 ++++++++++++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) diff --git a/test/test_solver_metrics.py b/test/test_solver_metrics.py index f57e7492..2a1be9e4 100644 --- a/test/test_solver_metrics.py +++ b/test/test_solver_metrics.py @@ -5,6 +5,9 @@ from __future__ import annotations +from types import SimpleNamespace +from unittest.mock import MagicMock + import numpy as np import pytest import xarray as xr @@ -12,6 +15,18 @@ from linopy import Model, available_solvers from linopy.constants import Result, Solution, SolverMetrics, Status from linopy.solver_capabilities import SolverFeature, get_available_solvers_with_feature +from linopy.solvers import ( + CBC, + COPT, + SCIP, + Cplex, + Gurobi, + Highs, + MindOpt, + Mosek, + Xpress, + cuPDLPx, +) # --------------------------------------------------------------------------- # SolverMetrics dataclass tests @@ -164,3 +179,130 @@ def test_solver_metrics_file_io(solver: str) -> None: assert metrics.solver_name == solver assert metrics.objective_value is not None assert metrics.objective_value == pytest.approx(1.0) + + +# --------------------------------------------------------------------------- +# Mock-based _extract_metrics unit tests for each solver override +# 
--------------------------------------------------------------------------- + +_SOLUTION = Solution(objective=42.0) + + +@pytest.mark.skipif("cbc" not in available_solvers, reason="CBC not installed") +def test_cbc_extract_metrics() -> None: + solver_model = SimpleNamespace(runtime=1.5, mip_gap=0.01) + solver = CBC() + metrics = solver._extract_metrics(solver_model, _SOLUTION) + assert metrics.solve_time == 1.5 + assert metrics.mip_gap == 0.01 + + +@pytest.mark.skipif("highs" not in available_solvers, reason="HiGHS not installed") +def test_highs_extract_metrics() -> None: + solver_model = MagicMock() + solver_model.getRunTime.return_value = 2.0 + solver_model.getInfoValue.side_effect = lambda key: { + "mip_gap": (0, 0.05), + "mip_objective_bound": (0, 40.0), + }[key] + solver = Highs() + metrics = solver._extract_metrics(solver_model, _SOLUTION) + assert metrics.solve_time == 2.0 + assert metrics.mip_gap == 0.05 + assert metrics.best_bound == 40.0 + + +@pytest.mark.skipif("gurobi" not in available_solvers, reason="Gurobi not installed") +def test_gurobi_extract_metrics() -> None: + solver_model = SimpleNamespace(Runtime=3.0, ObjBound=39.0, MIPGap=0.02) + solver = Gurobi() + metrics = solver._extract_metrics(solver_model, _SOLUTION) + assert metrics.solve_time == 3.0 + assert metrics.best_bound == 39.0 + assert metrics.mip_gap == 0.02 + + +@pytest.mark.skipif("scip" not in available_solvers, reason="SCIP not installed") +def test_scip_extract_metrics() -> None: + solver_model = MagicMock() + solver_model.getSolvingTime.return_value = 4.0 + solver_model.getDualbound.return_value = 38.0 + solver_model.getGap.return_value = 0.03 + solver = SCIP() + metrics = solver._extract_metrics(solver_model, _SOLUTION) + assert metrics.solve_time == 4.0 + assert metrics.best_bound == 38.0 + assert metrics.mip_gap == 0.03 + + +@pytest.mark.skipif("cplex" not in available_solvers, reason="CPLEX not installed") +def test_cplex_extract_metrics() -> None: + solver_model = MagicMock() + 
solver_model.solution.progress.get_time.return_value = 5.0 + solver_model.solution.MIP.get_best_objective.return_value = 37.0 + solver_model.solution.MIP.get_mip_relative_gap.return_value = 0.04 + solver = Cplex() + metrics = solver._extract_metrics(solver_model, _SOLUTION) + assert metrics.solve_time == 5.0 + assert metrics.best_bound == 37.0 + assert metrics.mip_gap == 0.04 + + +@pytest.mark.skipif("xpress" not in available_solvers, reason="Xpress not installed") +def test_xpress_extract_metrics() -> None: + solver_model = SimpleNamespace( + attributes=SimpleNamespace(time=6.0, bestbound=36.0, miprelgap=0.05) + ) + solver = Xpress() + metrics = solver._extract_metrics(solver_model, _SOLUTION) + assert metrics.solve_time == 6.0 + assert metrics.best_bound == 36.0 + assert metrics.mip_gap == 0.05 + + +@pytest.mark.skipif("mosek" not in available_solvers, reason="Mosek not installed") +def test_mosek_extract_metrics() -> None: + solver_model = MagicMock() + solver_model.getdouinf.return_value = 7.0 + solver = Mosek() + metrics = solver._extract_metrics(solver_model, _SOLUTION) + assert metrics.solve_time == 7.0 + + +@pytest.mark.skipif("copt" not in available_solvers, reason="COPT not installed") +def test_copt_extract_metrics() -> None: + solver_model = SimpleNamespace(SolvingTime=8.0, BestBnd=35.0, BestGap=0.06) + solver = COPT() + metrics = solver._extract_metrics(solver_model, _SOLUTION) + assert metrics.solve_time == 8.0 + assert metrics.best_bound == 35.0 + assert metrics.mip_gap == 0.06 + + +@pytest.mark.skipif("mindopt" not in available_solvers, reason="MindOpt not installed") +def test_mindopt_extract_metrics() -> None: + solver_model = MagicMock() + solver_model.getAttr.return_value = 9.0 + solver = MindOpt() + metrics = solver._extract_metrics(solver_model, _SOLUTION) + assert metrics.solve_time == 9.0 + + +@pytest.mark.skipif("cupdlpx" not in available_solvers, reason="cuPDLPx not installed") +def test_cupdlpx_extract_metrics() -> None: + solver_model = 
SimpleNamespace(SolveTime=10.0) + solver = cuPDLPx() + metrics = solver._extract_metrics(solver_model, _SOLUTION) + assert metrics.solve_time == 10.0 + + +@pytest.mark.skipif("gurobi" not in available_solvers, reason="Gurobi not installed") +def test_extract_metrics_graceful_on_missing_attr() -> None: + """_safe_get should return None when attributes are missing.""" + solver_model = SimpleNamespace() # no attributes at all + solver = Gurobi() + metrics = solver._extract_metrics(solver_model, _SOLUTION) + assert metrics.solve_time is None + assert metrics.best_bound is None + assert metrics.mip_gap is None + assert metrics.objective_value == 42.0 From 92054f123a146bb10ee53bef19c19d923841b461 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 16:54:21 +0100 Subject: [PATCH 10/19] Update Release notes --- doc/release_notes.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 28bfd655..ed59b835 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -4,7 +4,7 @@ Release Notes Upcoming Version ---------------- -* Add unified ``SolverMetrics`` dataclass accessible via ``Model.solver_metrics`` after solving. Provides ``solver_name``, ``solve_time``, ``objective_value``, ``best_bound``, and ``mip_gap`` in a solver-independent way. Gurobi, HiGHS, SCIP, and CBC populate solver-specific fields; other solvers provide the baseline. +* Add unified ``SolverMetrics`` dataclass accessible via ``Model.solver_metrics`` after solving. Provides ``solver_name``, ``solve_time``, ``objective_value``, ``best_bound``, and ``mip_gap`` in a solver-independent way. All solvers populate solver-specific fields where available. 
Version 0.6.3 -------------- From 90db31f286516366fe0f4ba79362dfc8ed6e3365 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 19:34:24 +0100 Subject: [PATCH 11/19] Replace mock-based metrics tests with real solver integration tests Remove all mock/patch-based _extract_metrics tests. The parametrized integration tests (test_solver_metrics_direct, test_solver_metrics_file_io) now assert solve_time >= 0 for every available solver, ensuring attribute names are correct against real solver objects. Co-Authored-By: Claude Opus 4.6 --- test/test_solver_metrics.py | 150 +----------------------------------- 1 file changed, 4 insertions(+), 146 deletions(-) diff --git a/test/test_solver_metrics.py b/test/test_solver_metrics.py index 2a1be9e4..b35bd75f 100644 --- a/test/test_solver_metrics.py +++ b/test/test_solver_metrics.py @@ -5,9 +5,6 @@ from __future__ import annotations -from types import SimpleNamespace -from unittest.mock import MagicMock - import numpy as np import pytest import xarray as xr @@ -15,18 +12,6 @@ from linopy import Model, available_solvers from linopy.constants import Result, Solution, SolverMetrics, Status from linopy.solver_capabilities import SolverFeature, get_available_solvers_with_feature -from linopy.solvers import ( - CBC, - COPT, - SCIP, - Cplex, - Gurobi, - Highs, - MindOpt, - Mosek, - Xpress, - cuPDLPx, -) # --------------------------------------------------------------------------- # SolverMetrics dataclass tests @@ -164,10 +149,8 @@ def test_solver_metrics_direct(solver: str) -> None: assert metrics.solver_name == solver assert metrics.objective_value is not None assert metrics.objective_value == pytest.approx(1.0) - # Direct API solvers should generally report solve_time - if solver in ("gurobi", "highs"): - assert metrics.solve_time is not None - assert metrics.solve_time >= 0 + assert metrics.solve_time is not None + assert metrics.solve_time >= 0 @pytest.mark.parametrize("solver", 
file_io_solvers) @@ -179,130 +162,5 @@ def test_solver_metrics_file_io(solver: str) -> None: assert metrics.solver_name == solver assert metrics.objective_value is not None assert metrics.objective_value == pytest.approx(1.0) - - -# --------------------------------------------------------------------------- -# Mock-based _extract_metrics unit tests for each solver override -# --------------------------------------------------------------------------- - -_SOLUTION = Solution(objective=42.0) - - -@pytest.mark.skipif("cbc" not in available_solvers, reason="CBC not installed") -def test_cbc_extract_metrics() -> None: - solver_model = SimpleNamespace(runtime=1.5, mip_gap=0.01) - solver = CBC() - metrics = solver._extract_metrics(solver_model, _SOLUTION) - assert metrics.solve_time == 1.5 - assert metrics.mip_gap == 0.01 - - -@pytest.mark.skipif("highs" not in available_solvers, reason="HiGHS not installed") -def test_highs_extract_metrics() -> None: - solver_model = MagicMock() - solver_model.getRunTime.return_value = 2.0 - solver_model.getInfoValue.side_effect = lambda key: { - "mip_gap": (0, 0.05), - "mip_objective_bound": (0, 40.0), - }[key] - solver = Highs() - metrics = solver._extract_metrics(solver_model, _SOLUTION) - assert metrics.solve_time == 2.0 - assert metrics.mip_gap == 0.05 - assert metrics.best_bound == 40.0 - - -@pytest.mark.skipif("gurobi" not in available_solvers, reason="Gurobi not installed") -def test_gurobi_extract_metrics() -> None: - solver_model = SimpleNamespace(Runtime=3.0, ObjBound=39.0, MIPGap=0.02) - solver = Gurobi() - metrics = solver._extract_metrics(solver_model, _SOLUTION) - assert metrics.solve_time == 3.0 - assert metrics.best_bound == 39.0 - assert metrics.mip_gap == 0.02 - - -@pytest.mark.skipif("scip" not in available_solvers, reason="SCIP not installed") -def test_scip_extract_metrics() -> None: - solver_model = MagicMock() - solver_model.getSolvingTime.return_value = 4.0 - solver_model.getDualbound.return_value = 38.0 - 
solver_model.getGap.return_value = 0.03 - solver = SCIP() - metrics = solver._extract_metrics(solver_model, _SOLUTION) - assert metrics.solve_time == 4.0 - assert metrics.best_bound == 38.0 - assert metrics.mip_gap == 0.03 - - -@pytest.mark.skipif("cplex" not in available_solvers, reason="CPLEX not installed") -def test_cplex_extract_metrics() -> None: - solver_model = MagicMock() - solver_model.solution.progress.get_time.return_value = 5.0 - solver_model.solution.MIP.get_best_objective.return_value = 37.0 - solver_model.solution.MIP.get_mip_relative_gap.return_value = 0.04 - solver = Cplex() - metrics = solver._extract_metrics(solver_model, _SOLUTION) - assert metrics.solve_time == 5.0 - assert metrics.best_bound == 37.0 - assert metrics.mip_gap == 0.04 - - -@pytest.mark.skipif("xpress" not in available_solvers, reason="Xpress not installed") -def test_xpress_extract_metrics() -> None: - solver_model = SimpleNamespace( - attributes=SimpleNamespace(time=6.0, bestbound=36.0, miprelgap=0.05) - ) - solver = Xpress() - metrics = solver._extract_metrics(solver_model, _SOLUTION) - assert metrics.solve_time == 6.0 - assert metrics.best_bound == 36.0 - assert metrics.mip_gap == 0.05 - - -@pytest.mark.skipif("mosek" not in available_solvers, reason="Mosek not installed") -def test_mosek_extract_metrics() -> None: - solver_model = MagicMock() - solver_model.getdouinf.return_value = 7.0 - solver = Mosek() - metrics = solver._extract_metrics(solver_model, _SOLUTION) - assert metrics.solve_time == 7.0 - - -@pytest.mark.skipif("copt" not in available_solvers, reason="COPT not installed") -def test_copt_extract_metrics() -> None: - solver_model = SimpleNamespace(SolvingTime=8.0, BestBnd=35.0, BestGap=0.06) - solver = COPT() - metrics = solver._extract_metrics(solver_model, _SOLUTION) - assert metrics.solve_time == 8.0 - assert metrics.best_bound == 35.0 - assert metrics.mip_gap == 0.06 - - -@pytest.mark.skipif("mindopt" not in available_solvers, reason="MindOpt not installed") 
-def test_mindopt_extract_metrics() -> None: - solver_model = MagicMock() - solver_model.getAttr.return_value = 9.0 - solver = MindOpt() - metrics = solver._extract_metrics(solver_model, _SOLUTION) - assert metrics.solve_time == 9.0 - - -@pytest.mark.skipif("cupdlpx" not in available_solvers, reason="cuPDLPx not installed") -def test_cupdlpx_extract_metrics() -> None: - solver_model = SimpleNamespace(SolveTime=10.0) - solver = cuPDLPx() - metrics = solver._extract_metrics(solver_model, _SOLUTION) - assert metrics.solve_time == 10.0 - - -@pytest.mark.skipif("gurobi" not in available_solvers, reason="Gurobi not installed") -def test_extract_metrics_graceful_on_missing_attr() -> None: - """_safe_get should return None when attributes are missing.""" - solver_model = SimpleNamespace() # no attributes at all - solver = Gurobi() - metrics = solver._extract_metrics(solver_model, _SOLUTION) - assert metrics.solve_time is None - assert metrics.best_bound is None - assert metrics.mip_gap is None - assert metrics.objective_value == 42.0 + assert metrics.solve_time is not None + assert metrics.solve_time >= 0 From fef88b09db87619d3c721d1feedf74acca97bfee Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 19:37:41 +0100 Subject: [PATCH 12/19] Add better tests actually checking if metrics are populated, and fix highs best bound --- linopy/solvers.py | 13 ++++++++----- test/test_solver_metrics.py | 37 ++++++++++++++++++++++++++++++++++--- 2 files changed, 42 insertions(+), 8 deletions(-) diff --git a/linopy/solvers.py b/linopy/solvers.py index 51e4bf00..140f1f0d 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -955,15 +955,18 @@ def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetri h = solver_model metrics = super()._extract_metrics(solver_model, solution) - def _highs_best_bound() -> float | None: - status, val = h.getInfoValue("mip_objective_bound") - return val if status == 0 else None 
# 0 = HighsStatus.kOk + def _highs_info(key: str) -> float: + status, val = h.getInfoValue(key) + if status != highspy.HighsStatus.kOk: + msg = f"Failed to get HiGHS info: {key}" + raise RuntimeError(msg) + return val return dataclasses.replace( metrics, solve_time=_safe_get(lambda: h.getRunTime()), - mip_gap=_safe_get(lambda: h.getInfoValue("mip_gap")[1]), - best_bound=_safe_get(_highs_best_bound), + mip_gap=_safe_get(lambda: _highs_info("mip_gap")), + best_bound=_safe_get(lambda: _highs_info("mip_dual_bound")), ) def _set_solver_params( diff --git a/test/test_solver_metrics.py b/test/test_solver_metrics.py index b35bd75f..2118f5ab 100644 --- a/test/test_solver_metrics.py +++ b/test/test_solver_metrics.py @@ -126,9 +126,12 @@ def test_model_metrics_reset() -> None: file_io_solvers = get_available_solvers_with_feature( SolverFeature.READ_MODEL_FROM_FILE, available_solvers ) +mip_solvers = get_available_solvers_with_feature( + SolverFeature.INTEGER_VARIABLES, available_solvers +) -def _make_simple_model() -> Model: +def _make_simple_lp() -> Model: m = Model() x = m.add_variables( lower=xr.DataArray(np.zeros(3), dims=["i"]), @@ -140,9 +143,17 @@ def _make_simple_model() -> Model: return m +def _make_simple_mip() -> Model: + m = Model() + x = m.add_variables(coords=[np.arange(3)], name="x", binary=True) + m.add_constraints(x.sum() >= 1, name="con") + m.add_objective(x.sum()) + return m + + @pytest.mark.parametrize("solver", direct_solvers) def test_solver_metrics_direct(solver: str) -> None: - m = _make_simple_model() + m = _make_simple_lp() m.solve(solver_name=solver, io_api="direct") metrics = m.solver_metrics assert metrics is not None @@ -155,7 +166,7 @@ def test_solver_metrics_direct(solver: str) -> None: @pytest.mark.parametrize("solver", file_io_solvers) def test_solver_metrics_file_io(solver: str) -> None: - m = _make_simple_model() + m = _make_simple_lp() m.solve(solver_name=solver, io_api="lp") metrics = m.solver_metrics assert metrics is not None @@ -164,3 
+175,23 @@ def test_solver_metrics_file_io(solver: str) -> None: assert metrics.objective_value == pytest.approx(1.0) assert metrics.solve_time is not None assert metrics.solve_time >= 0 + + +@pytest.mark.parametrize("solver", mip_solvers) +def test_solver_metrics_mip(solver: str) -> None: + """Solve a MIP and verify mip_gap and best_bound are populated.""" + m = _make_simple_mip() + if solver in direct_solvers: + m.solve(solver_name=solver, io_api="direct") + else: + m.solve(solver_name=solver, io_api="lp") + metrics = m.solver_metrics + assert metrics is not None + assert metrics.solver_name == solver + assert metrics.objective_value == pytest.approx(1.0) + assert metrics.solve_time is not None + assert metrics.solve_time >= 0 + assert metrics.mip_gap is not None + assert metrics.mip_gap >= 0 + assert metrics.best_bound is not None + assert isinstance(metrics.best_bound, float) From c405ef00e8801f44a0595bde3513ddf9fcbccda3 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 19:46:45 +0100 Subject: [PATCH 13/19] Rename to dual_bound and improve docstring --- linopy/constants.py | 17 ++++++++++++++++- linopy/model.py | 6 ++---- linopy/solvers.py | 12 ++++++------ test/test_solver_metrics.py | 10 +++++----- 4 files changed, 29 insertions(+), 16 deletions(-) diff --git a/linopy/constants.py b/linopy/constants.py index 44e13654..39466789 100644 --- a/linopy/constants.py +++ b/linopy/constants.py @@ -220,12 +220,27 @@ class SolverMetrics: All fields default to ``None``. Solvers populate what they can; unsupported fields remain ``None``. Access via :attr:`Model.solver_metrics` after calling :meth:`Model.solve`. + + Attributes + ---------- + solver_name : str or None + Name of the solver used. + solve_time : float or None + Wall-clock time spent solving (seconds). + objective_value : float or None + Objective value of the best solution found. 
+ dual_bound : float or None + Best bound on the objective from the MIP relaxation (also known as + "best bound"). Only populated for integer programs. + mip_gap : float or None + Relative gap between the objective value and the dual bound. + Only populated for integer programs. """ solver_name: str | None = None solve_time: float | None = None objective_value: float | None = None - best_bound: float | None = None + dual_bound: float | None = None mip_gap: float | None = None def __repr__(self) -> str: diff --git a/linopy/model.py b/linopy/model.py index c71b9ad2..015cbec0 100644 --- a/linopy/model.py +++ b/linopy/model.py @@ -208,10 +208,8 @@ def solver_metrics(self) -> SolverMetrics | None: Solver performance metrics from the last solve, or ``None`` if the model has not been solved yet. - Returns a :class:`~linopy.constants.SolverMetrics` with fields - ``solver_name``, ``solve_time``, ``objective_value``, - ``best_bound``, and ``mip_gap``. Fields the solver cannot - provide remain ``None``. + Returns a :class:`~linopy.constants.SolverMetrics` instance. + Fields the solver cannot provide remain ``None``. Reset to ``None`` by :meth:`reset_solution`. 
diff --git a/linopy/solvers.py b/linopy/solvers.py index 140f1f0d..c4a24ec2 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -966,7 +966,7 @@ def _highs_info(key: str) -> float: metrics, solve_time=_safe_get(lambda: h.getRunTime()), mip_gap=_safe_get(lambda: _highs_info("mip_gap")), - best_bound=_safe_get(lambda: _highs_info("mip_dual_bound")), + dual_bound=_safe_get(lambda: _highs_info("mip_dual_bound")), ) def _set_solver_params( @@ -1218,7 +1218,7 @@ def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetri return dataclasses.replace( metrics, solve_time=_safe_get(lambda: m.Runtime), - best_bound=_safe_get(lambda: m.ObjBound), + dual_bound=_safe_get(lambda: m.ObjBound), mip_gap=_safe_get(lambda: m.MIPGap), ) @@ -1353,7 +1353,7 @@ def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetri return dataclasses.replace( metrics, solve_time=_safe_get(lambda: m.solution.progress.get_time()), - best_bound=_safe_get(lambda: m.solution.MIP.get_best_objective()), + dual_bound=_safe_get(lambda: m.solution.MIP.get_best_objective()), mip_gap=_safe_get(lambda: m.solution.MIP.get_mip_relative_gap()), ) @@ -1516,7 +1516,7 @@ def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetri return dataclasses.replace( metrics, solve_time=_safe_get(lambda: m.getSolvingTime()), - best_bound=_safe_get(lambda: m.getDualbound()), + dual_bound=_safe_get(lambda: m.getDualbound()), mip_gap=_safe_get(lambda: m.getGap()), ) @@ -1683,7 +1683,7 @@ def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetri return dataclasses.replace( metrics, solve_time=_safe_get(lambda: m.attributes.time), - best_bound=_safe_get(lambda: m.attributes.bestbound), + dual_bound=_safe_get(lambda: m.attributes.bestbound), mip_gap=_safe_get(lambda: m.attributes.miprelgap), ) @@ -2217,7 +2217,7 @@ def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetri return dataclasses.replace( metrics, 
solve_time=_safe_get(lambda: m.SolvingTime), - best_bound=_safe_get(lambda: m.BestBnd), + dual_bound=_safe_get(lambda: m.BestBnd), mip_gap=_safe_get(lambda: m.BestGap), ) diff --git a/test/test_solver_metrics.py b/test/test_solver_metrics.py index 2118f5ab..78220c88 100644 --- a/test/test_solver_metrics.py +++ b/test/test_solver_metrics.py @@ -23,7 +23,7 @@ def test_solver_metrics_defaults() -> None: assert m.solver_name is None assert m.solve_time is None assert m.objective_value is None - assert m.best_bound is None + assert m.dual_bound is None assert m.mip_gap is None @@ -40,7 +40,7 @@ def test_solver_metrics_repr_only_non_none() -> None: assert "solver_name='gurobi'" in r assert "solve_time=2.3" in r assert "objective_value" not in r - assert "best_bound" not in r + assert "dual_bound" not in r def test_solver_metrics_repr_empty() -> None: @@ -179,7 +179,7 @@ def test_solver_metrics_file_io(solver: str) -> None: @pytest.mark.parametrize("solver", mip_solvers) def test_solver_metrics_mip(solver: str) -> None: - """Solve a MIP and verify mip_gap and best_bound are populated.""" + """Solve a MIP and verify mip_gap and dual_bound are populated.""" m = _make_simple_mip() if solver in direct_solvers: m.solve(solver_name=solver, io_api="direct") @@ -193,5 +193,5 @@ def test_solver_metrics_mip(solver: str) -> None: assert metrics.solve_time >= 0 assert metrics.mip_gap is not None assert metrics.mip_gap >= 0 - assert metrics.best_bound is not None - assert isinstance(metrics.best_bound, float) + assert metrics.dual_bound is not None + assert isinstance(metrics.dual_bound, float) From f135e9b590355113811060c05c87708db98afb4e Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 19:59:38 +0100 Subject: [PATCH 14/19] Fix testable solvers and remove for others --- linopy/solvers.py | 42 +++++++++++++++--------------------------- 1 file changed, 15 insertions(+), 27 deletions(-) diff --git a/linopy/solvers.py 
b/linopy/solvers.py index c4a24ec2..817f79d0 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -15,6 +15,7 @@ import subprocess as sub import sys import threading +import time import warnings from abc import ABC, abstractmethod from collections import namedtuple @@ -1352,7 +1353,7 @@ def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetri metrics = super()._extract_metrics(solver_model, solution) return dataclasses.replace( metrics, - solve_time=_safe_get(lambda: m.solution.progress.get_time()), + solve_time=_safe_get(lambda: self._solve_time), dual_bound=_safe_get(lambda: m.solution.MIP.get_best_objective()), mip_gap=_safe_get(lambda: m.solution.MIP.get_mip_relative_gap()), ) @@ -1446,8 +1447,10 @@ def solve_problem_from_file( is_lp = m.problem_type[m.get_problem_type()] == "LP" + _t0 = time.perf_counter() with contextlib.suppress(cplex.exceptions.errors.CplexSolverError): m.solve() + self._solve_time = time.perf_counter() - _t0 if solution_fn is not None: try: @@ -1680,11 +1683,19 @@ def __init__( def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: m = solver_model metrics = super()._extract_metrics(solver_model, solution) + + def _xpress_mip_gap() -> float | None: + obj = m.attributes.mipbestobjval + bound = m.attributes.bestbound + if obj == 0: + return 0.0 if bound == 0 else None + return abs(obj - bound) / abs(obj) + return dataclasses.replace( metrics, solve_time=_safe_get(lambda: m.attributes.time), dual_bound=_safe_get(lambda: m.attributes.bestbound), - mip_gap=_safe_get(lambda: m.attributes.miprelgap), + mip_gap=_safe_get(_xpress_mip_gap), ) def solve_problem_from_model( @@ -1874,6 +1885,8 @@ def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetri return dataclasses.replace( metrics, solve_time=_safe_get(lambda: m.getdouinf(mosek.dinfitem.optimizer_time)), + dual_bound=_safe_get(lambda: m.getdouinf(mosek.dinfitem.mio_obj_bound)), + mip_gap=_safe_get(lambda: 
m.getdouinf(mosek.dinfitem.mio_obj_rel_gap)), ) def solve_problem_from_model( @@ -2211,16 +2224,6 @@ def __init( ) -> None: super().__init__(**solver_options) - def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: - m = solver_model - metrics = super()._extract_metrics(solver_model, solution) - return dataclasses.replace( - metrics, - solve_time=_safe_get(lambda: m.SolvingTime), - dual_bound=_safe_get(lambda: m.BestBnd), - mip_gap=_safe_get(lambda: m.BestGap), - ) - def solve_problem_from_model( self, model: Model, @@ -2363,14 +2366,6 @@ def __init( ) -> None: super().__init__(**solver_options) - def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: - m = solver_model - metrics = super()._extract_metrics(solver_model, solution) - return dataclasses.replace( - metrics, - solve_time=_safe_get(lambda: m.getAttr("SolvingTime")), - ) - def solve_problem_from_model( self, model: Model, @@ -2536,13 +2531,6 @@ def __init__( ) -> None: super().__init__(**solver_options) - def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: - metrics = super()._extract_metrics(solver_model, solution) - return dataclasses.replace( - metrics, - solve_time=_safe_get(lambda: solver_model.SolveTime), - ) - def solve_problem_from_file( self, problem_fn: Path, From 7135f1498a9e95950e330945caea51ca53df287f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 20:13:59 +0100 Subject: [PATCH 15/19] Scope metrics tests to solvers with tested overrides Only parametrize over solvers with _extract_metrics overrides (gurobi, highs, scip, cplex, xpress, mosek), so solvers with base-only metrics (glpk, copt, cbc) don't fail on solve_time. 
--- test/test_solver_metrics.py | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/test/test_solver_metrics.py b/test/test_solver_metrics.py index 78220c88..e6bcee4e 100644 --- a/test/test_solver_metrics.py +++ b/test/test_solver_metrics.py @@ -120,15 +120,30 @@ def test_model_metrics_reset() -> None: # Solver-specific integration tests (parametrized over available solvers) # --------------------------------------------------------------------------- -direct_solvers = get_available_solvers_with_feature( - SolverFeature.DIRECT_API, available_solvers -) -file_io_solvers = get_available_solvers_with_feature( - SolverFeature.READ_MODEL_FROM_FILE, available_solvers -) -mip_solvers = get_available_solvers_with_feature( - SolverFeature.INTEGER_VARIABLES, available_solvers -) +# Solvers that have a tested _extract_metrics override providing solve_time etc. +_solvers_with_metrics = {"gurobi", "highs", "scip", "cplex", "xpress", "mosek"} + +direct_solvers = [ + s + for s in get_available_solvers_with_feature( + SolverFeature.DIRECT_API, available_solvers + ) + if s in _solvers_with_metrics +] +file_io_solvers = [ + s + for s in get_available_solvers_with_feature( + SolverFeature.READ_MODEL_FROM_FILE, available_solvers + ) + if s in _solvers_with_metrics +] +mip_solvers = [ + s + for s in get_available_solvers_with_feature( + SolverFeature.INTEGER_VARIABLES, available_solvers + ) + if s in _solvers_with_metrics +] def _make_simple_lp() -> Model: From 45f81279b40b3a08aebb2935dcb5e3ba365c2f49 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 21:49:44 +0100 Subject: [PATCH 16/19] Add pragma no cover for test depending on solvers not in CI --- linopy/solvers.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/linopy/solvers.py b/linopy/solvers.py index 817f79d0..70c86901 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -1213,7 
+1213,9 @@ def solve_problem_from_file( sense=sense, ) - def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + def _extract_metrics( + self, solver_model: Any, solution: Solution + ) -> SolverMetrics: # pragma: no cover m = solver_model metrics = super()._extract_metrics(solver_model, solution) return dataclasses.replace( @@ -1348,7 +1350,9 @@ def __init__( ) -> None: super().__init__(**solver_options) - def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + def _extract_metrics( + self, solver_model: Any, solution: Solution + ) -> SolverMetrics: # pragma: no cover m = solver_model metrics = super()._extract_metrics(solver_model, solution) return dataclasses.replace( @@ -1447,10 +1451,10 @@ def solve_problem_from_file( is_lp = m.problem_type[m.get_problem_type()] == "LP" - _t0 = time.perf_counter() + _t0 = time.perf_counter() # pragma: no cover with contextlib.suppress(cplex.exceptions.errors.CplexSolverError): m.solve() - self._solve_time = time.perf_counter() - _t0 + self._solve_time = time.perf_counter() - _t0 # pragma: no cover if solution_fn is not None: try: @@ -1680,7 +1684,9 @@ def __init__( ) -> None: super().__init__(**solver_options) - def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + def _extract_metrics( + self, solver_model: Any, solution: Solution + ) -> SolverMetrics: # pragma: no cover m = solver_model metrics = super()._extract_metrics(solver_model, solution) @@ -1879,7 +1885,9 @@ def __init__( ) -> None: super().__init__(**solver_options) - def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + def _extract_metrics( + self, solver_model: Any, solution: Solution + ) -> SolverMetrics: # pragma: no cover m = solver_model metrics = super()._extract_metrics(solver_model, solution) return dataclasses.replace( From 34549f8765d44ecb945fdf899da918f4cea092f4 Mon Sep 17 00:00:00 2001 From: FBumann 
<117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 22:28:57 +0100 Subject: [PATCH 17/19] Add pragma no cover for test depending on solvers not in CI --- linopy/solvers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linopy/solvers.py b/linopy/solvers.py index 70c86901..4f8ee37e 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -958,7 +958,7 @@ def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetri def _highs_info(key: str) -> float: status, val = h.getInfoValue(key) - if status != highspy.HighsStatus.kOk: + if status != highspy.HighsStatus.kOk: # pragma: no cover msg = f"Failed to get HiGHS info: {key}" raise RuntimeError(msg) return val From 21f555efb10eb5f0fd1e45353dce24b1fd58b81c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Feb 2026 22:43:35 +0100 Subject: [PATCH 18/19] Add to notebook --- examples/create-a-model.ipynb | 175 +++++++++++++++++++++++++--------- 1 file changed, 132 insertions(+), 43 deletions(-) diff --git a/examples/create-a-model.ipynb b/examples/create-a-model.ipynb index a158e0cf..6b476ac2 100644 --- a/examples/create-a-model.ipynb +++ b/examples/create-a-model.ipynb @@ -30,11 +30,16 @@ }, { "cell_type": "code", - "execution_count": null, "id": "dramatic-cannon", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-11T21:42:38.164407Z", + "start_time": "2026-02-11T21:42:38.162992Z" + } + }, + "source": [], "outputs": [], - "source": [] + "execution_count": null }, { "attachments": {}, @@ -49,15 +54,20 @@ }, { "cell_type": "code", - "execution_count": null, "id": "technical-conducting", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-11T21:42:39.360058Z", + "start_time": "2026-02-11T21:42:38.171827Z" + } + }, "source": [ "from linopy import Model\n", "\n", "m = Model()" - ] + ], + "outputs": [], + "execution_count": null }, { "attachments": {}, @@ -83,14 
+93,19 @@ }, { "cell_type": "code", - "execution_count": null, "id": "protecting-power", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-11T21:42:39.387467Z", + "start_time": "2026-02-11T21:42:39.384712Z" + } + }, "source": [ "x = m.add_variables(lower=0, name=\"x\")\n", "y = m.add_variables(lower=0, name=\"y\");" - ] + ], + "outputs": [], + "execution_count": null }, { "attachments": {}, @@ -103,13 +118,18 @@ }, { "cell_type": "code", - "execution_count": null, "id": "virtual-anxiety", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-11T21:42:39.393709Z", + "start_time": "2026-02-11T21:42:39.390438Z" + } + }, "source": [ "x" - ] + ], + "outputs": [], + "execution_count": null }, { "attachments": {}, @@ -127,13 +147,18 @@ }, { "cell_type": "code", - "execution_count": null, "id": "fbb46cad", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-11T21:42:39.405691Z", + "start_time": "2026-02-11T21:42:39.396625Z" + } + }, "source": [ "3 * x + 7 * y >= 10" - ] + ], + "outputs": [], + "execution_count": null }, { "attachments": {}, @@ -146,13 +171,18 @@ }, { "cell_type": "code", - "execution_count": null, "id": "60f41b76", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-11T21:42:39.416325Z", + "start_time": "2026-02-11T21:42:39.409117Z" + } + }, "source": [ "3 * x + 7 * y - 10 >= 0" - ] + ], + "outputs": [], + "execution_count": null }, { "attachments": {}, @@ -167,14 +197,19 @@ }, { "cell_type": "code", - "execution_count": null, "id": "hollywood-production", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-11T21:42:39.431755Z", + "start_time": "2026-02-11T21:42:39.420977Z" + } + }, "source": [ "m.add_constraints(3 * x + 7 * y >= 10)\n", "m.add_constraints(5 * x + 2 * y >= 3);" - ] + ], + "outputs": [], + "execution_count": null }, { "attachments": {}, @@ 
-189,13 +224,18 @@ }, { "cell_type": "code", - "execution_count": null, "id": "overall-exhibition", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-11T21:42:39.438865Z", + "start_time": "2026-02-11T21:42:39.434328Z" + } + }, "source": [ "m.add_objective(x + 2 * y)" - ] + ], + "outputs": [], + "execution_count": null }, { "attachments": {}, @@ -210,13 +250,18 @@ }, { "cell_type": "code", - "execution_count": null, "id": "pressing-copying", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-11T21:42:39.532619Z", + "start_time": "2026-02-11T21:42:39.441886Z" + } + }, "source": [ "m.solve(solver_name=\"highs\")" - ] + ], + "outputs": [], + "execution_count": null }, { "attachments": {}, @@ -229,23 +274,67 @@ }, { "cell_type": "code", - "execution_count": null, "id": "electric-duration", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-11T21:42:39.560199Z", + "start_time": "2026-02-11T21:42:39.553844Z" + } + }, "source": [ "x.solution" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "e6d31751", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-11T21:42:39.577784Z", + "start_time": "2026-02-11T21:42:39.573362Z" + } + }, "source": [ "y.solution" - ] + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "9zgzuhvo1b8", + "source": [ + "### Solver Metrics\n", + "\n", + "After solving, you can inspect performance metrics reported by the solver via `solver_metrics`. This includes solve time, objective value, and for MIP problems, the dual bound and MIP gap (available for most solvers)." 
+ ], + "metadata": {} + }, + { + "cell_type": "code", + "id": "bdfxi7haoc", + "source": "m.solver_metrics", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-11T21:42:39.592065Z", + "start_time": "2026-02-11T21:42:39.589851Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "SolverMetrics(solver_name='highs', solve_time=0.0019101661164313555, objective_value=2.862068965517241, dual_bound=0.0, mip_gap=inf)" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null }, { "attachments": {}, From 032e098cd91b8594e32c6d8c83075905044bbac2 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 12 Feb 2026 15:57:40 +0100 Subject: [PATCH 19/19] Fix Highs Metrics if LP --- linopy/solvers.py | 42 ++++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/linopy/solvers.py b/linopy/solvers.py index 4f8ee37e..c82f4a22 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -963,11 +963,15 @@ def _highs_info(key: str) -> float: raise RuntimeError(msg) return val + is_mip = _safe_get(lambda: _highs_info("mip_node_count")) not in (None, -1) + return dataclasses.replace( metrics, solve_time=_safe_get(lambda: h.getRunTime()), - mip_gap=_safe_get(lambda: _highs_info("mip_gap")), - dual_bound=_safe_get(lambda: _highs_info("mip_dual_bound")), + mip_gap=_safe_get(lambda: _highs_info("mip_gap")) if is_mip else None, + dual_bound=_safe_get(lambda: _highs_info("mip_dual_bound")) + if is_mip + else None, ) def _set_solver_params( @@ -1218,11 +1222,12 @@ def _extract_metrics( ) -> SolverMetrics: # pragma: no cover m = solver_model metrics = super()._extract_metrics(solver_model, solution) + is_mip = _safe_get(lambda: m.IsMIP) == 1 return dataclasses.replace( metrics, solve_time=_safe_get(lambda: m.Runtime), - dual_bound=_safe_get(lambda: m.ObjBound), - mip_gap=_safe_get(lambda: m.MIPGap), + dual_bound=_safe_get(lambda: 
m.ObjBound) if is_mip else None, + mip_gap=_safe_get(lambda: m.MIPGap) if is_mip else None, ) def _solve( @@ -1355,11 +1360,16 @@ def _extract_metrics( ) -> SolverMetrics: # pragma: no cover m = solver_model metrics = super()._extract_metrics(solver_model, solution) + is_mip = _safe_get(lambda: m.problem_type[m.get_problem_type()] != "LP") return dataclasses.replace( metrics, solve_time=_safe_get(lambda: self._solve_time), - dual_bound=_safe_get(lambda: m.solution.MIP.get_best_objective()), - mip_gap=_safe_get(lambda: m.solution.MIP.get_mip_relative_gap()), + dual_bound=_safe_get(lambda: m.solution.MIP.get_best_objective()) + if is_mip + else None, + mip_gap=_safe_get(lambda: m.solution.MIP.get_mip_relative_gap()) + if is_mip + else None, ) def solve_problem_from_model( @@ -1520,11 +1530,12 @@ def __init__( def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: m = solver_model metrics = super()._extract_metrics(solver_model, solution) + is_mip = getattr(self, "_is_mip", False) return dataclasses.replace( metrics, solve_time=_safe_get(lambda: m.getSolvingTime()), - dual_bound=_safe_get(lambda: m.getDualbound()), - mip_gap=_safe_get(lambda: m.getGap()), + dual_bound=_safe_get(lambda: m.getDualbound()) if is_mip else None, + mip_gap=_safe_get(lambda: m.getGap()) if is_mip else None, ) def solve_problem_from_model( @@ -1618,6 +1629,7 @@ def solve_problem_from_file( if warmstart_fn: logger.warning("Warmstart not implemented for SCIP") + self._is_mip = m.getNIntVars() + m.getNBinVars() > 0 m.optimize() if basis_fn: @@ -1689,6 +1701,7 @@ def _extract_metrics( ) -> SolverMetrics: # pragma: no cover m = solver_model metrics = super()._extract_metrics(solver_model, solution) + is_mip = _safe_get(lambda: m.attributes.mipents) not in (None, 0) def _xpress_mip_gap() -> float | None: obj = m.attributes.mipbestobjval @@ -1700,8 +1713,8 @@ def _xpress_mip_gap() -> float | None: return dataclasses.replace( metrics, solve_time=_safe_get(lambda: 
m.attributes.time), - dual_bound=_safe_get(lambda: m.attributes.bestbound), - mip_gap=_safe_get(_xpress_mip_gap), + dual_bound=_safe_get(lambda: m.attributes.bestbound) if is_mip else None, + mip_gap=_safe_get(_xpress_mip_gap) if is_mip else None, ) def solve_problem_from_model( @@ -1890,11 +1903,16 @@ def _extract_metrics( ) -> SolverMetrics: # pragma: no cover m = solver_model metrics = super()._extract_metrics(solver_model, solution) + is_mip = _safe_get(lambda: m.getnumintvar()) not in (None, 0) return dataclasses.replace( metrics, solve_time=_safe_get(lambda: m.getdouinf(mosek.dinfitem.optimizer_time)), - dual_bound=_safe_get(lambda: m.getdouinf(mosek.dinfitem.mio_obj_bound)), - mip_gap=_safe_get(lambda: m.getdouinf(mosek.dinfitem.mio_obj_rel_gap)), + dual_bound=_safe_get(lambda: m.getdouinf(mosek.dinfitem.mio_obj_bound)) + if is_mip + else None, + mip_gap=_safe_get(lambda: m.getdouinf(mosek.dinfitem.mio_obj_rel_gap)) + if is_mip + else None, ) def solve_problem_from_model(