diff --git a/src/cabinetry/fit/__init__.py b/src/cabinetry/fit/__init__.py
index e8d09de4..4fbd1abc 100644
--- a/src/cabinetry/fit/__init__.py
+++ b/src/cabinetry/fit/__init__.py
@@ -81,6 +81,7 @@ def _fit_model_pyhf(
     Returns:
         FitResults: object storing relevant fit results
     """
+    _, initial_optimizer = pyhf.get_backend()  # store initial optimizer settings
     pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(verbose=1))
 
     # strategy=None is currently not supported in pyhf
@@ -124,7 +125,7 @@ def _fit_model_pyhf(
         best_twice_nll,
         minos_uncertainty=minos_results,
     )
-
+    pyhf.set_backend(pyhf.tensorlib, initial_optimizer)  # restore optimizer settings
     return fit_results
 
 
@@ -174,6 +175,7 @@ def _fit_model_custom(
     Returns:
         FitResults: object storing relevant fit results
     """
+    _, initial_optimizer = pyhf.get_backend()  # store initial optimizer settings
     pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(verbose=1))
 
     # use parameter settings provided in function arguments if they exist, else defaults
@@ -236,7 +238,7 @@ def twice_nll_func(pars: np.ndarray) -> Any:
         best_twice_nll,
         minos_uncertainty=minos_results,
     )
-
+    pyhf.set_backend(pyhf.tensorlib, initial_optimizer)  # restore optimizer settings
     return fit_results
 
 
@@ -774,6 +776,7 @@ def limit(
     Returns:
         LimitResults: observed and expected limits, CLs values, and scanned points
     """
+    _, initial_optimizer = pyhf.get_backend()  # store initial optimizer settings
     pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(verbose=1))
 
     # use POI given by kwarg, fall back to POI specified in model
@@ -984,6 +987,7 @@ def _cls_minus_threshold(
         poi_arr,
         confidence_level,
     )
+    pyhf.set_backend(pyhf.tensorlib, initial_optimizer)  # restore optimizer settings
     return limit_results
 
 
@@ -1012,6 +1016,7 @@ def significance(
     Returns:
         SignificanceResults: observed and expected p-values and significances
     """
+    _, initial_optimizer = pyhf.get_backend()  # store initial optimizer settings
     pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(verbose=1))
 
     log.info("calculating discovery significance")
@@ -1044,4 +1049,5 @@ def significance(
     significance_results = SignificanceResults(
         obs_p_val, obs_significance, exp_p_val, exp_significance
     )
+    pyhf.set_backend(pyhf.tensorlib, initial_optimizer)  # restore optimizer settings
     return significance_results
diff --git a/tests/fit/test_fit.py b/tests/fit/test_fit.py
index 6d40b534..5c4ea9ac 100644
--- a/tests/fit/test_fit.py
+++ b/tests/fit/test_fit.py
@@ -4,6 +4,7 @@
 
 import iminuit
 import numpy as np
+import pyhf
 import pytest
 
 from cabinetry import fit
@@ -29,6 +30,7 @@ def test_print_results(caplog):
 @pytest.mark.filterwarnings("ignore::RuntimeWarning")
 @mock.patch("cabinetry.fit._run_minos", return_value={"Signal strength": (-0.1, 0.2)})
 def test__fit_model_pyhf(mock_minos, example_spec, example_spec_multibin):
+    pyhf.set_backend("numpy", "scipy")
     model, data = model_utils.model_and_data(example_spec)
     fit_results = fit._fit_model_pyhf(model, data)
     assert np.allclose(fit_results.bestfit, [8.33624084, 1.1])
@@ -36,6 +38,7 @@ def test__fit_model_pyhf(mock_minos, example_spec, example_spec_multibin):
     assert fit_results.labels == ["Signal strength", "staterror_Signal-Region[0]"]
     assert np.allclose(fit_results.best_twice_nll, 7.82495235)
     assert np.allclose(fit_results.corr_mat, [[1.0, 0.0], [0.0, 0.0]])
+    assert pyhf.get_backend()[1].name == "scipy"  # optimizer was reset
 
     # Asimov fit, with fixed gamma (fixed not to Asimov MLE)
     model, data = model_utils.model_and_data(example_spec, asimov=True)
@@ -104,6 +107,7 @@ def test__fit_model_pyhf(mock_minos, example_spec, example_spec_multibin):
 @pytest.mark.filterwarnings("ignore::RuntimeWarning")
 @mock.patch("cabinetry.fit._run_minos", return_value={"Signal strength": (-0.1, 0.2)})
 def test__fit_model_custom(mock_minos, example_spec, example_spec_multibin):
+    pyhf.set_backend("numpy", "scipy")
     model, data = model_utils.model_and_data(example_spec)
     fit_results = fit._fit_model_custom(model, data)
     assert np.allclose(fit_results.bestfit, [8.33625071, 1.1])
@@ -111,6 +115,7 @@ def test__fit_model_custom(mock_minos, example_spec, example_spec_multibin):
     assert fit_results.labels == ["Signal strength", "staterror_Signal-Region[0]"]
     assert np.allclose(fit_results.best_twice_nll, 7.82495235)
     assert np.allclose(fit_results.corr_mat, [[1.0, 0.0], [0.0, 0.0]])
+    assert pyhf.get_backend()[1].name == "scipy"  # optimizer was reset
 
     # Asimov fit, with fixed gamma (fixed not to Asimov MLE)
     model, data = model_utils.model_and_data(example_spec, asimov=True)
@@ -659,6 +664,7 @@ def test_scan(mock_fit, example_spec):
 
 def test_limit(example_spec_with_background, caplog):
     caplog.set_level(logging.DEBUG)
+    pyhf.set_backend("numpy", "scipy")
 
     # expected values for results
     observed_limit = 0.749
@@ -686,6 +692,7 @@ def test_limit(example_spec_with_background, caplog):
     assert limit_results.confidence_level == 0.95
     # verify that POI values are sorted
     assert np.allclose(limit_results.poi_values, sorted(limit_results.poi_values))
+    assert pyhf.get_backend()[1].name == "scipy"  # optimizer was reset
     caplog.clear()
 
     # access negative POI values with lower bracket below zero
@@ -780,6 +787,7 @@
 
 
 def test_significance(example_spec_with_background):
+    pyhf.set_backend("numpy", "scipy")
     # increase observed data for smaller observed p-value
     example_spec_with_background["observations"][0]["data"] = [196]
 
@@ -789,6 +797,7 @@ def test_significance(example_spec_with_background):
     assert np.allclose(significance_results.observed_significance, 3.15402672)
     assert np.allclose(significance_results.expected_p_value, 0.00033333)
     assert np.allclose(significance_results.expected_significance, 3.40293444)
+    assert pyhf.get_backend()[1].name == "scipy"  # optimizer was reset
 
     # reduce signal for larger expected p-value
     example_spec_with_background["channels"][0]["samples"][0]["data"] = [30]
diff --git a/tests/test_integration.py b/tests/test_integration.py
index 2899bc39..d6e16c1a 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -193,7 +193,7 @@ def test_integration(tmp_path, ntuple_creator, caplog):
     assert np.allclose(
         prediction_postfit.total_stdev_model_channels,
         [[41.043814, 45.814417, 20.439575]],
-        atol=2e-3,
+        atol=5e-3,
     )
 
     _ = cabinetry.visualize.data_mc(prediction_postfit, data, close_figure=True)
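
Review note (not part of the patch): the save/restore pair added above is repeated verbatim in `_fit_model_pyhf`, `_fit_model_custom`, `limit`, and `significance`, and the restore call is skipped if the fit raises partway through. A context manager would deduplicate the pattern and guarantee restoration via `try`/`finally`. A minimal sketch, using only the public `pyhf.get_backend`/`pyhf.set_backend`/`pyhf.optimize.minuit_optimizer` API; the helper name `_minuit_optimizer` is hypothetical, not something that exists in cabinetry or pyhf:

```python
import contextlib

import pyhf


@contextlib.contextmanager
def _minuit_optimizer():
    """Temporarily switch pyhf to the MINUIT optimizer, restoring the previous one on exit."""
    _, initial_optimizer = pyhf.get_backend()  # store initial optimizer settings
    pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(verbose=1))
    try:
        yield
    finally:
        # runs even if the wrapped fit raises, so the optimizer is always reset
        pyhf.set_backend(pyhf.tensorlib, initial_optimizer)
```

Each of the four functions could then wrap its fitting code in `with _minuit_optimizer(): ...` instead of carrying its own save/restore lines.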