feat: restore optimizer settings after fits #362

Merged
merged 3 commits on Aug 29, 2022
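The change applies the same pattern to all four entry points touched below (_fit_model_pyhf, _fit_model_custom, limit, and significance): query the optimizer configured by the caller via pyhf.get_backend() before switching to Minuit, and put it back once the results are assembled. A minimal standalone sketch of that pattern; the run_fit wrapper and its model/data arguments are illustrative and not part of this PR:

import pyhf

def run_fit(model, data):
    # remember whatever optimizer the caller had configured
    _, initial_optimizer = pyhf.get_backend()
    pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(verbose=1))
    bestfit = pyhf.infer.mle.fit(data, model)  # runs with Minuit
    # restore the caller's optimizer before returning
    pyhf.set_backend(pyhf.tensorlib, initial_optimizer)
    return bestfit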
10 changes: 8 additions & 2 deletions src/cabinetry/fit/__init__.py
@@ -81,6 +81,7 @@ def _fit_model_pyhf(
Returns:
FitResults: object storing relevant fit results
"""
_, initial_optimizer = pyhf.get_backend() # store initial optimizer settings
pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(verbose=1))

# strategy=None is currently not supported in pyhf
@@ -124,7 +125,7 @@ def _fit_model_pyhf(
best_twice_nll,
minos_uncertainty=minos_results,
)

pyhf.set_backend(pyhf.tensorlib, initial_optimizer) # restore optimizer settings
return fit_results


@@ -174,6 +175,7 @@ def _fit_model_custom(
Returns:
FitResults: object storing relevant fit results
"""
_, initial_optimizer = pyhf.get_backend() # store initial optimizer settings
pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(verbose=1))

# use parameter settings provided in function arguments if they exist, else defaults
@@ -236,7 +238,7 @@ def twice_nll_func(pars: np.ndarray) -> Any:
best_twice_nll,
minos_uncertainty=minos_results,
)

pyhf.set_backend(pyhf.tensorlib, initial_optimizer) # restore optimizer settings
return fit_results


@@ -774,6 +776,7 @@ def limit(
Returns:
LimitResults: observed and expected limits, CLs values, and scanned points
"""
_, initial_optimizer = pyhf.get_backend() # store initial optimizer settings
pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(verbose=1))

# use POI given by kwarg, fall back to POI specified in model
@@ -984,6 +987,7 @@ def _cls_minus_threshold(
poi_arr,
confidence_level,
)
pyhf.set_backend(pyhf.tensorlib, initial_optimizer) # restore optimizer settings
return limit_results


@@ -1012,6 +1016,7 @@ def significance(
Returns:
SignificanceResults: observed and expected p-values and significances
"""
_, initial_optimizer = pyhf.get_backend() # store initial optimizer settings
pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(verbose=1))

log.info("calculating discovery significance")
@@ -1044,4 +1049,5 @@ def significance(
significance_results = SignificanceResults(
obs_p_val, obs_significance, exp_p_val, exp_significance
)
pyhf.set_backend(pyhf.tensorlib, initial_optimizer) # restore optimizer settings
return significance_results
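From the user's perspective, a globally configured backend and optimizer now survive a cabinetry fit call. A hedged usage sketch, where model and data stand in for a pyhf model/data pair built elsewhere (e.g. with cabinetry.model_utils.model_and_data):

import pyhf
import cabinetry

pyhf.set_backend("numpy", "scipy")            # user-chosen optimizer
fit_results = cabinetry.fit.fit(model, data)  # switches to Minuit internally
assert pyhf.get_backend()[1].name == "scipy"  # original optimizer is active again

The test assertions added below check exactly this after each affected call.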
9 changes: 9 additions & 0 deletions tests/fit/test_fit.py
@@ -4,6 +4,7 @@

import iminuit
import numpy as np
import pyhf
import pytest

from cabinetry import fit
@@ -29,13 +30,15 @@ def test_print_results(caplog):
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
@mock.patch("cabinetry.fit._run_minos", return_value={"Signal strength": (-0.1, 0.2)})
def test__fit_model_pyhf(mock_minos, example_spec, example_spec_multibin):
pyhf.set_backend("numpy", "scipy")
model, data = model_utils.model_and_data(example_spec)
fit_results = fit._fit_model_pyhf(model, data)
assert np.allclose(fit_results.bestfit, [8.33624084, 1.1])
assert np.allclose(fit_results.uncertainty, [0.38182003, 0.0])
assert fit_results.labels == ["Signal strength", "staterror_Signal-Region[0]"]
assert np.allclose(fit_results.best_twice_nll, 7.82495235)
assert np.allclose(fit_results.corr_mat, [[1.0, 0.0], [0.0, 0.0]])
assert pyhf.get_backend()[1].name == "scipy" # optimizer was reset

# Asimov fit, with fixed gamma (fixed not to Asimov MLE)
model, data = model_utils.model_and_data(example_spec, asimov=True)
@@ -104,13 +107,15 @@ def test__fit_model_pyhf(mock_minos, example_spec, example_spec_multibin):
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
@mock.patch("cabinetry.fit._run_minos", return_value={"Signal strength": (-0.1, 0.2)})
def test__fit_model_custom(mock_minos, example_spec, example_spec_multibin):
pyhf.set_backend("numpy", "scipy")
model, data = model_utils.model_and_data(example_spec)
fit_results = fit._fit_model_custom(model, data)
assert np.allclose(fit_results.bestfit, [8.33625071, 1.1])
assert np.allclose(fit_results.uncertainty, [0.38182151, 0.0])
assert fit_results.labels == ["Signal strength", "staterror_Signal-Region[0]"]
assert np.allclose(fit_results.best_twice_nll, 7.82495235)
assert np.allclose(fit_results.corr_mat, [[1.0, 0.0], [0.0, 0.0]])
assert pyhf.get_backend()[1].name == "scipy" # optimizer was reset

# Asimov fit, with fixed gamma (fixed not to Asimov MLE)
model, data = model_utils.model_and_data(example_spec, asimov=True)
@@ -659,6 +664,7 @@ def test_scan(mock_fit, example_spec):

def test_limit(example_spec_with_background, caplog):
caplog.set_level(logging.DEBUG)
pyhf.set_backend("numpy", "scipy")

# expected values for results
observed_limit = 0.749
@@ -686,6 +692,7 @@ def test_limit(example_spec_with_background, caplog):
assert limit_results.confidence_level == 0.95
# verify that POI values are sorted
assert np.allclose(limit_results.poi_values, sorted(limit_results.poi_values))
assert pyhf.get_backend()[1].name == "scipy" # optimizer was reset
caplog.clear()

# access negative POI values with lower bracket below zero
@@ -780,6 +787,7 @@ def test_limit(example_spec_with_background, caplog):


def test_significance(example_spec_with_background):
pyhf.set_backend("numpy", "scipy")
# increase observed data for smaller observed p-value
example_spec_with_background["observations"][0]["data"] = [196]

@@ -789,6 +797,7 @@ def test_significance(example_spec_with_background):
assert np.allclose(significance_results.observed_significance, 3.15402672)
assert np.allclose(significance_results.expected_p_value, 0.00033333)
assert np.allclose(significance_results.expected_significance, 3.40293444)
assert pyhf.get_backend()[1].name == "scipy" # optimizer was reset

# reduce signal for larger expected p-value
example_spec_with_background["channels"][0]["samples"][0]["data"] = [30]
2 changes: 1 addition & 1 deletion tests/test_integration.py
@@ -193,7 +193,7 @@ def test_integration(tmp_path, ntuple_creator, caplog):
assert np.allclose(
prediction_postfit.total_stdev_model_channels,
[[41.043814, 45.814417, 20.439575]],
- atol=2e-3,
+ atol=5e-3,
)
_ = cabinetry.visualize.data_mc(prediction_postfit, data, close_figure=True)
