From 132250a0d594f3d1abc865523bf1959a60a84a2f Mon Sep 17 00:00:00 2001
From: linyiqi
Date: Thu, 30 Dec 2021 15:49:53 +0800
Subject: [PATCH] [Fix] add eval hook resume test code

---
 mmfewshot/classification/core/evaluation/eval_hooks.py | 10 +++-------
 .../test_meta_test_eval_hook.py                        |  2 +-
 2 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/mmfewshot/classification/core/evaluation/eval_hooks.py b/mmfewshot/classification/core/evaluation/eval_hooks.py
index d324025..e088be5 100644
--- a/mmfewshot/classification/core/evaluation/eval_hooks.py
+++ b/mmfewshot/classification/core/evaluation/eval_hooks.py
@@ -86,8 +86,8 @@ def before_run(self, runner: Runner) -> None:
             warnings.warn('runner.meta is None. Creating an empty one.')
             runner.meta = dict()
         runner.meta.setdefault('hook_msgs', dict())
-        self.best_score = runner.meta.get('best_score', 0.0)
-        if self.best_score > 0.0:
+        if runner.meta['hook_msgs'].get('best_score', False):
+            self.best_score = runner.meta['hook_msgs']['best_score']
             runner.logger.info(
                 f'Previous best score is: {self.best_score}.')
         self.best_ckpt_path = runner.meta['hook_msgs'].get(
@@ -140,7 +140,6 @@ def _save_ckpt(self, runner: Runner, key_score: float) -> None:
 
         if self.best_score < key_score:
             self.best_score = key_score
-            runner.meta['best_score'] = self.best_score
             runner.meta['hook_msgs']['best_score'] = self.best_score
             runner.meta['hook_msgs']['ckpt_time'] = current
 
@@ -152,10 +151,7 @@ def _save_ckpt(self, runner: Runner, key_score: float) -> None:
 
             runner.meta['hook_msgs']['best_ckpt'] = self.best_ckpt_path
             runner.save_checkpoint(
-                runner.work_dir,
-                best_ckpt_name,
-                meta={'best_score': self.best_score},
-                create_symlink=False)
+                runner.work_dir, best_ckpt_name, create_symlink=False)
             runner.logger.info(
                 f'Now best checkpoint is saved as {best_ckpt_name}.')
             runner.logger.info(
diff --git a/tests/test_classification_runtime/test_meta_test_eval_hook.py b/tests/test_classification_runtime/test_meta_test_eval_hook.py
index 2b3c626..c2c7039 100644
--- a/tests/test_classification_runtime/test_meta_test_eval_hook.py
+++ b/tests/test_classification_runtime/test_meta_test_eval_hook.py
@@ -213,6 +213,6 @@ def test_resume_eval_hook():
         logger=logging.getLogger(),
         max_epochs=1)
     runner.register_hook(eval_hook)
-    runner.meta = {'best_score': 99.0}
+    runner.meta = {'hook_msgs': {'best_score': 99.0}}
     runner.run([loader], [('train', 1)], 1)
     assert eval_hook.best_score == 99.0