Describe the bug
02/10 19:02:07 - mmengine - INFO - Checkpoints will be saved to /gemini/prun.
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py:1053: UserWarning: Using a non-full backward hook when the forward contains multiple autograd Nodes is deprecated and will be removed in future versions. This hook will be missing some grad_input. Please use register_full_backward_hook to get the documented behavior.
  warnings.warn("Using a non-full backward hook when the forward contains multiple autograd Nodes "
Traceback (most recent call last):
  File "tools/train.py", line 121, in <module>
    main()
  File "tools/train.py", line 117, in main
    runner.train()
  File "/usr/local/lib/python3.8/dist-packages/mmengine/runner/runner.py", line 1706, in train
    model = self.train_loop.run()  # type: ignore
  File "/usr/local/lib/python3.8/dist-packages/mmengine/runner/loops.py", line 278, in run
    self.run_iter(data_batch)
  File "/usr/local/lib/python3.8/dist-packages/mmengine/runner/loops.py", line 301, in run_iter
    outputs = self.runner.model.train_step(
  File "/gemini/code/mmrazor/mmrazor/implementations/pruning/group_fisher/algorithm.py", line 61, in train_step
    return self._train_step(data, optim_wrapper)
  File "/gemini/code/mmrazor/mmrazor/implementations/pruning/group_fisher/algorithm.py", line 69, in _train_step
    self.mutator.update_imp()
  File "/gemini/code/mmrazor/mmrazor/implementations/pruning/group_fisher/mutator.py", line 82, in update_imp
    unit.update_fisher_info()
  File "/usr/local/lib/python3.8/dist-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
    return func(*args, **kwargs)
  File "/gemini/code/mmrazor/mmrazor/implementations/pruning/group_fisher/unit.py", line 144, in update_fisher_info
    batch_fisher_sum = self.current_batch_fisher
  File "/gemini/code/mmrazor/mmrazor/implementations/pruning/group_fisher/unit.py", line 158, in current_batch_fisher
    fisher = fisher + self._fisher_of_a_module(module)
  File "/usr/local/lib/python3.8/dist-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
    return func(*args, **kwargs)
  File "/gemini/code/mmrazor/mmrazor/implementations/pruning/group_fisher/unit.py", line 171, in _fisher_of_a_module
    assert len(module.recorded_input) > 0 and \
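From the truncated assert at unit.py line 171, it looks like module.recorded_input is empty when the Fisher importance is updated, i.e. the forward/backward recording hooks never fired before update_imp() was called (the non-full backward hook warning above may be related). The snippet below is only my own minimal sketch in plain PyTorch of how I understand that recording is supposed to work; the recorded_input / recorded_grad dicts and the hook functions are my guesses mimicking the attribute names in the traceback, not the actual mmrazor implementation:

```python
import torch
import torch.nn as nn

# Toy stand-in for what I assume the GroupFisher recording hooks do:
# store the forward input and the backward grad per module, so that
# _fisher_of_a_module has matching (input, grad) pairs to work with.
recorded_input = {}
recorded_grad = {}

def forward_hook(module, inputs, output):
    recorded_input.setdefault(module, []).append(inputs[0].detach())

def backward_hook(module, grad_input, grad_output):
    # grad_input[0] is the gradient w.r.t. the module's first input
    if grad_input[0] is not None:
        recorded_grad.setdefault(module, []).append(grad_input[0].detach())

model = nn.Sequential(
    nn.Conv2d(3, 8, 3, padding=1),
    nn.ReLU(),
    nn.Conv2d(8, 8, 3, padding=1),
)
conv = model[0]
conv.register_forward_hook(forward_hook)
# register_full_backward_hook instead of the deprecated register_backward_hook,
# which is what the UserWarning above complains about.
conv.register_full_backward_hook(backward_hook)

x = torch.randn(2, 3, 32, 32, requires_grad=True)
loss = model(x).sum()
loss.backward()

# If either list were empty here, an assert like the one in unit.py:171 would fail.
print(len(recorded_input[conv]), len(recorded_grad[conv]))
```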
To Reproduce
python tools/train.py /gemini/code/mmrazor/configs/pruning/mmdet/group_fisher/degnet/prun.py
Post related information
I have noticed that someone has already raised a similar issue, but no solution was provided. I hope you can help me solve this problem. Thank you.