# Source code for nemo.core.classes.module

# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from contextlib import contextmanager

from torch.nn import Module

from nemo.core.classes.common import FileIO, Serialization, Typing

__all__ = ['NeuralModule']

class NeuralModule(Module, Typing, Serialization, FileIO):
    """
    Abstract class offering interface shared between all PyTorch Neural Modules.
    """

    @property
    def num_weights(self) -> int:
        """
        Utility property that returns the total number of trainable parameters
        of the NeuralModule.

        Returns:
            int: sum of ``numel()`` over all parameters with ``requires_grad=True``.
        """
        num: int = 0
        for p in self.parameters():
            if p.requires_grad:
                num += p.numel()
        return num

    def input_example(self, max_batch=None, max_dim=None):
        """
        Override this method if random inputs won't work.

        Args:
            max_batch: optional hint for the maximum batch size of the example.
            max_dim: optional hint for the maximum dimension size of the example.

        Returns:
            A tuple sample of valid input data, or None when not overridden.
        """
        return None

    def freeze(self) -> None:
        r"""
        Freeze all params for inference: disables gradients on every parameter
        and switches the module to eval mode.
        """
        for param in self.parameters():
            param.requires_grad = False
        self.eval()

    def unfreeze(self) -> None:
        """
        Unfreeze all parameters for training: enables gradients on every
        parameter and switches the module to train mode.
        """
        for param in self.parameters():
            param.requires_grad = True
        self.train()

    @contextmanager
    def as_frozen(self):
        """
        Context manager which temporarily freezes a module, yields control and
        finally unfreezes the module, restoring both the original train/eval
        mode and each parameter's original ``requires_grad`` flag.
        """
        # Bug fix: the original wrote `training_mode = grad_map = {}`, binding
        # both names to the SAME dict. After recording the grad flags,
        # `training_mode` was truthy whenever the module had any parameters,
        # so the module was unconditionally switched to train mode on exit.
        # Capture the actual training flag instead.
        training_mode = self.training
        grad_map = {pname: param.requires_grad for pname, param in self.named_parameters()}
        self.freeze()
        try:
            yield
        finally:
            self.unfreeze()
            # Restore the per-parameter requires_grad state recorded on entry;
            # unfreeze() set every parameter to True, which may not match.
            for pname, param in self.named_parameters():
                param.requires_grad = grad_map[pname]
            if training_mode:
                self.train()
            else:
                self.eval()