PhysicsNeMo Sym Hydra#
hydra.arch#
Architecture/Model configs
- class physicsnemo.sym.hydra.arch.AFNOConf(
- arch_type: str = 'afno',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- img_shape: Tuple[int] = '???',
- patch_size: int = 16,
- embed_dim: int = 256,
- depth: int = 4,
- num_blocks: int = 8,
)
Bases: ModelConf
- arch_type: str = 'afno'#
- depth: int = 4#
- embed_dim: int = 256#
- img_shape: Tuple[int] = '???'#
- num_blocks: int = 8#
- patch_size: int = 16#
- class physicsnemo.sym.hydra.arch.ConvFullyConnectedConf(
- arch_type: str = 'conv_fully_connected',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- layer_size: int = 512,
- nr_layers: int = 6,
- skip_connections: bool = False,
- activation_fn: str = 'silu',
- adaptive_activations: bool = False,
- weight_norm: bool = True,
)
Bases: ModelConf
- activation_fn: str = 'silu'#
- adaptive_activations: bool = False#
- arch_type: str = 'conv_fully_connected'#
- layer_size: int = 512#
- nr_layers: int = 6#
- skip_connections: bool = False#
- weight_norm: bool = True#
- class physicsnemo.sym.hydra.arch.DeepOConf(
- arch_type: str = 'deeponet',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- trunk_dim: Any = None,
- branch_dim: Any = None,
)
Bases: ModelConf
- arch_type: str = 'deeponet'#
- branch_dim: Any = None#
- trunk_dim: Any = None#
- class physicsnemo.sym.hydra.arch.DistributedAFNOConf(
- arch_type: str = 'distributed_afno',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- img_shape: Tuple[int] = '???',
- patch_size: int = 16,
- embed_dim: int = 256,
- depth: int = 4,
- num_blocks: int = 8,
- channel_parallel_inputs: bool = False,
- channel_parallel_outputs: bool = False,
)
Bases: ModelConf
- arch_type: str = 'distributed_afno'#
- channel_parallel_inputs: bool = False#
- channel_parallel_outputs: bool = False#
- depth: int = 4#
- embed_dim: int = 256#
- img_shape: Tuple[int] = '???'#
- num_blocks: int = 8#
- patch_size: int = 16#
- class physicsnemo.sym.hydra.arch.FNOConf(
- arch_type: str = 'fno',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- dimension: int = '???',
- nr_fno_layers: int = 4,
- fno_modes: Any = 16,
- padding: int = 8,
- padding_type: str = 'constant',
- activation_fn: str = 'gelu',
- coord_features: bool = True,
)
Bases: ModelConf
- activation_fn: str = 'gelu'#
- arch_type: str = 'fno'#
- coord_features: bool = True#
- dimension: int = '???'#
- fno_modes: Any = 16#
- nr_fno_layers: int = 4#
- padding: int = 8#
- padding_type: str = 'constant'#
- class physicsnemo.sym.hydra.arch.FourierConf(
- arch_type: str = 'fourier',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- frequencies: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])",
- frequencies_params: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])",
- activation_fn: str = 'silu',
- layer_size: int = 512,
- nr_layers: int = 6,
- skip_connections: bool = False,
- weight_norm: bool = True,
- adaptive_activations: bool = False,
)
Bases: ModelConf
- activation_fn: str = 'silu'#
- adaptive_activations: bool = False#
- arch_type: str = 'fourier'#
- frequencies: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])"#
- frequencies_params: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])"#
- layer_size: int = 512#
- nr_layers: int = 6#
- skip_connections: bool = False#
- weight_norm: bool = True#
- class physicsnemo.sym.hydra.arch.FullyConnectedConf(
- arch_type: str = 'fully_connected',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- layer_size: int = 512,
- nr_layers: int = 6,
- skip_connections: bool = False,
- activation_fn: str = 'silu',
- adaptive_activations: bool = False,
- weight_norm: bool = True,
)
Bases: ModelConf
- activation_fn: str = 'silu'#
- adaptive_activations: bool = False#
- arch_type: str = 'fully_connected'#
- layer_size: int = 512#
- nr_layers: int = 6#
- skip_connections: bool = False#
- weight_norm: bool = True#
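All of the architecture configs above follow the same pattern: a ModelConf subclass whose fields become Hydra config options. As a minimal, hedged sketch (the conf directory, config name, and the location of the arch group entry are assumptions for illustration, not a verbatim PhysicsNeMo example), the compose() helper documented under hydra.utils below can load and inspect one:

```python
from physicsnemo.sym.hydra.utils import compose

# Assumes a conf/config.yaml whose defaults list selects the
# fully_connected architecture; paths and keys are illustrative.
cfg = compose(config_name="config", config_path="conf")
fc = cfg.arch.fully_connected  # assumed mount point of the arch group entry
print(fc.layer_size, fc.nr_layers, fc.activation_fn)  # 512 6 silu by default
```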
- class physicsnemo.sym.hydra.arch.FusedFourierNetConf(
- arch_type: str = 'fused_fourier',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- layer_size: int = 128,
- nr_layers: int = 6,
- activation_fn: str = 'sigmoid',
- n_frequencies: int = 12,
)
Bases: ModelConf
- activation_fn: str = 'sigmoid'#
- arch_type: str = 'fused_fourier'#
- layer_size: int = 128#
- n_frequencies: int = 12#
- nr_layers: int = 6#
- class physicsnemo.sym.hydra.arch.FusedGridEncodingNetConf(
- arch_type: str = 'fused_hash_encoding',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- layer_size: int = 128,
- nr_layers: int = 6,
- activation_fn: str = 'sigmoid',
- indexing: str = 'Hash',
- n_levels: int = 16,
- n_features_per_level: int = 2,
- log2_hashmap_size: int = 19,
- base_resolution: int = 16,
- per_level_scale: float = 2.0,
- interpolation: str = 'Smoothstep',
)
Bases: ModelConf
- activation_fn: str = 'sigmoid'#
- arch_type: str = 'fused_hash_encoding'#
- base_resolution: int = 16#
- indexing: str = 'Hash'#
- interpolation: str = 'Smoothstep'#
- layer_size: int = 128#
- log2_hashmap_size: int = 19#
- n_features_per_level: int = 2#
- n_levels: int = 16#
- nr_layers: int = 6#
- per_level_scale: float = 2.0#
- class physicsnemo.sym.hydra.arch.FusedMLPConf(
- arch_type: str = 'fused_fully_connected',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- layer_size: int = 128,
- nr_layers: int = 6,
- activation_fn: str = 'sigmoid',
)
Bases: ModelConf
- activation_fn: str = 'sigmoid'#
- arch_type: str = 'fused_fully_connected'#
- layer_size: int = 128#
- nr_layers: int = 6#
- class physicsnemo.sym.hydra.arch.HighwayFourierConf(
- arch_type: str = 'highway_fourier',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- frequencies: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])",
- frequencies_params: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])",
- activation_fn: str = 'silu',
- layer_size: int = 512,
- nr_layers: int = 6,
- skip_connections: bool = False,
- weight_norm: bool = True,
- adaptive_activations: bool = False,
- transform_fourier_features: bool = True,
- project_fourier_features: bool = False,
)
Bases: ModelConf
- activation_fn: str = 'silu'#
- adaptive_activations: bool = False#
- arch_type: str = 'highway_fourier'#
- frequencies: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])"#
- frequencies_params: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])"#
- layer_size: int = 512#
- nr_layers: int = 6#
- project_fourier_features: bool = False#
- skip_connections: bool = False#
- transform_fourier_features: bool = True#
- weight_norm: bool = True#
- class physicsnemo.sym.hydra.arch.ModelConf(
- arch_type: str = '???',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
)
Bases: object
- arch_type: str = '???'#
- detach_keys: Any = '???'#
- input_keys: Any = '???'#
- output_keys: Any = '???'#
- scaling: Any = None#
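The '???' defaults seen throughout this page are OmegaConf's MISSING marker: the field is mandatory and must be supplied by a YAML file or an override before it is read. A small self-contained demonstration of the behavior, using a stand-in dataclass rather than the real ModelConf:

```python
from dataclasses import dataclass
from omegaconf import MISSING, OmegaConf

@dataclass
class DemoConf:
    arch_type: str = MISSING  # rendered as '???' in these docs
    scaling: float = 1.0

cfg = OmegaConf.structured(DemoConf)
print(OmegaConf.is_missing(cfg, "arch_type"))  # True
print(cfg.scaling)                             # 1.0
# Reading cfg.arch_type before assignment raises MissingMandatoryValue.
cfg.arch_type = "fully_connected"
print(cfg.arch_type)
```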
- class physicsnemo.sym.hydra.arch.ModifiedFourierConf(
- arch_type: str = 'modified_fourier',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- frequencies: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])",
- frequencies_params: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])",
- activation_fn: str = 'silu',
- layer_size: int = 512,
- nr_layers: int = 6,
- skip_connections: bool = False,
- weight_norm: bool = True,
- adaptive_activations: bool = False,
)
Bases: ModelConf
- activation_fn: str = 'silu'#
- adaptive_activations: bool = False#
- arch_type: str = 'modified_fourier'#
- frequencies: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])"#
- frequencies_params: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])"#
- layer_size: int = 512#
- nr_layers: int = 6#
- skip_connections: bool = False#
- weight_norm: bool = True#
- class physicsnemo.sym.hydra.arch.MultiplicativeFilterConf(
- arch_type: str = 'multiplicative_fourier',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- layer_size: int = 512,
- nr_layers: int = 6,
- skip_connections: bool = False,
- activation_fn: str = 'identity',
- filter_type: str = 'fourier',
- weight_norm: bool = True,
- input_scale: float = 10.0,
- gabor_alpha: float = 6.0,
- gabor_beta: float = 1.0,
- normalization: Any = None,
)
Bases: ModelConf
- activation_fn: str = 'identity'#
- arch_type: str = 'multiplicative_fourier'#
- filter_type: str = 'fourier'#
- gabor_alpha: float = 6.0#
- gabor_beta: float = 1.0#
- input_scale: float = 10.0#
- layer_size: int = 512#
- normalization: Any = None#
- nr_layers: int = 6#
- skip_connections: bool = False#
- weight_norm: bool = True#
- class physicsnemo.sym.hydra.arch.MultiresolutionHashNetConf(
- arch_type: str = 'hash_encoding',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- layer_size: int = 64,
- nr_layers: int = 3,
- skip_connections: bool = False,
- weight_norm: bool = True,
- adaptive_activations: bool = False,
- bounds: Any = '[(1.0, 1.0), (1.0, 1.0)]',
- nr_levels: int = 16,
- nr_features_per_level: int = 2,
- log2_hashmap_size: int = 19,
- base_resolution: int = 2,
- finest_resolution: int = 32,
)
Bases: ModelConf
- adaptive_activations: bool = False#
- arch_type: str = 'hash_encoding'#
- base_resolution: int = 2#
- bounds: Any = '[(1.0, 1.0), (1.0, 1.0)]'#
- finest_resolution: int = 32#
- layer_size: int = 64#
- log2_hashmap_size: int = 19#
- nr_features_per_level: int = 2#
- nr_layers: int = 3#
- nr_levels: int = 16#
- skip_connections: bool = False#
- weight_norm: bool = True#
- class physicsnemo.sym.hydra.arch.MultiscaleFourierConf(
- arch_type: str = 'multiscale_fourier',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- frequencies: Any = <factory>,
- frequencies_params: Any = None,
- activation_fn: str = 'silu',
- layer_size: int = 512,
- nr_layers: int = 6,
- skip_connections: bool = False,
- weight_norm: bool = True,
- adaptive_activations: bool = False,
)
Bases: ModelConf
- activation_fn: str = 'silu'#
- adaptive_activations: bool = False#
- arch_type: str = 'multiscale_fourier'#
- frequencies: Any#
- frequencies_params: Any = None#
- layer_size: int = 512#
- nr_layers: int = 6#
- skip_connections: bool = False#
- weight_norm: bool = True#
- class physicsnemo.sym.hydra.arch.Pix2PixConf(
- arch_type: str = 'pix2pix',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- dimension: int = '???',
- conv_layer_size: int = 64,
- n_downsampling: int = 3,
- n_blocks: int = 3,
- scaling_factor: int = 1,
- batch_norm: bool = True,
- padding_type: str = 'reflect',
- activation_fn: str = 'relu',
)
Bases: ModelConf
- activation_fn: str = 'relu'#
- arch_type: str = 'pix2pix'#
- batch_norm: bool = True#
- conv_layer_size: int = 64#
- dimension: int = '???'#
- n_blocks: int = 3#
- n_downsampling: int = 3#
- padding_type: str = 'reflect'#
- scaling_factor: int = 1#
- class physicsnemo.sym.hydra.arch.SRResConf(
- arch_type: str = 'super_res',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- large_kernel_size: int = 7,
- small_kernel_size: int = 3,
- conv_layer_size: int = 32,
- n_resid_blocks: int = 8,
- scaling_factor: int = 8,
- activation_fn: str = 'prelu',
)
Bases: ModelConf
- activation_fn: str = 'prelu'#
- arch_type: str = 'super_res'#
- conv_layer_size: int = 32#
- large_kernel_size: int = 7#
- n_resid_blocks: int = 8#
- scaling_factor: int = 8#
- small_kernel_size: int = 3#
- class physicsnemo.sym.hydra.arch.SirenConf(
- arch_type: str = 'siren',
- input_keys: Any = '???',
- output_keys: Any = '???',
- detach_keys: Any = '???',
- scaling: Any = None,
- layer_size: int = 512,
- nr_layers: int = 6,
- first_omega: float = 30.0,
- omega: float = 30.0,
- normalization: Any = None,
)
Bases: ModelConf
- arch_type: str = 'siren'#
- first_omega: float = 30.0#
- layer_size: int = 512#
- normalization: Any = None#
- nr_layers: int = 6#
- omega: float = 30.0#
hydra.config#
PhysicsNeMo main config
- class physicsnemo.sym.hydra.config.DebugPhysicsNeMoConfig(
- network_dir: str = '.',
- initialization_network_dir: str = '',
- save_filetypes: str = 'vtk',
- summary_histograms: str = 'off',
- jit: bool = True,
- jit_use_nvfuser: bool = True,
- jit_arch_mode: str = 'only_activation',
- jit_autograd_nodes: bool = False,
- cuda_graphs: bool = True,
- cuda_graph_warmup: int = 20,
- find_unused_parameters: bool = False,
- broadcast_buffers: bool = False,
- device: str = '',
- debug: bool = True,
- run_mode: str = 'train',
- arch: Any = '???',
- models: Any = '???',
- training: physicsnemo.sym.hydra.training.TrainingConf = '???',
- stop_criterion: physicsnemo.sym.hydra.training.StopCriterionConf = '???',
- loss: physicsnemo.sym.hydra.loss.LossConf = '???',
- optimizer: physicsnemo.sym.hydra.optimizer.OptimizerConf = '???',
- scheduler: physicsnemo.sym.hydra.scheduler.SchedulerConf = '???',
- batch_size: Any = '???',
- profiler: physicsnemo.sym.hydra.profiler.ProfilerConf = '???',
- hydra: Any = <factory>,
- custom: Any = '???',
- defaults: List[Any] = <factory>,
)
Bases: PhysicsNeMoConfig
- debug: bool = True#
- defaults: List[Any]#
- class physicsnemo.sym.hydra.config.DefaultPhysicsNeMoConfig(
- network_dir: str = '.',
- initialization_network_dir: str = '',
- save_filetypes: str = 'vtk',
- summary_histograms: str = 'off',
- jit: bool = True,
- jit_use_nvfuser: bool = True,
- jit_arch_mode: str = 'only_activation',
- jit_autograd_nodes: bool = False,
- cuda_graphs: bool = True,
- cuda_graph_warmup: int = 20,
- find_unused_parameters: bool = False,
- broadcast_buffers: bool = False,
- device: str = '',
- debug: bool = False,
- run_mode: str = 'train',
- arch: Any = '???',
- models: Any = '???',
- training: physicsnemo.sym.hydra.training.TrainingConf = '???',
- stop_criterion: physicsnemo.sym.hydra.training.StopCriterionConf = '???',
- loss: physicsnemo.sym.hydra.loss.LossConf = '???',
- optimizer: physicsnemo.sym.hydra.optimizer.OptimizerConf = '???',
- scheduler: physicsnemo.sym.hydra.scheduler.SchedulerConf = '???',
- batch_size: Any = '???',
- profiler: physicsnemo.sym.hydra.profiler.ProfilerConf = '???',
- hydra: Any = <factory>,
- custom: Any = '???',
- defaults: List[Any] = <factory>,
)
Bases: PhysicsNeMoConfig
- defaults: List[Any]#
- class physicsnemo.sym.hydra.config.ExperimentalPhysicsNeMoConfig(
- network_dir: str = '.',
- initialization_network_dir: str = '',
- save_filetypes: str = 'vtk',
- summary_histograms: str = 'off',
- jit: bool = True,
- jit_use_nvfuser: bool = True,
- jit_arch_mode: str = 'only_activation',
- jit_autograd_nodes: bool = False,
- cuda_graphs: bool = True,
- cuda_graph_warmup: int = 20,
- find_unused_parameters: bool = False,
- broadcast_buffers: bool = False,
- device: str = '',
- debug: bool = False,
- run_mode: str = 'train',
- arch: Any = '???',
- models: Any = '???',
- training: physicsnemo.sym.hydra.training.TrainingConf = '???',
- stop_criterion: physicsnemo.sym.hydra.training.StopCriterionConf = '???',
- loss: physicsnemo.sym.hydra.loss.LossConf = '???',
- optimizer: physicsnemo.sym.hydra.optimizer.OptimizerConf = '???',
- scheduler: physicsnemo.sym.hydra.scheduler.SchedulerConf = '???',
- batch_size: Any = '???',
- profiler: physicsnemo.sym.hydra.profiler.ProfilerConf = '???',
- hydra: Any = <factory>,
- custom: Any = '???',
- defaults: List[Any] = <factory>,
- pde: physicsnemo.sym.hydra.pde.PDEConf = '???',
)
Bases: PhysicsNeMoConfig
- defaults: List[Any]#
- pde: PDEConf = '???'#
- class physicsnemo.sym.hydra.config.PhysicsNeMoConfig(
- network_dir: str = '.',
- initialization_network_dir: str = '',
- save_filetypes: str = 'vtk',
- summary_histograms: str = 'off',
- jit: bool = True,
- jit_use_nvfuser: bool = True,
- jit_arch_mode: str = 'only_activation',
- jit_autograd_nodes: bool = False,
- cuda_graphs: bool = True,
- cuda_graph_warmup: int = 20,
- find_unused_parameters: bool = False,
- broadcast_buffers: bool = False,
- device: str = '',
- debug: bool = False,
- run_mode: str = 'train',
- arch: Any = '???',
- models: Any = '???',
- training: physicsnemo.sym.hydra.training.TrainingConf = '???',
- stop_criterion: physicsnemo.sym.hydra.training.StopCriterionConf = '???',
- loss: physicsnemo.sym.hydra.loss.LossConf = '???',
- optimizer: physicsnemo.sym.hydra.optimizer.OptimizerConf = '???',
- scheduler: physicsnemo.sym.hydra.scheduler.SchedulerConf = '???',
- batch_size: Any = '???',
- profiler: physicsnemo.sym.hydra.profiler.ProfilerConf = '???',
- hydra: Any = <factory>,
- custom: Any = '???',
)
Bases: object
- arch: Any = '???'#
- batch_size: Any = '???'#
- broadcast_buffers: bool = False#
- cuda_graph_warmup: int = 20#
- cuda_graphs: bool = True#
- custom: Any = '???'#
- debug: bool = False#
- device: str = ''#
- find_unused_parameters: bool = False#
- hydra: Any#
- initialization_network_dir: str = ''#
- jit: bool = True#
- jit_arch_mode: str = 'only_activation'#
- jit_autograd_nodes: bool = False#
- jit_use_nvfuser: bool = True#
- models: Any = '???'#
- network_dir: str = '.'#
- optimizer: OptimizerConf = '???'#
- profiler: ProfilerConf = '???'#
- run_mode: str = 'train'#
- save_filetypes: str = 'vtk'#
- scheduler: SchedulerConf = '???'#
- stop_criterion: StopCriterionConf = '???'#
- summary_histograms: str = 'off'#
- training: TrainingConf = '???'#
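In practice these config classes are not constructed by hand: a script decorated with the main() helper documented under hydra.utils receives a fully populated PhysicsNeMoConfig. A hedged sketch of that entry point (the conf directory and config name are assumptions for illustration):

```python
from physicsnemo.sym.hydra.config import PhysicsNeMoConfig
from physicsnemo.sym.hydra.utils import main

@main(config_path="conf", config_name="config")
def run(cfg: PhysicsNeMoConfig) -> None:
    # Fields take the defaults documented above unless overridden in YAML or CLI.
    print(cfg.network_dir, cfg.run_mode, cfg.jit)

if __name__ == "__main__":
    run()
```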
hydra.hydra#
Hydra-related configs
- class physicsnemo.sym.hydra.hydra.DebugFormat(
- format: str = '[%(levelname)s][%(asctime)s][%(module)s] - %(message)s',
- datefmt: str = '%Y-%m-%d %H:%M:%S',
)
Bases: object
- datefmt: str = '%Y-%m-%d %H:%M:%S'#
- format: str = '[%(levelname)s][%(asctime)s][%(module)s] - %(message)s'#
- class physicsnemo.sym.hydra.hydra.DebugLogging(
- version: int = 1,
- formatters: Any = <factory>,
- handlers: Any = <factory>,
- root: Any = <factory>,
- disable_existing_loggers: bool = False,
- level: int = 0,
)
Bases: object
- disable_existing_loggers: bool = False#
- formatters: Any#
- handlers: Any#
- level: int = 0#
- root: Any#
- version: int = 1#
- class physicsnemo.sym.hydra.hydra.DefaultLogging(
- version: int = 1,
- formatters: Any = <factory>,
- handlers: Any = <factory>,
- root: Any = <factory>,
- disable_existing_loggers: bool = False,
- level: int = 20,
)
Bases: object
- disable_existing_loggers: bool = False#
- formatters: Any#
- handlers: Any#
- level: int = 20#
- root: Any#
- version: int = 1#
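The level fields use Python's numeric logging levels: DefaultLogging's 20 is logging.INFO, while DebugLogging's 0 is logging.NOTSET, which defers filtering to the handlers.

```python
import logging

print(logging.INFO)    # 20 -> DefaultLogging.level
print(logging.NOTSET)  # 0  -> DebugLogging.level
```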
hydra.loss#
Supported PhysicsNeMo loss aggregator configs
- class physicsnemo.sym.hydra.loss.AggregatorGradNormConf(
- _target_: str = 'physicsnemo.sym.loss.aggregator.GradNorm',
- weights: Any = None,
- alpha: float = 1.0,
)
Bases: LossConf
- alpha: float = 1.0#
- class physicsnemo.sym.hydra.loss.AggregatorHomoscedasticConf(
- _target_: str = 'physicsnemo.sym.loss.aggregator.HomoscedasticUncertainty',
- weights: Any = None,
)
Bases: LossConf
- class physicsnemo.sym.hydra.loss.AggregatorLRAnnealingConf(
- _target_: str = 'physicsnemo.sym.loss.aggregator.LRAnnealing',
- weights: Any = None,
- update_freq: int = 1,
- alpha: float = 0.01,
- ref_key: Any = None,
- eps: float = 1e-08,
)
Bases: LossConf
- alpha: float = 0.01#
- eps: float = 1e-08#
- ref_key: Any = None#
- update_freq: int = 1#
- class physicsnemo.sym.hydra.loss.AggregatorRelobraloConf(
- _target_: str = 'physicsnemo.sym.loss.aggregator.Relobralo',
- weights: Any = None,
- alpha: float = 0.95,
- beta: float = 0.99,
- tau: float = 1.0,
- eps: float = 1e-08,
)
Bases: LossConf
- alpha: float = 0.95#
- beta: float = 0.99#
- eps: float = 1e-08#
- tau: float = 1.0#
- class physicsnemo.sym.hydra.loss.AggregatorResNormConf(
- _target_: str = 'physicsnemo.sym.loss.aggregator.ResNorm',
- weights: Any = None,
- alpha: float = 1.0,
)
Bases: LossConf
- alpha: float = 1.0#
- class physicsnemo.sym.hydra.loss.AggregatorSoftAdaptConf(
- _target_: str = 'physicsnemo.sym.loss.aggregator.SoftAdapt',
- weights: Any = None,
- eps: float = 1e-08,
)
Bases: LossConf
- eps: float = 1e-08#
- class physicsnemo.sym.hydra.loss.AggregatorSumConf(
- _target_: str = 'physicsnemo.sym.loss.aggregator.Sum',
- weights: Any = None,
)
Bases: LossConf
- class physicsnemo.sym.hydra.loss.LossConf(_target_: str = '???', weights: Any = None)[source]#
Bases: object
- weights: Any = None#
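Each aggregator config carries a Hydra _target_ string, so the corresponding class can be built with hydra.utils.instantiate. A generic, hedged sketch — the constructor arguments of the aggregator classes are not documented on this page, so the instantiation call is left indicative:

```python
from hydra.utils import instantiate  # consumes the _target_ key when called
from omegaconf import OmegaConf

sum_cfg = OmegaConf.create(
    {"_target_": "physicsnemo.sym.loss.aggregator.Sum", "weights": None}
)
# aggregator = instantiate(sum_cfg, ...)  # remaining args depend on the class
print(sum_cfg["_target_"])
```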
hydra.optimizer#
Supported optimizer configs
- class physicsnemo.sym.hydra.optimizer.A2GradExpConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.A2GradExp',
- lr: float = 0.01,
- beta: float = 10.0,
- lips: float = 10.0,
)
Bases: OptimizerConf
- beta: float = 10.0#
- lips: float = 10.0#
- lr: float = 0.01#
- class physicsnemo.sym.hydra.optimizer.A2GradIncConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.A2GradInc',
- lr: float = 0.01,
- beta: float = 10.0,
- lips: float = 10.0,
)
Bases: OptimizerConf
- beta: float = 10.0#
- lips: float = 10.0#
- lr: float = 0.01#
- class physicsnemo.sym.hydra.optimizer.A2GradUniConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.A2GradUni',
- lr: float = 0.01,
- beta: float = 10.0,
- lips: float = 10.0,
)
Bases: OptimizerConf
- beta: float = 10.0#
- lips: float = 10.0#
- lr: float = 0.01#
- class physicsnemo.sym.hydra.optimizer.ASGDConf(
- _params_: Any = <factory>,
- _target_: str = 'torch.optim.ASGD',
- lr: float = 0.01,
- lambd: float = 0.0001,
- alpha: float = 0.75,
- t0: float = 1000000.0,
- weight_decay: float = 0,
)
Bases: OptimizerConf
- alpha: float = 0.75#
- lambd: float = 0.0001#
- lr: float = 0.01#
- t0: float = 1000000.0#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.AccSGDConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.AccSGD',
- lr: float = 0.001,
- kappa: float = 1000.0,
- xi: float = 10.0,
- small_const: float = 0.7,
- weight_decay: float = 0,
)
Bases: OptimizerConf
- kappa: float = 1000.0#
- lr: float = 0.001#
- small_const: float = 0.7#
- weight_decay: float = 0#
- xi: float = 10.0#
- class physicsnemo.sym.hydra.optimizer.AdaBeliefConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.AdaBelief',
- lr: float = 0.001,
- betas: List[float] = <factory>,
- eps: float = 0.001,
- weight_decay: float = 0,
- amsgrad: bool = False,
- weight_decouple: bool = False,
- fixed_decay: bool = False,
- rectify: bool = False,
)
Bases: OptimizerConf
- amsgrad: bool = False#
- betas: List[float]#
- eps: float = 0.001#
- fixed_decay: bool = False#
- lr: float = 0.001#
- rectify: bool = False#
- weight_decay: float = 0#
- weight_decouple: bool = False#
- class physicsnemo.sym.hydra.optimizer.AdaBoundConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.AdaBound',
- lr: float = 0.001,
- betas: List[float] = <factory>,
- final_lr: float = 0.1,
- gamma: float = 0.001,
- eps: float = 1e-08,
- weight_decay: float = 0,
- amsbound: bool = False,
)
Bases: OptimizerConf
- amsbound: bool = False#
- betas: List[float]#
- eps: float = 1e-08#
- final_lr: float = 0.1#
- gamma: float = 0.001#
- lr: float = 0.001#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.AdaModConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.AdaMod',
- lr: float = 0.001,
- betas: List[float] = <factory>,
- beta3: float = 0.999,
- eps: float = 1e-08,
- weight_decay: float = 0,
)
Bases: OptimizerConf
- beta3: float = 0.999#
- betas: List[float]#
- eps: float = 1e-08#
- lr: float = 0.001#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.AdadeltaConf(
- _params_: Any = <factory>,
- _target_: str = 'torch.optim.Adadelta',
- lr: float = 1.0,
- rho: float = 0.9,
- eps: float = 1e-06,
- weight_decay: float = 0,
)
Bases: OptimizerConf
- eps: float = 1e-06#
- lr: float = 1.0#
- rho: float = 0.9#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.AdafactorConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.Adafactor',
- lr: float = 0.001,
- eps2: List[float] = <factory>,
- clip_threshold: float = 1.0,
- decay_rate: float = -0.8,
- beta1: Any = None,
- weight_decay: float = 0,
- scale_parameter: bool = True,
- relative_step: bool = True,
- warmup_init: bool = False,
)
Bases: OptimizerConf
- beta1: Any = None#
- clip_threshold: float = 1.0#
- decay_rate: float = -0.8#
- eps2: List[float]#
- lr: float = 0.001#
- relative_step: bool = True#
- scale_parameter: bool = True#
- warmup_init: bool = False#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.AdagradConf(
- _params_: Any = <factory>,
- _target_: str = 'torch.optim.Adagrad',
- lr: float = 0.01,
- lr_decay: float = 0,
- weight_decay: float = 0,
- initial_accumulator_value: float = 0,
- eps: float = 1e-10,
)
Bases: OptimizerConf
- eps: float = 1e-10#
- initial_accumulator_value: float = 0#
- lr: float = 0.01#
- lr_decay: float = 0#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.AdahessianConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.Adahessian',
- lr: float = 0.1,
- betas: List[float] = <factory>,
- eps: float = 0.0001,
- weight_decay: float = 0.0,
- hessian_power: float = 1.0,
)
Bases: OptimizerConf
- betas: List[float]#
- eps: float = 0.0001#
- hessian_power: float = 1.0#
- lr: float = 0.1#
- weight_decay: float = 0.0#
- class physicsnemo.sym.hydra.optimizer.AdamConf(
- _params_: Any = <factory>,
- _target_: str = 'torch.optim.Adam',
- lr: float = 0.001,
- betas: List[float] = <factory>,
- eps: float = 1e-08,
- weight_decay: float = 0,
- amsgrad: bool = False,
)
Bases: OptimizerConf
- amsgrad: bool = False#
- betas: List[float]#
- eps: float = 1e-08#
- lr: float = 0.001#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.AdamPConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.AdamP',
- lr: float = 0.001,
- betas: List[float] = <factory>,
- eps: float = 1e-08,
- weight_decay: float = 0,
- delta: float = 0.1,
- wd_ratio: float = 0.1,
)
Bases: OptimizerConf
- betas: List[float]#
- delta: float = 0.1#
- eps: float = 1e-08#
- lr: float = 0.001#
- wd_ratio: float = 0.1#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.AdamWConf(
- _params_: Any = <factory>,
- _target_: str = 'torch.optim.AdamW',
- lr: float = 0.001,
- betas: List[float] = <factory>,
- eps: float = 1e-08,
- weight_decay: float = 0.01,
- amsgrad: bool = False,
)
Bases: OptimizerConf
- amsgrad: bool = False#
- betas: List[float]#
- eps: float = 1e-08#
- lr: float = 0.001#
- weight_decay: float = 0.01#
- class physicsnemo.sym.hydra.optimizer.AdamaxConf(
- _params_: Any = <factory>,
- _target_: str = 'torch.optim.Adamax',
- lr: float = 0.002,
- betas: List[float] = <factory>,
- eps: float = 1e-08,
- weight_decay: float = 0,
)
Bases: OptimizerConf
- betas: List[float]#
- eps: float = 1e-08#
- lr: float = 0.002#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.AggMoConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.AggMo',
- lr: float = 0.001,
- betas: List[float] = <factory>,
- weight_decay: float = 0,
)
Bases: OptimizerConf
- betas: List[float]#
- lr: float = 0.001#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.ApolloConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.Apollo',
- lr: float = 0.01,
- beta: float = 0.9,
- eps: float = 0.0001,
- warmup: int = 0,
- init_lr: float = 0.01,
- weight_decay: float = 0,
)
Bases: OptimizerConf
- beta: float = 0.9#
- eps: float = 0.0001#
- init_lr: float = 0.01#
- lr: float = 0.01#
- warmup: int = 0#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.BFGSConf(
- _params_: Any = <factory>,
- _target_: str = 'torch.optim.LBFGS',
- lr: float = 1.0,
- max_iter: int = 1000,
- max_eval: Any = None,
- tolerance_grad: float = 1e-07,
- tolerance_change: float = 1e-09,
- history_size: int = 100,
- line_search_fn: Any = None,
)
Bases: OptimizerConf
- history_size: int = 100#
- line_search_fn: Any = None#
- lr: float = 1.0#
- max_eval: Any = None#
- max_iter: int = 1000#
- tolerance_change: float = 1e-09#
- tolerance_grad: float = 1e-07#
- class physicsnemo.sym.hydra.optimizer.DiffGradConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.DiffGrad',
- lr: float = 0.001,
- betas: List[float] = <factory>,
- eps: float = 1e-08,
- weight_decay: float = 0,
)
Bases: OptimizerConf
- betas: List[float]#
- eps: float = 1e-08#
- lr: float = 0.001#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.LambConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.Lamb',
- lr: float = 0.001,
- betas: List[float] = <factory>,
- eps: float = 1e-08,
- weight_decay: float = 0,
)
Bases: OptimizerConf
- betas: List[float]#
- eps: float = 1e-08#
- lr: float = 0.001#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.MADGRADConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.MADGRAD',
- lr: float = 0.01,
- momentum: float = 0.9,
- weight_decay: float = 0,
- eps: float = 1e-06,
)
Bases: OptimizerConf
- eps: float = 1e-06#
- lr: float = 0.01#
- momentum: float = 0.9#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.NAdamConf(
- _params_: Any = <factory>,
- _target_: str = 'torch.optim.NAdam',
- lr: float = 0.002,
- betas: List[float] = <factory>,
- eps: float = 1e-08,
- weight_decay: float = 0,
- momentum_decay: float = 0.004,
)
Bases: OptimizerConf
- betas: List[float]#
- eps: float = 1e-08#
- lr: float = 0.002#
- momentum_decay: float = 0.004#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.NovoGradConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.NovoGrad',
- lr: float = 0.001,
- betas: List[float] = <factory>,
- eps: float = 1e-08,
- weight_decay: float = 0,
- grad_averaging: bool = False,
- amsgrad: bool = False,
)
Bases: OptimizerConf
- amsgrad: bool = False#
- betas: List[float]#
- eps: float = 1e-08#
- grad_averaging: bool = False#
- lr: float = 0.001#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.OptimizerConf(_params_: Any = <factory>)[source]#
Bases: object
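Every optimizer config above pairs a _target_ with that optimizer's keyword arguments, which is what hydra.utils.instantiate expects, with the model parameters passed positionally; the internal _params_ field is PhysicsNeMo bookkeeping and is omitted in this hedged sketch (the betas shown are torch's defaults, since the <factory> default is not printed on this page):

```python
import torch
from hydra.utils import instantiate
from omegaconf import OmegaConf

adam_cfg = OmegaConf.create({
    "_target_": "torch.optim.Adam",  # as in AdamConf above
    "lr": 0.001,
    "betas": [0.9, 0.999],  # torch's own default, used here for illustration
    "eps": 1e-08,
    "weight_decay": 0,
    "amsgrad": False,
})
model = torch.nn.Linear(4, 1)
# Positional args are forwarded to the target: Adam(model.parameters(), ...)
optimizer = instantiate(adam_cfg, model.parameters())
print(type(optimizer).__name__)  # Adam
```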
- class physicsnemo.sym.hydra.optimizer.PIDConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.PID',
- lr: float = 0.001,
- momentum: float = 0,
- dampening: float = 0,
- weight_decay: float = 0.01,
- integral: float = 5.0,
- derivative: float = 10.0,
)
Bases: OptimizerConf
- dampening: float = 0#
- derivative: float = 10.0#
- integral: float = 5.0#
- lr: float = 0.001#
- momentum: float = 0#
- weight_decay: float = 0.01#
- class physicsnemo.sym.hydra.optimizer.QHAdamConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.QHAdam',
- lr: float = 0.001,
- betas: List[float] = <factory>,
- nus: List[float] = <factory>,
- weight_decay: float = 0,
- decouple_weight_decay: bool = False,
- eps: float = 1e-08,
)
Bases: OptimizerConf
- betas: List[float]#
- decouple_weight_decay: bool = False#
- eps: float = 1e-08#
- lr: float = 0.001#
- nus: List[float]#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.QHMConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.QHM',
- lr: float = 0.001,
- momentum: float = 0,
- nu: float = 0.7,
- weight_decay: float = 0.01,
- weight_decay_type: str = 'grad',
)
Bases: OptimizerConf
- lr: float = 0.001#
- momentum: float = 0#
- nu: float = 0.7#
- weight_decay: float = 0.01#
- weight_decay_type: str = 'grad'#
- class physicsnemo.sym.hydra.optimizer.RAdamConf(
- _params_: Any = <factory>,
- _target_: str = 'torch.optim.RAdam',
- lr: float = 0.001,
- betas: List[float] = <factory>,
- eps: float = 1e-08,
- weight_decay: float = 0,
)
Bases: OptimizerConf
- betas: List[float]#
- eps: float = 1e-08#
- lr: float = 0.001#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.RMSpropConf(
- _params_: Any = <factory>,
- _target_: str = 'torch.optim.RMSprop',
- lr: float = 0.01,
- alpha: float = 0.99,
- eps: float = 1e-08,
- weight_decay: float = 0,
- momentum: float = 0,
- centered: bool = False,
)
Bases: OptimizerConf
- alpha: float = 0.99#
- centered: bool = False#
- eps: float = 1e-08#
- lr: float = 0.01#
- momentum: float = 0#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.RangerConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.Ranger',
- lr: float = 0.001,
- alpha: float = 0.5,
- k: int = 6,
- N_sma_threshhold: int = 5,
- betas: List[float] = <factory>,
- eps: float = 1e-05,
- weight_decay: float = 0,
)
Bases: OptimizerConf
- N_sma_threshhold: int = 5#
- alpha: float = 0.5#
- betas: List[float]#
- eps: float = 1e-05#
- k: int = 6#
- lr: float = 0.001#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.RangerQHConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.RangerQH',
- lr: float = 0.001,
- betas: List[float] = <factory>,
- nus: List[float] = <factory>,
- weight_decay: float = 0,
- k: int = 6,
- alpha: float = 0.5,
- decouple_weight_decay: bool = False,
- eps: float = 1e-08,
)
Bases: OptimizerConf
- alpha: float = 0.5#
- betas: List[float]#
- decouple_weight_decay: bool = False#
- eps: float = 1e-08#
- k: int = 6#
- lr: float = 0.001#
- nus: List[float]#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.RangerVAConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.RangerVA',
- lr: float = 0.001,
- alpha: float = 0.5,
- k: int = 6,
- n_sma_threshhold: int = 5,
- betas: List[float] = <factory>,
- eps: float = 1e-05,
- weight_decay: float = 0,
- amsgrad: bool = True,
- transformer: str = 'softplus',
- smooth: int = 50,
- grad_transformer: str = 'square',
)
Bases: OptimizerConf
- alpha: float = 0.5#
- amsgrad: bool = True#
- betas: List[float]#
- eps: float = 1e-05#
- grad_transformer: str = 'square'#
- k: int = 6#
- lr: float = 0.001#
- n_sma_threshhold: int = 5#
- smooth: int = 50#
- transformer: str = 'softplus'#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.RpropConf(
- _params_: Any = <factory>,
- _target_: str = 'torch.optim.Rprop',
- lr: float = 0.01,
- etas: List[float] = <factory>,
- step_sizes: List[float] = <factory>,
)
Bases: OptimizerConf
- etas: List[float]#
- lr: float = 0.01#
- step_sizes: List[float]#
- class physicsnemo.sym.hydra.optimizer.SGDConf(
- _params_: Any = <factory>,
- _target_: str = 'torch.optim.SGD',
- lr: float = 0.001,
- momentum: float = 0.01,
- dampening: float = 0,
- weight_decay: float = 0,
- nesterov: bool = False,
)
Bases: OptimizerConf
- dampening: float = 0#
- lr: float = 0.001#
- momentum: float = 0.01#
- nesterov: bool = False#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.SGDPConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.SGDP',
- lr: float = 0.001,
- momentum: float = 0,
- dampening: float = 0,
- weight_decay: float = 0.01,
- nesterov: bool = False,
- delta: float = 0.1,
- wd_ratio: float = 0.1,
)
Bases: OptimizerConf
- dampening: float = 0#
- delta: float = 0.1#
- lr: float = 0.001#
- momentum: float = 0#
- nesterov: bool = False#
- wd_ratio: float = 0.1#
- weight_decay: float = 0.01#
- class physicsnemo.sym.hydra.optimizer.SGDWConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.SGDW',
- lr: float = 0.001,
- momentum: float = 0,
- dampening: float = 0,
- weight_decay: float = 0.01,
- nesterov: bool = False,
)
Bases: OptimizerConf
- dampening: float = 0#
- lr: float = 0.001#
- momentum: float = 0#
- nesterov: bool = False#
- weight_decay: float = 0.01#
- class physicsnemo.sym.hydra.optimizer.SWATSConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.SWATS',
- lr: float = 0.1,
- betas: List[float] = <factory>,
- eps: float = 0.001,
- weight_decay: float = 0,
- amsgrad: bool = False,
- nesterov: bool = False,
)
Bases: OptimizerConf
- amsgrad: bool = False#
- betas: List[float]#
- eps: float = 0.001#
- lr: float = 0.1#
- nesterov: bool = False#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.ShampooConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.Shampoo',
- lr: float = 0.1,
- momentum: float = 0,
- weight_decay: float = 0,
- epsilon: float = 0.0001,
- update_freq: int = 1,
)
Bases: OptimizerConf
- epsilon: float = 0.0001#
- lr: float = 0.1#
- momentum: float = 0#
- update_freq: int = 1#
- weight_decay: float = 0#
- class physicsnemo.sym.hydra.optimizer.SparseAdamConf(
- _params_: Any = <factory>,
- _target_: str = 'torch.optim.SparseAdam',
- lr: float = 0.001,
- betas: List[float] = <factory>,
- eps: float = 1e-08,
)
Bases: OptimizerConf
- betas: List[float]#
- eps: float = 1e-08#
- lr: float = 0.001#
- class physicsnemo.sym.hydra.optimizer.YogiConf(
- _params_: Any = <factory>,
- _target_: str = 'torch_optimizer.Yogi',
- lr: float = 0.01,
- betas: List[float] = <factory>,
- eps: float = 0.001,
- initial_accumulator: float = 1e-06,
- weight_decay: float = 0,
)
Bases: OptimizerConf
- betas: List[float]#
- eps: float = 0.001#
- initial_accumulator: float = 1e-06#
- lr: float = 0.01#
- weight_decay: float = 0#
hydra.profiler#
Profiler config
- class physicsnemo.sym.hydra.profiler.NvtxProfiler(
- profile: bool = False,
- start_step: int = 0,
- end_step: int = 100,
- name: str = 'nvtx',
)
Bases: ProfilerConf
- end_step: int = 100#
- name: str = 'nvtx'#
- profile: bool = False#
- start_step: int = 0#
- class physicsnemo.sym.hydra.profiler.ProfilerConf(
- profile: bool = '???',
- start_step: int = '???',
- end_step: int = '???',
)
Bases: object
- end_step: int = '???'#
- profile: bool = '???'#
- start_step: int = '???'#
- class physicsnemo.sym.hydra.profiler.TensorBoardProfiler(
- profile: bool = False,
- start_step: int = 0,
- end_step: int = 100,
- name: str = 'tensorboard',
- warmup: int = 5,
- repeat: int = 1,
- filename: str = '${hydra.job.override_dirname}-${hydra.job.name}.profile',
)
Bases: ProfilerConf
- end_step: int = 100#
- filename: str = '${hydra.job.override_dirname}-${hydra.job.name}.profile'#
- name: str = 'tensorboard'#
- profile: bool = False#
- repeat: int = 1#
- start_step: int = 0#
- warmup: int = 5#
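Profiling is off by default (profile: bool = False) and is typically switched on through Hydra overrides rather than by editing these classes. A hedged sketch using the compose() helper documented below — the override paths assume the profiler group is mounted at cfg.profiler:

```python
from physicsnemo.sym.hydra.utils import compose

cfg = compose(
    config_name="config",  # illustrative config name and path
    config_path="conf",
    overrides=["profiler.profile=true", "profiler.start_step=10", "profiler.end_step=110"],
)
print(cfg.profiler.profile)
```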
hydra.scheduler#
Supported PyTorch scheduler configs
- class physicsnemo.sym.hydra.scheduler.CosineAnnealingLRConf(
- _target_: str = 'torch.optim.lr_scheduler.CosineAnnealingLR',
- T_max: int = 1000,
- eta_min: float = 0,
- last_epoch: int = -1,
)
Bases: SchedulerConf
- T_max: int = 1000#
- eta_min: float = 0#
- last_epoch: int = -1#
- class physicsnemo.sym.hydra.scheduler.CosineAnnealingWarmRestartsConf(
- _target_: str = 'torch.optim.lr_scheduler.CosineAnnealingWarmRestarts',
- T_0: int = 1000,
- T_mult: int = 1,
- eta_min: float = 0,
- last_epoch: int = -1,
)
Bases: SchedulerConf
- T_0: int = 1000#
- T_mult: int = 1#
- eta_min: float = 0#
- last_epoch: int = -1#
- class physicsnemo.sym.hydra.scheduler.ExponentialLRConf(
- _target_: str = 'torch.optim.lr_scheduler.ExponentialLR',
- gamma: float = 0.99998718,
)
Bases: SchedulerConf
- gamma: float = 0.99998718#
- class physicsnemo.sym.hydra.scheduler.TFExponentialLRConf(
- _target_: str = 'custom',
- _name_: str = 'tf.ExponentialLR',
- decay_rate: float = 0.95,
- decay_steps: int = 1000,
)
Bases: SchedulerConf
- decay_rate: float = 0.95#
- decay_steps: int = 1000#
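TFExponentialLRConf expresses TensorFlow-style (non-staircase) exponential decay, lr = lr0 * decay_rate ** (step / decay_steps), while torch's ExponentialLR multiplies by gamma every step; the two coincide when gamma = decay_rate ** (1 / decay_steps). As an arithmetic check, the ExponentialLRConf default above matches, to the printed precision, a decay_rate of 0.95 spread over 4000 steps:

```python
# Equivalence check: gamma = decay_rate ** (1 / decay_steps)
print(0.95 ** (1 / 4000))  # 0.99998717... ~= ExponentialLRConf's gamma of 0.99998718
print(0.95 ** (1 / 1000))  # per-step gamma equivalent to TFExponentialLRConf's defaults
```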
hydra.training#
Supported PhysicsNeMo training paradigms
- class physicsnemo.sym.hydra.training.DefaultStopCriterion(
- metric: Any = None,
- min_delta: Any = None,
- patience: int = 50000,
- mode: str = 'min',
- freq: int = 1000,
- strict: bool = False,
)
Bases: StopCriterionConf
- freq: int = 1000#
- metric: Any = None#
- min_delta: Any = None#
- mode: str = 'min'#
- patience: int = 50000#
- strict: bool = False#
- class physicsnemo.sym.hydra.training.DefaultTraining(
- max_steps: int = 10000,
- grad_agg_freq: int = 1,
- rec_results_freq: int = 1000,
- rec_validation_freq: int = '${training.rec_results_freq}',
- rec_inference_freq: int = '${training.rec_results_freq}',
- rec_monitor_freq: int = '${training.rec_results_freq}',
- rec_constraint_freq: int = '${training.rec_results_freq}',
- save_network_freq: int = 1000,
- print_stats_freq: int = 100,
- summary_freq: int = 1000,
- grad_clip_max_norm: float = 0.5,
- monitor_grad_clip: bool = True,
- ntk: physicsnemo.sym.hydra.loss.NTKConf = <factory>,
)
Bases: TrainingConf
- grad_agg_freq: int = 1#
- grad_clip_max_norm: float = 0.5#
- max_steps: int = 10000#
- monitor_grad_clip: bool = True#
- print_stats_freq: int = 100#
- rec_constraint_freq: int = '${training.rec_results_freq}'#
- rec_inference_freq: int = '${training.rec_results_freq}'#
- rec_monitor_freq: int = '${training.rec_results_freq}'#
- rec_results_freq: int = 1000#
- rec_validation_freq: int = '${training.rec_results_freq}'#
- save_network_freq: int = 1000#
- summary_freq: int = 1000#
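The '${training.rec_results_freq}' defaults are OmegaConf interpolations: each rec_*_freq resolves against rec_results_freq at access time, so changing that one field retunes every recording frequency that still references it. A minimal demonstration:

```python
from omegaconf import OmegaConf

cfg = OmegaConf.create({
    "training": {
        "rec_results_freq": 1000,
        "rec_validation_freq": "${training.rec_results_freq}",
    }
})
cfg.training.rec_results_freq = 250
print(cfg.training.rec_validation_freq)  # 250 -- resolved lazily at access time
```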
- class physicsnemo.sym.hydra.training.StopCriterionConf(
- metric: Any = '???',
- min_delta: Any = '???',
- patience: int = '???',
- mode: str = '???',
- freq: int = '???',
- strict: bool = '???',
)
Bases: object
- freq: int = '???'#
- metric: Any = '???'#
- min_delta: Any = '???'#
- mode: str = '???'#
- patience: int = '???'#
- strict: bool = '???'#
- class physicsnemo.sym.hydra.training.TrainingConf(
- max_steps: int = '???',
- grad_agg_freq: int = '???',
- rec_results_freq: int = '???',
- rec_validation_freq: int = '???',
- rec_inference_freq: int = '???',
- rec_monitor_freq: int = '???',
- rec_constraint_freq: int = '???',
- save_network_freq: int = '???',
- print_stats_freq: int = '???',
- summary_freq: int = '???',
- grad_clip_max_norm: float = '???',
- monitor_grad_clip: bool = '???',
)
Bases: object
- grad_agg_freq: int = '???'#
- grad_clip_max_norm: float = '???'#
- max_steps: int = '???'#
- monitor_grad_clip: bool = '???'#
- print_stats_freq: int = '???'#
- rec_constraint_freq: int = '???'#
- rec_inference_freq: int = '???'#
- rec_monitor_freq: int = '???'#
- rec_results_freq: int = '???'#
- rec_validation_freq: int = '???'#
- save_network_freq: int = '???'#
- summary_freq: int = '???'#
- class physicsnemo.sym.hydra.training.VariationalTraining(
- max_steps: int = 10000,
- grad_agg_freq: int = 1,
- rec_results_freq: int = 1000,
- rec_validation_freq: int = '${training.rec_results_freq}',
- rec_inference_freq: int = '${training.rec_results_freq}',
- rec_monitor_freq: int = '${training.rec_results_freq}',
- rec_constraint_freq: int = '${training.rec_results_freq}',
- save_network_freq: int = 1000,
- print_stats_freq: int = 100,
- summary_freq: int = 1000,
- grad_clip_max_norm: float = 0.5,
- monitor_grad_clip: bool = True,
- ntk: physicsnemo.sym.hydra.loss.NTKConf = <factory>,
- test_function: str = '???',
- use_quadratures: bool = False,
)
Bases: DefaultTraining
- test_function: str = '???'#
- use_quadratures: bool = False#
hydra.utils#
- physicsnemo.sym.hydra.utils.add_hydra_run_path(path: str | Path) → Path[source]#
Prepends current hydra run path
- physicsnemo.sym.hydra.utils.compose(
- config_name: str | None = None,
- config_path: str | None = None,
- overrides: List[str] = [],
- return_hydra_config: bool = False,
- job_name: str | None = 'app',
- caller_stack_depth: int = 2,
)
Internal PhysicsNeMo config initializer and compose function. This is an alternative way to initialize a Hydra config and should be used only as a last resort, in cases where @physicsnemo.main() cannot be used. For more information see: https://hydra.cc/docs/advanced/compose_api/
- Parameters:
config_name (str) – PhysicsNeMo config name
config_path (str) – Path to config file relative to the caller at location caller_stack_depth
overrides (list of strings) – List of overrides
return_hydra_config (bool) – Return the hydra options in the dict config
job_name (string) – Name of program run instance
caller_stack_depth (int) – Stack depth of this function call (needed to locate the config relative to the calling Python file).
- physicsnemo.sym.hydra.utils.main(config_path: str, config_name: str = 'config')[source]#
Modified decorator for loading Hydra configs in PhysicsNeMo. See: facebookresearch/hydra
- physicsnemo.sym.hydra.utils.to_absolute_path(*args: str | Path)[source]#
Converts a file path to an absolute path based on the run file location. Modified from: facebookresearch/hydra
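Because Hydra changes the working directory to the run directory, relative data paths in a training script usually need this helper. A short hedged usage sketch (the file name is illustrative):

```python
from physicsnemo.sym.hydra.utils import to_absolute_path

# Resolve against the original working directory, not Hydra's run directory.
data_file = to_absolute_path("data/train.npz")
print(data_file)
```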