modulus.hydra

hydra.arch

Architecture/Model configs

class modulus.hydra.arch.AFNOConf(_target_: str = 'modulus.architecture.afno.AFNOArch', img_shape: Tuple[int] = '???', patch_size: int = 16, embed_dim: int = 256, depth: int = 4, num_blocks: int = 8)

Bases: modulus.hydra.arch.ArchConf

depth: int = 4
embed_dim: int = 256
img_shape: Tuple[int] = '???'
num_blocks: int = 8
patch_size: int = 16
class modulus.hydra.arch.ArchConf(_target_: str = '???')

Bases: object

class modulus.hydra.arch.FNOConf(_target_: str = 'modulus.architecture.fno.FNOArch', dimension: int = '???', nr_fno_layers: int = 4, fno_layer_size: int = 32, fno_modes: Any = 16, padding: int = 8, padding_type: str = 'constant', output_fc_layer_sizes: List[int] = <factory>, coord_features: bool = True)

Bases: modulus.hydra.arch.ArchConf

coord_features: bool = True
dimension: int = '???'
fno_layer_size: int = 32
fno_modes: Any = 16
nr_fno_layers: int = 4
output_fc_layer_sizes: List[int]
padding: int = 8
padding_type: str = 'constant'
class modulus.hydra.arch.FourierConf(_target_: str = 'modulus.architecture.fourier_net.FourierNetArch', layer_size: int = 512, nr_layers: int = 6, skip_connections: bool = False, weight_norm: bool = True, adaptive_activations: bool = False)

Bases: modulus.hydra.arch.ArchConf

adaptive_activations: bool = False
layer_size: int = 512
nr_layers: int = 6
skip_connections: bool = False
weight_norm: bool = True
class modulus.hydra.arch.FullyConnectedConf(_target_: str = 'modulus.architecture.fully_connected.FullyConnectedArch', layer_size: int = 512, nr_layers: int = 6, skip_connections: bool = False, adaptive_activations: bool = False, weight_norm: bool = True)

Bases: modulus.hydra.arch.ArchConf

adaptive_activations: bool = False
layer_size: int = 512
nr_layers: int = 6
skip_connections: bool = False
weight_norm: bool = True
class modulus.hydra.arch.HighwayFourierConf(_target_: str = 'modulus.architecture.highway_fourier_net.HighwayFourierNetArch', layer_size: int = 512, nr_layers: int = 6, skip_connections: bool = False, weight_norm: bool = True, adaptive_activations: bool = False, transform_fourier_features: bool = True, project_fourier_features: bool = False)

Bases: modulus.hydra.arch.ArchConf

adaptive_activations: bool = False
layer_size: int = 512
nr_layers: int = 6
project_fourier_features: bool = False
skip_connections: bool = False
transform_fourier_features: bool = True
weight_norm: bool = True
class modulus.hydra.arch.ModifiedFourierConf(_target_: str = 'modulus.architecture.modified_fourier_net.ModifiedFourierNetArch', layer_size: int = 512, nr_layers: int = 6, skip_connections: bool = False, weight_norm: bool = True, adaptive_activations: bool = False)

Bases: modulus.hydra.arch.ArchConf

adaptive_activations: bool = False
layer_size: int = 512
nr_layers: int = 6
skip_connections: bool = False
weight_norm: bool = True
class modulus.hydra.arch.MultiplicativeFilterConf(_target_: str = 'modulus.architecture.multiplicative_filter_net.MultiplicativeFilterNetArch', layer_size: int = 512, nr_layers: int = 6, skip_connections: bool = False, weight_norm: bool = True, input_scale: float = 10.0, gabor_alpha: float = 6.0, gabor_beta: float = 1.0, normalization: Any = None)

Bases: modulus.hydra.arch.ArchConf

gabor_alpha: float = 6.0
gabor_beta: float = 1.0
input_scale: float = 10.0
layer_size: int = 512
normalization: Any = None
nr_layers: int = 6
skip_connections: bool = False
weight_norm: bool = True
class modulus.hydra.arch.MultiresolutionHashNetConf(_target_: str = 'modulus.architecture.hash_encoding_net.MultiresolutionHashNetArch', layer_size: int = 64, nr_layers: int = 3, skip_connections: bool = False, weight_norm: bool = False, adaptive_activations: bool = False, nr_levels: int = 5, nr_features_per_level: int = 2, log2_hashmap_size: int = 19, base_resolution: int = 2, finest_resolution: int = 32)

Bases: modulus.hydra.arch.ArchConf

adaptive_activations: bool = False
base_resolution: int = 2
finest_resolution: int = 32
layer_size: int = 64
log2_hashmap_size: int = 19
nr_features_per_level: int = 2
nr_layers: int = 3
nr_levels: int = 5
skip_connections: bool = False
weight_norm: bool = False
class modulus.hydra.arch.MultiscaleFourierConf(_target_: str = 'modulus.architecture.multiscale_fourier_net.MultiscaleFourierNetArch', frequencies_params: Any = None, layer_size: int = 512, nr_layers: int = 6, skip_connections: bool = False, weight_norm: bool = True, adaptive_activations: bool = False)

Bases: modulus.hydra.arch.ArchConf

adaptive_activations: bool = False
frequencies_params: Any = None
layer_size: int = 512
nr_layers: int = 6
skip_connections: bool = False
weight_norm: bool = True
class modulus.hydra.arch.Pix2PixConf(_target_: str = 'modulus.architecture.pix2pix.Pix2PixArch', dimension: int = '???', conv_layer_size: int = 64, n_downsampling: int = 3, n_blocks: int = 3, scaling_factor: int = 1, batch_norm: bool = True, padding_type: str = 'reflect')

Bases: modulus.hydra.arch.ArchConf

batch_norm: bool = True
conv_layer_size: int = 64
dimension: int = '???'
n_blocks: int = 3
n_downsampling: int = 3
padding_type: str = 'reflect'
scaling_factor: int = 1
class modulus.hydra.arch.SRResConf(_target_: str = 'modulus.architecture.super_res_net.SRResNetArch', large_kernel_size: int = 7, small_kernel_size: int = 3, conv_layer_size: int = 32, n_resid_blocks: int = 8, scaling_factor: int = 8)

Bases: modulus.hydra.arch.ArchConf

conv_layer_size: int = 32
large_kernel_size: int = 7
n_resid_blocks: int = 8
scaling_factor: int = 8
small_kernel_size: int = 3
class modulus.hydra.arch.SirenConf(_target_: str = 'modulus.architecture.siren.SirenArch', layer_size: int = 512, nr_layers: int = 6, first_omega: float = 30.0, omega: float = 30.0, normalization: Any = None)

Bases: modulus.hydra.arch.ArchConf

first_omega: float = 30.0
layer_size: int = 512
normalization: Any = None
nr_layers: int = 6
omega: float = 30.0
modulus.hydra.arch.register_arch_configs() → None
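
The classes above are plain dataclasses that register_arch_configs() makes available to Hydra as structured configs; each _target_ names the architecture class that gets instantiated. A minimal standalone sketch (not taken from the Modulus examples) that builds one of them with OmegaConf and overrides a couple of defaults:

    from omegaconf import OmegaConf
    from modulus.hydra.arch import FullyConnectedConf

    # Build a structured config from the dataclass and override two defaults.
    cfg = OmegaConf.structured(FullyConnectedConf())
    cfg.layer_size = 256   # default is 512
    cfg.nr_layers = 4      # default is 6
    print(OmegaConf.to_yaml(cfg))  # _target_ points at FullyConnectedArch

In a full Modulus run the same overrides would normally be given in the Hydra config file or on the command line rather than in Python.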

hydra.config

Modulus main config

class modulus.hydra.config.DebugModulusConfig(network_dir: str = '.', initialization_network_dir: str = '', save_filetypes: str = 'vtk', summary_histograms: bool = False, jit: bool = True, device: str = '', debug: bool = True, run_mode: str = 'train', arch: Any = '???', training: modulus.hydra.training.TrainingConf = '???', loss: modulus.hydra.loss.LossConf = '???', optimizer: modulus.hydra.optimizer.OptimizerConf = '???', scheduler: modulus.hydra.scheduler.SchedulerConf = '???', batch_size: Any = '???', profiler: modulus.hydra.profiler.ProfilerConf = '???', hydra: Any = <factory>, custom: Any = '???', defaults: List[Any] = <factory>)

Bases: modulus.hydra.config.ModulusConfig

debug: bool = True
defaults: List[Any]
class modulus.hydra.config.DefaultModulusConfig(network_dir: str = '.', initialization_network_dir: str = '', save_filetypes: str = 'vtk', summary_histograms: bool = False, jit: bool = True, device: str = '', debug: bool = False, run_mode: str = 'train', arch: Any = '???', training: modulus.hydra.training.TrainingConf = '???', loss: modulus.hydra.loss.LossConf = '???', optimizer: modulus.hydra.optimizer.OptimizerConf = '???', scheduler: modulus.hydra.scheduler.SchedulerConf = '???', batch_size: Any = '???', profiler: modulus.hydra.profiler.ProfilerConf = '???', hydra: Any = <factory>, custom: Any = '???', defaults: List[Any] = <factory>)

Bases: modulus.hydra.config.ModulusConfig

defaults: List[Any]
class modulus.hydra.config.ExperimentalModulusConfig(network_dir: str = '.', initialization_network_dir: str = '', save_filetypes: str = 'vtk', summary_histograms: bool = False, jit: bool = True, device: str = '', debug: bool = False, run_mode: str = 'train', arch: Any = '???', training: modulus.hydra.training.TrainingConf = '???', loss: modulus.hydra.loss.LossConf = '???', optimizer: modulus.hydra.optimizer.OptimizerConf = '???', scheduler: modulus.hydra.scheduler.SchedulerConf = '???', batch_size: Any = '???', profiler: modulus.hydra.profiler.ProfilerConf = '???', hydra: Any = <factory>, custom: Any = '???', defaults: List[Any] = <factory>, pde: modulus.hydra.pde.PDEConf = '???')

Bases: modulus.hydra.config.ModulusConfig

defaults: List[Any]
pde: modulus.hydra.pde.PDEConf = '???'
class modulus.hydra.config.ModulusConfig(network_dir: str = '.', initialization_network_dir: str = '', save_filetypes: str = 'vtk', summary_histograms: bool = False, jit: bool = True, device: str = '', debug: bool = False, run_mode: str = 'train', arch: Any = '???', training: modulus.hydra.training.TrainingConf = '???', loss: modulus.hydra.loss.LossConf = '???', optimizer: modulus.hydra.optimizer.OptimizerConf = '???', scheduler: modulus.hydra.scheduler.SchedulerConf = '???', batch_size: Any = '???', profiler: modulus.hydra.profiler.ProfilerConf = '???', hydra: Any = <factory>, custom: Any = '???')

Bases: object

arch: Any = '???'
batch_size: Any = '???'
custom: Any = '???'
debug: bool = False
device: str = ''
hydra: Any
initialization_network_dir: str = ''
jit: bool = True
loss: modulus.hydra.loss.LossConf = '???'
network_dir: str = '.'
optimizer: modulus.hydra.optimizer.OptimizerConf = '???'
profiler: modulus.hydra.profiler.ProfilerConf = '???'
run_mode: str = 'train'
save_filetypes: str = 'vtk'
scheduler: modulus.hydra.scheduler.SchedulerConf = '???'
summary_histograms: bool = False
training: modulus.hydra.training.TrainingConf = '???'
modulus.hydra.config.register_modulus_configs() → None
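
A minimal sketch of an entry point that receives a ModulusConfig through the decorator documented in hydra.utils below; the conf/config.yaml referenced here is an assumed file that has to exist next to the script:

    from modulus.hydra.config import ModulusConfig
    from modulus.hydra.utils import main

    @main(config_path="conf", config_name="config")  # assumes ./conf/config.yaml exists
    def run(cfg: ModulusConfig) -> None:
        # Top-level fields plus the nested config groups.
        print(cfg.network_dir, cfg.run_mode, cfg.jit)
        print(cfg.training.max_steps)
        # cfg.optimizer.lr becomes available once an optimizer group
        # (e.g. adam) has been selected in the config's defaults list.

    if __name__ == "__main__":
        run()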

hydra.hydra

Hydra-related configs

class modulus.hydra.hydra.DebugFormat(format: str = '[%(levelname)s][%(asctime)s][%(module)s] - %(message)s', datefmt: str = '%Y-%m-%d %H:%M:%S')

Bases: object

datefmt: str = '%Y-%m-%d %H:%M:%S'
format: str = '[%(levelname)s][%(asctime)s][%(module)s] - %(message)s'
class modulus.hydra.hydra.DebugLogging(version: int = 1, formatters: Any = <factory>, handlers: Any = <factory>, root: Any = <factory>, disable_existing_loggers: bool = False, level: int = 0)

Bases: object

disable_existing_loggers: bool = False
formatters: Any
handlers: Any
level: int = 0
root: Any
version: int = 1
class modulus.hydra.hydra.DefaultLogging(version: int = 1, formatters: Any = <factory>, handlers: Any = <factory>, root: Any = <factory>, disable_existing_loggers: bool = False, level: int = 20)

Bases: object

disable_existing_loggers: bool = False
formatters: Any
handlers: Any
level: int = 20
root: Any
version: int = 1
class modulus.hydra.hydra.SimpleFormat(format: str = '[%(asctime)s] - %(message)s', datefmt: str = '%H:%M:%S')

Bases: object

datefmt: str = '%H:%M:%S'
format: str = '[%(asctime)s] - %(message)s'
modulus.hydra.hydra.register_hydra_configs() → None
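
These dataclasses only carry format and level settings for the Hydra job logging. As a standalone illustration (not how Modulus wires them up internally), the same format strings can be fed straight into Python's logging module:

    import logging
    from modulus.hydra.hydra import DebugFormat

    fmt = DebugFormat()
    logging.basicConfig(format=fmt.format, datefmt=fmt.datefmt, level=logging.DEBUG)
    logging.getLogger("modulus").debug("debug-style log line")
    # e.g. [DEBUG][2024-01-01 12:00:00][example] - debug-style log line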

hydra.loss

Supported Modulus loss aggregator configs

class modulus.hydra.loss.AggregatorGradNormConf(_target_: str = 'modulus.aggregator.GradNorm', weights: Any = None, alpha: float = 1.0)

Bases: modulus.hydra.loss.LossConf

alpha: float = 1.0
class modulus.hydra.loss.AggregatorHomoscedasticConf(_target_: str = 'modulus.aggregator.HomoscedasticUncertainty', weights: Any = None)

Bases: modulus.hydra.loss.LossConf

class modulus.hydra.loss.AggregatorLRAnnealingConf(_target_: str = 'modulus.aggregator.LRAnnealing', weights: Any = None, update_freq: int = 1, alpha: float = 0.01, ref_key: Any = None, eps: float = 1e-08)

Bases: modulus.hydra.loss.LossConf

alpha: float = 0.01
eps: float = 1e-08
ref_key: Any = None
update_freq: int = 1
class modulus.hydra.loss.AggregatorRelobraloConf(_target_: str = 'modulus.aggregator.Relobralo', weights: Any = None, alpha: float = 0.95, beta: float = 0.99, tau: float = 1.0, eps: float = 1e-08)

Bases: modulus.hydra.loss.LossConf

alpha: float = 0.95
beta: float = 0.99
eps: float = 1e-08
tau: float = 1.0
class modulus.hydra.loss.AggregatorSoftAdaptConf(_target_: str = 'modulus.aggregator.SoftAdapt', weights: Any = None, eps: float = 1e-08)

Bases: modulus.hydra.loss.LossConf

eps: float = 1e-08
class modulus.hydra.loss.AggregatorSumConf(_target_: str = 'modulus.aggregator.Sum', weights: Any = None)

Bases: modulus.hydra.loss.LossConf

class modulus.hydra.loss.LossConf(_target_: str = '???', weights: Any = None)

Bases: object

weights: Any = None
class modulus.hydra.loss.NTKConf(use_ntk: bool = False, save_name: Any = None, run_freq: int = 1000)

Bases: object

run_freq: int = 1000
save_name: Any = None
use_ntk: bool = False
modulus.hydra.loss.register_loss_configs() → None
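
Each aggregator config is a structured config whose _target_ names the aggregator class in modulus.aggregator. A small standalone sketch that inspects one of them (not how Modulus consumes it internally):

    from omegaconf import OmegaConf
    from modulus.hydra.loss import AggregatorLRAnnealingConf

    cfg = OmegaConf.structured(AggregatorLRAnnealingConf(alpha=0.05))
    print(cfg["_target_"])             # modulus.aggregator.LRAnnealing
    print(cfg.alpha, cfg.update_freq)  # 0.05 1
    print(OmegaConf.to_yaml(cfg))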

hydra.optimizer

Supported optimizer configs

class modulus.hydra.optimizer.A2GradExpConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.A2GradExp', lr: float = 0.01, beta: float = 10.0, lips: float = 10.0)

Bases: modulus.hydra.optimizer.OptimizerConf

beta: float = 10.0
lips: float = 10.0
lr: float = 0.01
class modulus.hydra.optimizer.A2GradIncConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.A2GradInc', lr: float = 0.01, beta: float = 10.0, lips: float = 10.0)

Bases: modulus.hydra.optimizer.OptimizerConf

beta: float = 10.0
lips: float = 10.0
lr: float = 0.01
class modulus.hydra.optimizer.A2GradUniConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.A2GradUni', lr: float = 0.01, beta: float = 10.0, lips: float = 10.0)

Bases: modulus.hydra.optimizer.OptimizerConf

beta: float = 10.0
lips: float = 10.0
lr: float = 0.01
class modulus.hydra.optimizer.ASGDConf(_params_: Any = <factory>, _target_: str = 'torch.optim.ASGD', lr: float = 0.01, lambd: float = 0.0001, alpha: float = 0.75, t0: float = 1000000.0, weight_decay: float = 0)

Bases: modulus.hydra.optimizer.OptimizerConf

alpha: float = 0.75
lambd: float = 0.0001
lr: float = 0.01
t0: float = 1000000.0
weight_decay: float = 0
class modulus.hydra.optimizer.AccSGDConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.AccSGD', lr: float = 0.001, kappa: float = 1000.0, xi: float = 10.0, small_const: float = 0.7, weight_decay: float = 0)

Bases: modulus.hydra.optimizer.OptimizerConf

kappa: float = 1000.0
lr: float = 0.001
small_const: float = 0.7
weight_decay: float = 0
xi: float = 10.0
class modulus.hydra.optimizer.AdaBeliefConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.AdaBelief', lr: float = 0.001, betas: List[float] = <factory>, eps: float = 0.001, weight_decay: float = 0, amsgrad: bool = False, weight_decouple: bool = False, fixed_decay: bool = False, rectify: bool = False)

Bases: modulus.hydra.optimizer.OptimizerConf

amsgrad: bool = False
betas: List[float]
eps: float = 0.001
fixed_decay: bool = False
lr: float = 0.001
rectify: bool = False
weight_decay: float = 0
weight_decouple: bool = False
class modulus.hydra.optimizer.AdaBoundConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.AdaBound', lr: float = 0.001, betas: List[float] = <factory>, final_lr: float = 0.1, gamma: float = 0.001, eps: float = 1e-08, weight_decay: float = 0, amsbound: bool = False)

Bases: modulus.hydra.optimizer.OptimizerConf

amsbound: bool = False
betas: List[float]
eps: float = 1e-08
final_lr: float = 0.1
gamma: float = 0.001
lr: float = 0.001
weight_decay: float = 0
class modulus.hydra.optimizer.AdaModConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.AdaMod', lr: float = 0.001, betas: List[float] = <factory>, beta3: float = 0.999, eps: float = 1e-08, weight_decay: float = 0)

Bases: modulus.hydra.optimizer.OptimizerConf

beta3: float = 0.999
betas: List[float]
eps: float = 1e-08
lr: float = 0.001
weight_decay: float = 0
class modulus.hydra.optimizer.AdadeltaConf(_params_: Any = <factory>, _target_: str = 'torch.optim.Adadelta', lr: float = 1.0, rho: float = 0.9, eps: float = 1e-06, weight_decay: float = 0)

Bases: modulus.hydra.optimizer.OptimizerConf

eps: float = 1e-06
lr: float = 1.0
rho: float = 0.9
weight_decay: float = 0
class modulus.hydra.optimizer.AdafactorConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.Adafactor', lr: float = 0.001, eps2: List[float] = <factory>, clip_threshold: float = 1.0, decay_rate: float = -0.8, beta1: Any = None, weight_decay: float = 0, scale_parameter: bool = True, relative_step: bool = True, warmup_init: bool = False)

Bases: modulus.hydra.optimizer.OptimizerConf

beta1: Any = None
clip_threshold: float = 1.0
decay_rate: float = -0.8
eps2: List[float]
lr: float = 0.001
relative_step: bool = True
scale_parameter: bool = True
warmup_init: bool = False
weight_decay: float = 0
class modulus.hydra.optimizer.AdagradConf(_params_: Any = <factory>, _target_: str = 'torch.optim.Adagrad', lr: float = 0.01, lr_decay: float = 0, weight_decay: float = 0, initial_accumulator_value: float = 0, eps: float = 1e-10)

Bases: modulus.hydra.optimizer.OptimizerConf

eps: float = 1e-10
initial_accumulator_value: float = 0
lr: float = 0.01
lr_decay: float = 0
weight_decay: float = 0
class modulus.hydra.optimizer.AdahessianConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.Adahessian', lr: float = 0.1, betas: List[float] = <factory>, eps: float = 0.0001, weight_decay: float = 0.0, hessian_power: float = 1.0)

Bases: modulus.hydra.optimizer.OptimizerConf

betas: List[float]
eps: float = 0.0001
hessian_power: float = 1.0
lr: float = 0.1
weight_decay: float = 0.0
class modulus.hydra.optimizer.AdamConf(_params_: Any = <factory>, _target_: str = 'torch.optim.Adam', lr: float = 0.001, betas: List[float] = <factory>, eps: float = 1e-08, weight_decay: float = 0, amsgrad: bool = False)

Bases: modulus.hydra.optimizer.OptimizerConf

amsgrad: bool = False
betas: List[float]
eps: float = 1e-08
lr: float = 0.001
weight_decay: float = 0
class modulus.hydra.optimizer.AdamPConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.AdamP', lr: float = 0.001, betas: List[float] = <factory>, eps: float = 1e-08, weight_decay: float = 0, delta: float = 0.1, wd_ratio: float = 0.1)

Bases: modulus.hydra.optimizer.OptimizerConf

betas: List[float]
delta: float = 0.1
eps: float = 1e-08
lr: float = 0.001
wd_ratio: float = 0.1
weight_decay: float = 0
class modulus.hydra.optimizer.AdamWConf(_params_: Any = <factory>, _target_: str = 'torch.optim.AdamW', lr: float = 0.001, betas: List[float] = <factory>, eps: float = 1e-08, weight_decay: float = 0.01, amsgrad: bool = False)

Bases: modulus.hydra.optimizer.OptimizerConf

amsgrad: bool = False
betas: List[float]
eps: float = 1e-08
lr: float = 0.001
weight_decay: float = 0.01
class modulus.hydra.optimizer.AdamaxConf(_params_: Any = <factory>, _target_: str = 'torch.optim.Adamax', lr: float = 0.002, betas: List[float] = <factory>, eps: float = 1e-08, weight_decay: float = 0)

Bases: modulus.hydra.optimizer.OptimizerConf

betas: List[float]
eps: float = 1e-08
lr: float = 0.002
weight_decay: float = 0
class modulus.hydra.optimizer.AggMoConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.AggMo', lr: float = 0.001, betas: List[float] = <factory>, weight_decay: float = 0)

Bases: modulus.hydra.optimizer.OptimizerConf

betas: List[float]
lr: float = 0.001
weight_decay: float = 0
class modulus.hydra.optimizer.ApolloConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.Apollo', lr: float = 0.01, beta: float = 0.9, eps: float = 0.0001, warmup: int = 0, init_lr: float = 0.01, weight_decay: float = 0)

Bases: modulus.hydra.optimizer.OptimizerConf

beta: float = 0.9
eps: float = 0.0001
init_lr: float = 0.01
lr: float = 0.01
warmup: int = 0
weight_decay: float = 0
class modulus.hydra.optimizer.BFGSConf(_params_: Any = <factory>, _target_: str = 'torch.optim.LBFGS', lr: float = 1.0, max_iter: int = 1000, max_eval: Any = None, tolerance_grad: float = 1e-07, tolerance_change: float = 1e-09, history_size: int = 100, line_search_fn: Any = None)

Bases: modulus.hydra.optimizer.OptimizerConf

history_size: int = 100
line_search_fn: Any = None
lr: float = 1.0
max_eval: Any = None
max_iter: int = 1000
tolerance_change: float = 1e-09
tolerance_grad: float = 1e-07
class modulus.hydra.optimizer.DiffGradConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.DiffGrad', lr: float = 0.001, betas: List[float] = <factory>, eps: float = 1e-08, weight_decay: float = 0)

Bases: modulus.hydra.optimizer.OptimizerConf

betas: List[float]
eps: float = 1e-08
lr: float = 0.001
weight_decay: float = 0
class modulus.hydra.optimizer.LambConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.Lamb', lr: float = 0.001, betas: List[float] = <factory>, eps: float = 1e-08, weight_decay: float = 0)

Bases: modulus.hydra.optimizer.OptimizerConf

betas: List[float]
eps: float = 1e-08
lr: float = 0.001
weight_decay: float = 0
class modulus.hydra.optimizer.MADGRADConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.MADGRAD', lr: float = 0.01, momentum: float = 0.9, weight_decay: float = 0, eps: float = 1e-06)

Bases: modulus.hydra.optimizer.OptimizerConf

eps: float = 1e-06
lr: float = 0.01
momentum: float = 0.9
weight_decay: float = 0
class modulus.hydra.optimizer.NAdamConf(_params_: Any = <factory>, _target_: str = 'torch.optim.NAdam', lr: float = 0.002, betas: List[float] = <factory>, eps: float = 1e-08, weight_decay: float = 0, momentum_decay: float = 0.004)

Bases: modulus.hydra.optimizer.OptimizerConf

betas: List[float]
eps: float = 1e-08
lr: float = 0.002
momentum_decay: float = 0.004
weight_decay: float = 0
class modulus.hydra.optimizer.NovoGradConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.NovoGrad', lr: float = 0.001, betas: List[float] = <factory>, eps: float = 1e-08, weight_decay: float = 0, grad_averaging: bool = False, amsgrad: bool = False)

Bases: modulus.hydra.optimizer.OptimizerConf

amsgrad: bool = False
betas: List[float]
eps: float = 1e-08
grad_averaging: bool = False
lr: float = 0.001
weight_decay: float = 0
class modulus.hydra.optimizer.OptimizerConf(_params_: Any = <factory>)

Bases: object

class modulus.hydra.optimizer.PIDConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.PID', lr: float = 0.001, momentum: float = 0, dampening: float = 0, weight_decay: float = 0.01, integral: float = 5.0, derivative: float = 10.0)

Bases: modulus.hydra.optimizer.OptimizerConf

dampening: float = 0
derivative: float = 10.0
integral: float = 5.0
lr: float = 0.001
momentum: float = 0
weight_decay: float = 0.01
class modulus.hydra.optimizer.QHAdamConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.QHAdam', lr: float = 0.001, betas: List[float] = <factory>, nus: List[float] = <factory>, weight_decay: float = 0, decouple_weight_decay: bool = False, eps: float = 1e-08)

Bases: modulus.hydra.optimizer.OptimizerConf

betas: List[float]
decouple_weight_decay: bool = False
eps: float = 1e-08
lr: float = 0.001
nus: List[float]
weight_decay: float = 0
class modulus.hydra.optimizer.QHMConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.QHM', lr: float = 0.001, momentum: float = 0, nu: float = 0.7, weight_decay: float = 0.01, weight_decay_type: str = 'grad')

Bases: modulus.hydra.optimizer.OptimizerConf

lr: float = 0.001
momentum: float = 0
nu: float = 0.7
weight_decay: float = 0.01
weight_decay_type: str = 'grad'
class modulus.hydra.optimizer.RAdamConf(_params_: Any = <factory>, _target_: str = 'torch.optim.RAdam', lr: float = 0.001, betas: List[float] = <factory>, eps: float = 1e-08, weight_decay: float = 0)

Bases: modulus.hydra.optimizer.OptimizerConf

betas: List[float]
eps: float = 1e-08
lr: float = 0.001
weight_decay: float = 0
class modulus.hydra.optimizer.RMSpropConf(_params_: Any = <factory>, _target_: str = 'torch.optim.RMSprop', lr: float = 0.01, alpha: float = 0.99, eps: float = 1e-08, weight_decay: float = 0, momentum: float = 0, centered: bool = False)

Bases: modulus.hydra.optimizer.OptimizerConf

alpha: float = 0.99
centered: bool = False
eps: float = 1e-08
lr: float = 0.01
momentum: float = 0
weight_decay: float = 0
class modulus.hydra.optimizer.RangerConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.Ranger', lr: float = 0.001, alpha: float = 0.5, k: int = 6, N_sma_threshhold: int = 5, betas: List[float] = <factory>, eps: float = 1e-05, weight_decay: float = 0)

Bases: modulus.hydra.optimizer.OptimizerConf

N_sma_threshhold: int = 5
alpha: float = 0.5
betas: List[float]
eps: float = 1e-05
k: int = 6
lr: float = 0.001
weight_decay: float = 0
class modulus.hydra.optimizer.RangerQHConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.RangerQH', lr: float = 0.001, betas: List[float] = <factory>, nus: List[float] = <factory>, weight_decay: float = 0, k: int = 6, alpha: float = 0.5, decouple_weight_decay: bool = False, eps: float = 1e-08)

Bases: modulus.hydra.optimizer.OptimizerConf

alpha: float = 0.5
betas: List[float]
decouple_weight_decay: bool = False
eps: float = 1e-08
k: int = 6
lr: float = 0.001
nus: List[float]
weight_decay: float = 0
class modulus.hydra.optimizer.RangerVAConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.RangerVA', lr: float = 0.001, alpha: float = 0.5, k: int = 6, n_sma_threshhold: int = 5, betas: List[float] = <factory>, eps: float = 1e-05, weight_decay: float = 0, amsgrad: bool = True, transformer: str = 'softplus', smooth: int = 50, grad_transformer: str = 'square')

Bases: modulus.hydra.optimizer.OptimizerConf

alpha: float = 0.5
amsgrad: bool = True
betas: List[float]
eps: float = 1e-05
grad_transformer: str = 'square'
k: int = 6
lr: float = 0.001
n_sma_threshhold: int = 5
smooth: int = 50
transformer: str = 'softplus'
weight_decay: float = 0
class modulus.hydra.optimizer.RpropConf(_params_: Any = <factory>, _target_: str = 'torch.optim.Rprop', lr: float = 0.01, etas: List[float] = <factory>, step_sizes: List[float] = <factory>)

Bases: modulus.hydra.optimizer.OptimizerConf

etas: List[float]
lr: float = 0.01
step_sizes: List[float]
class modulus.hydra.optimizer.SGDConf(_params_: Any = <factory>, _target_: str = 'torch.optim.SGD', lr: float = 0.001, momentum: float = 0.01, dampening: float = 0, weight_decay: float = 0, nesterov: bool = False)

Bases: modulus.hydra.optimizer.OptimizerConf

dampening: float = 0
lr: float = 0.001
momentum: float = 0.01
nesterov: bool = False
weight_decay: float = 0
class modulus.hydra.optimizer.SGDPConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.SGDP', lr: float = 0.001, momentum: float = 0, dampening: float = 0, weight_decay: float = 0.01, nesterov: bool = False, delta: float = 0.1, wd_ratio: float = 0.1)

Bases: modulus.hydra.optimizer.OptimizerConf

dampening: float = 0
delta: float = 0.1
lr: float = 0.001
momentum: float = 0
nesterov: bool = False
wd_ratio: float = 0.1
weight_decay: float = 0.01
class modulus.hydra.optimizer.SGDWConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.SGDW', lr: float = 0.001, momentum: float = 0, dampening: float = 0, weight_decay: float = 0.01, nesterov: bool = False)

Bases: modulus.hydra.optimizer.OptimizerConf

dampening: float = 0
lr: float = 0.001
momentum: float = 0
nesterov: bool = False
weight_decay: float = 0.01
class modulus.hydra.optimizer.SWATSConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.SWATS', lr: float = 0.1, betas: List[float] = <factory>, eps: float = 0.001, weight_decay: float = 0, amsgrad: bool = False, nesterov: bool = False)

Bases: modulus.hydra.optimizer.OptimizerConf

amsgrad: bool = False
betas: List[float]
eps: float = 0.001
lr: float = 0.1
nesterov: bool = False
weight_decay: float = 0
class modulus.hydra.optimizer.ShampooConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.Shampoo', lr: float = 0.1, momentum: float = 0, weight_decay: float = 0, epsilon: float = 0.0001, update_freq: int = 1)

Bases: modulus.hydra.optimizer.OptimizerConf

epsilon: float = 0.0001
lr: float = 0.1
momentum: float = 0
update_freq: int = 1
weight_decay: float = 0
class modulus.hydra.optimizer.SparseAdamConf(_params_: Any = <factory>, _target_: str = 'torch.optim.SparseAdam', lr: float = 0.001, betas: List[float] = <factory>, eps: float = 1e-08)

Bases: modulus.hydra.optimizer.OptimizerConf

betas: List[float]
eps: float = 1e-08
lr: float = 0.001
class modulus.hydra.optimizer.YogiConf(_params_: Any = <factory>, _target_: str = 'torch_optimizer.Yogi', lr: float = 0.01, betas: List[float] = <factory>, eps: float = 0.001, initial_accumulator: float = 1e-06, weight_decay: float = 0)

Bases: modulus.hydra.optimizer.OptimizerConf

betas: List[float]
eps: float = 0.001
initial_accumulator: float = 1e-06
lr: float = 0.01
weight_decay: float = 0
modulus.hydra.optimizer.register_optimizer_configs() → None
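
_params_ is a framework-internal field whose contents are not shown in the signatures above; the remaining fields map one-to-one onto the arguments of the target optimizer named by _target_. A hand-wired sketch of that mapping (Modulus normally performs the instantiation itself):

    import torch
    from modulus.hydra.optimizer import AdamConf

    conf = AdamConf(lr=5e-4)        # override the default lr of 0.001
    model = torch.nn.Linear(4, 4)
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=conf.lr,
        betas=tuple(conf.betas),    # default betas come from the dataclass factory
        eps=conf.eps,
        weight_decay=conf.weight_decay,
        amsgrad=conf.amsgrad,
    )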

hydra.profiler

Profiler config

class modulus.hydra.profiler.NvtxProfiler(profile: bool = False, start_step: int = 0, end_step: int = 100, name: str = 'nvtx')

Bases: modulus.hydra.profiler.ProfilerConf

end_step: int = 100
name: str = 'nvtx'
profile: bool = False
start_step: int = 0
class modulus.hydra.profiler.ProfilerConf(profile: bool = '???', start_step: int = '???', end_step: int = '???')

Bases: object

end_step: int = '???'
profile: bool = '???'
start_step: int = '???'
class modulus.hydra.profiler.TensorBoardProfiler(profile: bool = False, start_step: int = 0, end_step: int = 100, name: str = 'tensorboard', warmup: int = 5, repeat: int = 1, filename: str = '${hydra.job.override_dirname}-${hydra.job.name}.profile')

Bases: modulus.hydra.profiler.ProfilerConf

end_step: int = 100
filename: str = '${hydra.job.override_dirname}-${hydra.job.name}.profile'
name: str = 'tensorboard'
profile: bool = False
repeat: int = 1
start_step: int = 0
warmup: int = 5
modulus.hydra.profiler.register_profiler_configs() → None
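
A sketch of how a training loop might consume these settings (an illustration only, not Modulus's actual profiler hook; it also assumes a CUDA-enabled PyTorch build for the NVTX calls):

    import torch
    from modulus.hydra.profiler import NvtxProfiler

    prof = NvtxProfiler(profile=True, start_step=10, end_step=20)

    def run_step(step: int) -> None:
        in_window = prof.profile and prof.start_step <= step < prof.end_step
        if in_window:
            torch.cuda.nvtx.range_push(f"training_step_{step}")
        # ... forward / backward / optimizer work would go here ...
        if in_window:
            torch.cuda.nvtx.range_pop()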

hydra.scheduler

Supported PyTorch scheduler configs

class modulus.hydra.scheduler.ExponentialLRConf(_target_: str = 'torch.optim.lr_scheduler.ExponentialLR', gamma: float = 0.99998718)

Bases: modulus.hydra.scheduler.SchedulerConf

gamma: float = 0.99998718
class modulus.hydra.scheduler.SchedulerConf

Bases: object

class modulus.hydra.scheduler.TFExponentialLRConf(_target_: str = 'custom', _name_: str = 'tf.ExponentialLR', decay_rate: float = 0.95, decay_steps: int = 1000)

Bases: modulus.hydra.scheduler.SchedulerConf

decay_rate: float = 0.95
decay_steps: int = 1000
modulus.hydra.scheduler.register_scheduler_configs() → None
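
TFExponentialLRConf uses _target_ = 'custom', i.e. it is not a direct class target and is presumably translated by Modulus itself; its decay_rate/decay_steps pair describes a TensorFlow-style exponential decay. As a numerical aside (an observation about the numbers, not a statement about Modulus internals), the per-step gamma equivalent to such a decay is decay_rate ** (1 / decay_steps), and ExponentialLRConf's default gamma of 0.99998718 coincides with 0.95 ** (1 / 4000):

    # Per-step gamma equivalent to decaying by `decay_rate` every `decay_steps` steps.
    def per_step_gamma(decay_rate: float, decay_steps: int) -> float:
        return decay_rate ** (1.0 / decay_steps)

    print(round(per_step_gamma(0.95, 4000), 8))  # 0.99998718 (ExponentialLRConf default)
    print(round(per_step_gamma(0.95, 1000), 8))  # TFExponentialLRConf's own defaults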

hydra.training

Supported Modulus training paradigms

class modulus.hydra.training.DefaultTraining(max_steps: int = 10000, grad_agg_freq: int = 1, rec_results_freq: int = 1000, rec_validation_freq: int = '${training.rec_results_freq}', rec_inference_freq: int = '${training.rec_results_freq}', rec_monitor_freq: int = '${training.rec_results_freq}', rec_constraint_freq: int = '${training.rec_results_freq}', save_network_freq: int = 1000, print_stats_freq: int = 100, summary_freq: int = 1000, amp: bool = False, amp_dtype: str = 'float16', ntk: modulus.hydra.loss.NTKConf = NTKConf(use_ntk=False, save_name=None, run_freq=1000))

Bases: modulus.hydra.training.TrainingConf

amp: bool = False
amp_dtype: str = 'float16'
grad_agg_freq: int = 1
max_steps: int = 10000
ntk: modulus.hydra.loss.NTKConf = NTKConf(use_ntk=False, save_name=None, run_freq=1000)
print_stats_freq: int = 100
rec_constraint_freq: int = '${training.rec_results_freq}'
rec_inference_freq: int = '${training.rec_results_freq}'
rec_monitor_freq: int = '${training.rec_results_freq}'
rec_results_freq: int = 1000
rec_validation_freq: int = '${training.rec_results_freq}'
save_network_freq: int = 1000
summary_freq: int = 1000
class modulus.hydra.training.TrainingConf(max_steps: int = '???', grad_agg_freq: int = '???', rec_results_freq: int = '???', rec_validation_freq: int = '???', rec_inference_freq: int = '???', rec_monitor_freq: int = '???', rec_constraint_freq: int = '???', save_network_freq: int = '???', print_stats_freq: int = '???', summary_freq: int = '???', amp: bool = '???', amp_dtype: str = '???')

Bases: object

amp: bool = '???'
amp_dtype: str = '???'
grad_agg_freq: int = '???'
max_steps: int = '???'
print_stats_freq: int = '???'
rec_constraint_freq: int = '???'
rec_inference_freq: int = '???'
rec_monitor_freq: int = '???'
rec_results_freq: int = '???'
rec_validation_freq: int = '???'
save_network_freq: int = '???'
summary_freq: int = '???'
class modulus.hydra.training.VariationalTraining(max_steps: int = 10000, grad_agg_freq: int = 1, rec_results_freq: int = 1000, rec_validation_freq: int = '${training.rec_results_freq}', rec_inference_freq: int = '${training.rec_results_freq}', rec_monitor_freq: int = '${training.rec_results_freq}', rec_constraint_freq: int = '${training.rec_results_freq}', save_network_freq: int = 1000, print_stats_freq: int = 100, summary_freq: int = 1000, amp: bool = False, amp_dtype: str = 'float16', ntk: modulus.hydra.loss.NTKConf = NTKConf(use_ntk=False, save_name=None, run_freq=1000), test_function: str = '???', use_quadratures: bool = False)

Bases: modulus.hydra.training.DefaultTraining

test_function: str = '???'
use_quadratures: bool = False
modulus.hydra.training.register_training_configs() → None
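
The rec_*_freq fields default to the interpolation ${training.rec_results_freq}, so overriding rec_results_freq alone changes all of the recording frequencies at once. A standalone sketch of that behavior (the nesting under a "training" key mirrors how the interpolation is written):

    from omegaconf import OmegaConf
    from modulus.hydra.training import DefaultTraining

    cfg = OmegaConf.create({"training": OmegaConf.structured(DefaultTraining())})
    cfg.training.rec_results_freq = 2500
    print(cfg.training.rec_validation_freq)  # 2500, via ${training.rec_results_freq}
    print(cfg.training.rec_constraint_freq)  # 2500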

hydra.utils

modulus.hydra.utils.add_hydra_run_path(path: Union[str, pathlib.Path]) → pathlib.Path

Prepends the current Hydra run path to the given path

modulus.hydra.utils.compose(config_name: Optional[str] = None, config_path: Optional[str] = None, overrides: List[str] = [], return_hydra_config: bool = False, job_name: Optional[str] = 'app', caller_stack_depth: int = 2) → omegaconf.dictconfig.DictConfig

Internal Modulus config initializer and compose function. This is an alternative way to initialize a Hydra config and should be used only as a last resort when @modulus.main() cannot be used. For more info see: https://hydra.cc/docs/advanced/compose_api/

config_name : str
    Modulus config name
config_path : str
    Path to the config file, relative to the caller at location caller_stack_depth
overrides : List[str]
    List of config overrides
return_hydra_config : bool
    Return the Hydra options in the dict config
job_name : str
    Name of the program run instance
caller_stack_depth : int
    Stack depth of this function call (needed to locate the config directory relative to the calling Python file)
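
A minimal sketch of the compose path, e.g. from a notebook or test where the decorator cannot be used; the conf/config.yaml referenced here is an assumed file, not one shipped with Modulus:

    from modulus.hydra.utils import compose, to_yaml

    cfg = compose(config_name="config", config_path="conf")  # assumes ./conf/config.yaml
    print(to_yaml(cfg))
    print(cfg.training.max_steps)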

modulus.hydra.utils.main(config_path: str, config_name: str = 'config')

Modified decorator for loading Hydra configs in Modulus. See: https://github.com/facebookresearch/hydra/blob/main/hydra/main.py

modulus.hydra.utils.to_absolute_path(*args: Union[str, pathlib.Path])

Converts a file path to an absolute path based on the run file location. Modified from: https://github.com/facebookresearch/hydra/blob/main/hydra/utils.py

modulus.hydra.utils.to_yaml(cfg: omegaconf.dictconfig.DictConfig)

Converts a dict config into a YAML string