bridge.recipes.moonlight.moonlight_16b#

Module Contents#

Classes#

MoonlightCommonKwargs

Typed options accepted by Moonlight family recipe helpers.

MoonlightFinetuneKwargs

Typed options accepted by Moonlight-16B finetune recipe helpers.

Functions#

moonlight_16b_pretrain_config

Return a pre-training config for Moonlight-16B.

_moonlight_common

Create a pre-training configuration for the Moonlight-16B model.

_model_config

Configure the Moonlight-16B model.

moonlight_16b_finetune_config

Return a finetuning config for Moonlight-16B.

_moonlight_finetune_common

Create a finetuning configuration for the Moonlight-16B model.

Data#

API#

bridge.recipes.moonlight.moonlight_16b.logger#

'getLogger(...)'

class bridge.recipes.moonlight.moonlight_16b.MoonlightCommonKwargs#

Bases: typing_extensions.TypedDict

Typed options accepted by Moonlight family recipe helpers.

Initialization

Initialize self. See help(type(self)) for accurate signature.

dir: Optional[str]#

None

name: str#

None

data_paths: Optional[List[str]]#

None

data_args_path: Optional[str]#

None

train_data_path: Optional[List[str]]#

None

valid_data_path: Optional[List[str]]#

None

test_data_path: Optional[List[str]]#

None

per_split_data_args_path: Optional[str]#

None

mock: bool#

None

tensor_model_parallel_size: int#

None

pipeline_model_parallel_size: int#

None

pipeline_dtype: Optional[torch.dtype]#

None

virtual_pipeline_model_parallel_size: Optional[int]#

None

context_parallel_size: int#

None

expert_model_parallel_size: int#

None

sequence_parallel: bool#

None

recompute_granularity: str#

None

recompute_modules: Optional[List[str]]#

None

recompute_method: Optional[str]#

None

recompute_num_layers: Optional[int]#

None

enable_deepep: bool#

None

apply_rope_fusion: bool#

None

train_iters: int#

None

global_batch_size: int#

None

micro_batch_size: int#

None

seq_length: int#

None

lr: float#

None

min_lr: float#

None

lr_warmup_iters: int#

None

optimizer_type: str#

None

eval_interval: int#

None

save_interval: int#

None

precision_config: Optional[Union[megatron.bridge.training.mixed_precision.MixedPrecisionConfig, str]]#

None

comm_overlap_config: Optional[megatron.bridge.training.comm_overlap.CommOverlapConfig]#

None
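
A minimal usage sketch: the dictionary below collects a handful of documented MoonlightCommonKwargs keys (values are illustrative, and this assumes partial dictionaries are accepted) so a static type checker can validate them before they are forwarded to moonlight_16b_pretrain_config.

```python
from bridge.recipes.moonlight.moonlight_16b import (
    MoonlightCommonKwargs,
    moonlight_16b_pretrain_config,
)

# Collect only the overrides of interest; every other option falls back to
# the defaults documented for _moonlight_common below.
overrides: MoonlightCommonKwargs = {
    "name": "moonlight_16b_pretrain",
    "mock": True,              # synthetic data, no dataset paths required
    "micro_batch_size": 1,
    "global_batch_size": 512,
    "seq_length": 4096,
}

cfg = moonlight_16b_pretrain_config(**overrides)
```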

class bridge.recipes.moonlight.moonlight_16b.MoonlightFinetuneKwargs#

Bases: typing_extensions.TypedDict

Typed options accepted by Moonlight-16B finetune recipe helpers.

Initialization

Initialize self. See help(type(self)) for accurate signature.

tokenizer_path: str#

None

dir: Optional[str]#

None

name: str#

None

tensor_model_parallel_size: int#

None

pipeline_model_parallel_size: int#

None

pipeline_dtype: Optional[torch.dtype]#

None

virtual_pipeline_model_parallel_size: Optional[int]#

None

context_parallel_size: int#

None

expert_model_parallel_size: int#

None

sequence_parallel: bool#

None

recompute_granularity: str#

None

recompute_modules: Optional[List[str]]#

None

recompute_method: Optional[str]#

None

recompute_num_layers: Optional[int]#

None

enable_deepep: bool#

None

apply_rope_fusion: bool#

None

pretrained_checkpoint: Optional[str]#

None

peft: Optional[Union[str, megatron.bridge.peft.base.PEFT]]#

None

packed_sequence: bool#

None

train_iters: int#

None

global_batch_size: Optional[int]#

None

micro_batch_size: int#

None

seq_length: int#

None

finetune_lr: float#

None

min_lr: float#

None

lr_warmup_iters: int#

None

lr_decay_iters: Optional[int]#

None

eval_interval: int#

None

save_interval: int#

None

precision_config: Optional[Union[megatron.bridge.training.mixed_precision.MixedPrecisionConfig, str]]#

None

comm_overlap_config: Optional[megatron.bridge.training.comm_overlap.CommOverlapConfig]#

None

wandb_project: Optional[str]#

None

wandb_entity: Optional[str]#

None

wandb_exp_name: Optional[str]#

None

bridge.recipes.moonlight.moonlight_16b.moonlight_16b_pretrain_config(
**user_kwargs: typing_extensions.Unpack[bridge.recipes.moonlight.moonlight_16b.MoonlightCommonKwargs],
) → megatron.bridge.training.config.ConfigContainer#

Return a pre-training config for Moonlight-16B.

See _moonlight_common for the full list of parameters.
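
A hedged example of calling the public recipe with explicit per-split datasets; the paths and run name are placeholders, and all keyword arguments are among those documented for _moonlight_common below.

```python
from bridge.recipes.moonlight.moonlight_16b import moonlight_16b_pretrain_config

cfg = moonlight_16b_pretrain_config(
    name="moonlight_16b_pretrain",
    dir="/results/moonlight_16b",                    # placeholder output directory
    mock=False,
    train_data_path=["/data/train_text_document"],   # placeholder dataset prefixes
    valid_data_path=["/data/valid_text_document"],
    test_data_path=["/data/test_text_document"],
    seq_length=4096,
    global_batch_size=2048,
)
```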

bridge.recipes.moonlight.moonlight_16b._moonlight_common(
dir: Optional[str] = None,
name: str = 'default',
data_paths: Optional[List[str]] = None,
data_args_path: Optional[str] = None,
train_data_path: Optional[List[str]] = None,
valid_data_path: Optional[List[str]] = None,
test_data_path: Optional[List[str]] = None,
per_split_data_args_path: Optional[str] = None,
mock: bool = False,
tensor_model_parallel_size: int = 2,
pipeline_model_parallel_size: int = 2,
pipeline_dtype: Optional[torch.dtype] = torch.bfloat16,
virtual_pipeline_model_parallel_size: Optional[int] = None,
context_parallel_size: int = 1,
expert_model_parallel_size: int = 4,
sequence_parallel: bool = True,
recompute_granularity: str = 'selective',
recompute_modules: Optional[List[str]] = None,
recompute_method: Optional[str] = None,
recompute_num_layers: Optional[int] = None,
enable_deepep: bool = False,
apply_rope_fusion: bool = False,
train_iters: int = 500000,
global_batch_size: int = 2048,
micro_batch_size: int = 1,
seq_length: int = 4096,
lr: float = 0.0003,
min_lr: float = 3e-05,
lr_warmup_iters: int = 2000,
optimizer_type: str = 'adam',
eval_interval: int = 2000,
save_interval: int = 2000,
precision_config: Optional[Union[megatron.bridge.training.mixed_precision.MixedPrecisionConfig, str]] = None,
comm_overlap_config: Optional[megatron.bridge.training.comm_overlap.CommOverlapConfig] = None,
) → megatron.bridge.training.config.ConfigContainer#

Create a pre-training configuration for the Moonlight-16B model.

Parameters:
  • dir (Optional[str]) – Base directory for saving logs and checkpoints.

  • name (str) – Name of the pre-training run.

  • data_paths (Optional[List[str]]) – List of paths to dataset files. If None, mock data will be used.

  • data_args_path (Optional[str]) – Path to file containing data arguments.

  • train_data_path (Optional[List[str]]) – List of training data paths.

  • valid_data_path (Optional[List[str]]) – List of validation data paths.

  • test_data_path (Optional[List[str]]) – List of test data paths.

  • per_split_data_args_path (Optional[str]) – Path to JSON file with per-split data configuration.

  • mock (bool) – Whether to use mock data. If True, ignores data_paths.

  • tensor_model_parallel_size (int) – Degree of tensor model parallelism.

  • pipeline_model_parallel_size (int) – Degree of pipeline model parallelism.

  • pipeline_dtype (Optional[torch.dtype]) – Data type for pipeline parallelism.

  • virtual_pipeline_model_parallel_size (Optional[int]) – Size of virtual pipeline parallelism.

  • context_parallel_size (int) – Degree of context parallelism.

  • expert_model_parallel_size (int) – Degree of expert model parallelism.

  • sequence_parallel (bool) – Whether to use sequence parallelism.

  • recompute_granularity (str) – Recomputation granularity.

  • recompute_modules (Optional[List[str]]) – Modules to recompute.

  • recompute_method (Optional[str]) – Recomputation method.

  • recompute_num_layers (Optional[int]) – Number of layers to recompute.

  • enable_deepep (bool) – Whether to use DeepEP.

  • apply_rope_fusion (bool) – Whether to apply RoPE fusion.

  • train_iters (int) – Total number of training iterations.

  • global_batch_size (int) – Global batch size for training.

  • micro_batch_size (int) – Micro batch size for training.

  • seq_length (int) – Sequence length for training data.

  • lr (float) – Learning rate.

  • min_lr (float) – Minimum learning rate for cosine decay.

  • lr_warmup_iters (int) – Number of warmup iterations for the learning rate.

  • optimizer_type (str) – Type of optimizer to use.

  • eval_interval (int) – Interval for evaluation.

  • save_interval (int) – Interval for saving checkpoints.

  • precision_config (Optional[Union[MixedPrecisionConfig, str]]) – Precision configuration for the model.

  • comm_overlap_config (Optional[CommOverlapConfig]) – Communication overlap configuration.

Returns:

Configuration for pre-training.

Return type:

ConfigContainer
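
As a quick sanity check on the batch-size defaults above, the relationship between global batch size, micro batch size, and data parallelism follows standard Megatron accounting; the data-parallel size below is hypothetical and depends on the GPU count and the TP/PP/CP settings.

```python
# Illustrative arithmetic only: with global_batch_size=2048 and
# micro_batch_size=1, a hypothetical data-parallel size of 64 implies
# 32 gradient-accumulation steps per optimizer step.
global_batch_size = 2048
micro_batch_size = 1
data_parallel_size = 64  # hypothetical; derived from GPUs / (TP * PP * CP)
grad_accum_steps = global_batch_size // (micro_batch_size * data_parallel_size)
print(grad_accum_steps)  # 32
```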

bridge.recipes.moonlight.moonlight_16b._model_config(
tensor_model_parallel_size: int = 2,
pipeline_model_parallel_size: int = 1,
pipeline_dtype: Optional[torch.dtype] = None,
virtual_pipeline_model_parallel_size: Optional[int] = None,
context_parallel_size: int = 1,
expert_model_parallel_size: int = 8,
sequence_parallel: bool = True,
recompute_granularity: str = 'selective',
recompute_modules: Optional[List[str]] = None,
recompute_method: Optional[str] = None,
recompute_num_layers: Optional[int] = None,
enable_deepep: bool = False,
apply_rope_fusion: bool = False,
) → megatron.bridge.models.deepseek.MoonlightModelProvider16B#

Configure the Moonlight-16B model.

Parameters:
  • tensor_model_parallel_size – Degree of tensor model parallelism.

  • pipeline_model_parallel_size – Degree of pipeline model parallelism.

  • pipeline_dtype – Data type for pipeline parallelism.

  • virtual_pipeline_model_parallel_size – Size of virtual pipeline parallelism.

  • context_parallel_size – Degree of context parallelism.

  • expert_model_parallel_size – Degree of expert model parallelism.

  • sequence_parallel – Whether to use sequence parallelism.

  • recompute_granularity – Recomputation granularity.

  • recompute_modules – Modules to recompute.

  • recompute_method – Recomputation method.

  • recompute_num_layers – Number of layers to recompute.

  • enable_deepep – Whether to use DeepEP.

  • apply_rope_fusion – Whether to apply RoPE fusion.

Returns:

Configuration for the Moonlight-16B model.

Return type:

MoonlightModelProvider16B
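
_model_config is a private helper and is normally reached through the recipe functions above, but a minimal sketch of a direct call (using only the parameters documented here, and subject to change since the helper is private) looks like this:

```python
from bridge.recipes.moonlight.moonlight_16b import _model_config

# Build only the model provider, without the surrounding training config.
provider = _model_config(
    tensor_model_parallel_size=2,
    pipeline_model_parallel_size=1,
    expert_model_parallel_size=8,
    sequence_parallel=True,
    recompute_granularity="selective",
    enable_deepep=False,
)
# `provider` is a MoonlightModelProvider16B carrying the model-side settings.
```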

bridge.recipes.moonlight.moonlight_16b.moonlight_16b_finetune_config(
**user_kwargs: typing_extensions.Unpack[bridge.recipes.moonlight.moonlight_16b.MoonlightFinetuneKwargs],
) → megatron.bridge.training.config.ConfigContainer#

Return a finetuning config for Moonlight-16B.

Default configuration: 1 node, 8 GPUs

  • LoRA/DoRA: TP=1, PP=1, EP=2, LR=1e-4

  • Full SFT: TP=2, PP=1, EP=8, lower LR (5e-6)

See _moonlight_finetune_common for the full list of parameters.
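
A sketch mirroring the two default setups listed above; the tokenizer path is a placeholder and only documented keyword arguments are used.

```python
from bridge.recipes.moonlight.moonlight_16b import moonlight_16b_finetune_config

# LoRA/DoRA-style run (TP=1, PP=1, EP=2, LR=1e-4, matching the defaults above).
lora_cfg = moonlight_16b_finetune_config(
    tokenizer_path="/path/to/hf_tokenizer",  # placeholder
    peft="lora",
    tensor_model_parallel_size=1,
    pipeline_model_parallel_size=1,
    expert_model_parallel_size=2,
    finetune_lr=1e-4,
)

# Full-parameter SFT (TP=2, PP=1, EP=8, lower learning rate).
sft_cfg = moonlight_16b_finetune_config(
    tokenizer_path="/path/to/hf_tokenizer",  # placeholder
    peft=None,
    tensor_model_parallel_size=2,
    pipeline_model_parallel_size=1,
    expert_model_parallel_size=8,
    finetune_lr=5e-6,
)
```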

bridge.recipes.moonlight.moonlight_16b._moonlight_finetune_common(
tokenizer_path: str,
dir: Optional[str] = None,
name: str = 'default',
tensor_model_parallel_size: int = 2,
pipeline_model_parallel_size: int = 1,
pipeline_dtype: Optional[torch.dtype] = torch.bfloat16,
virtual_pipeline_model_parallel_size: Optional[int] = None,
context_parallel_size: int = 1,
expert_model_parallel_size: int = 8,
sequence_parallel: bool = True,
recompute_granularity: str = 'selective',
recompute_modules: Optional[List[str]] = None,
recompute_method: Optional[str] = None,
recompute_num_layers: Optional[int] = None,
enable_deepep: bool = False,
apply_rope_fusion: bool = False,
pretrained_checkpoint: Optional[str] = None,
peft: Optional[Union[str, megatron.bridge.peft.base.PEFT]] = 'lora',
packed_sequence: bool = False,
train_iters: int = 1000,
global_batch_size: int = 128,
micro_batch_size: int = 1,
seq_length: int = 4096,
eval_interval: int = 50,
save_interval: int = 50,
finetune_lr: float = 0.0001,
min_lr: float = 0.0,
lr_warmup_iters: int = 50,
lr_decay_iters: Optional[int] = None,
precision_config: Optional[Union[megatron.bridge.training.mixed_precision.MixedPrecisionConfig, str]] = None,
comm_overlap_config: Optional[megatron.bridge.training.comm_overlap.CommOverlapConfig] = None,
wandb_project: Optional[str] = None,
wandb_entity: Optional[str] = None,
wandb_exp_name: Optional[str] = None,
) → megatron.bridge.training.config.ConfigContainer#

Create a finetuning configuration for the Moonlight-16B model.

Parameters:
  • tokenizer_path (str) – Path to the HuggingFace tokenizer.

  • dir (Optional[str]) – Base directory for saving logs and checkpoints.

  • name (str) – Name of the finetuning run.

  • tensor_model_parallel_size (int) – Degree of tensor model parallelism.

  • pipeline_model_parallel_size (int) – Degree of pipeline model parallelism.

  • pipeline_dtype (Optional[torch.dtype]) – Data type for pipeline parallelism.

  • virtual_pipeline_model_parallel_size (Optional[int]) – Size of virtual pipeline parallelism.

  • context_parallel_size (int) – Degree of context parallelism.

  • expert_model_parallel_size (int) – Degree of expert model parallelism.

  • sequence_parallel (bool) – Whether to use sequence parallelism.

  • recompute_granularity (str) – Recomputation granularity.

  • recompute_modules (Optional[List[str]]) – Modules to recompute.

  • recompute_method (Optional[str]) – Recomputation method.

  • recompute_num_layers (Optional[int]) – Number of layers to recompute.

  • enable_deepep (bool) – Whether to use DeepEP.

  • apply_rope_fusion (bool) – Whether to apply RoPE fusion.

  • pretrained_checkpoint (Optional[str]) – Path to pretrained checkpoint.

  • peft (Optional[Union[str, PEFT]]) – PEFT configuration (e.g., "lora", "dora", or None for full SFT).

  • packed_sequence (bool) – Whether to use packed sequences.

  • train_iters (int) – Total number of training iterations.

  • global_batch_size (int) – Global batch size for training.

  • micro_batch_size (int) – Micro batch size for training.

  • seq_length (int) – Sequence length for training data.

  • eval_interval (int) – Interval for evaluation.

  • save_interval (int) – Interval for saving checkpoints.

  • finetune_lr (float) – Learning rate for finetuning.

  • min_lr (float) – Minimum learning rate for cosine decay.

  • lr_warmup_iters (int) – Number of warmup iterations for the learning rate.

  • lr_decay_iters (Optional[int]) – Number of decay iterations for the learning rate.

  • precision_config (Optional[Union[MixedPrecisionConfig, str]]) – Precision configuration for the model.

  • comm_overlap_config (Optional[CommOverlapConfig]) – Communication overlap configuration.

  • wandb_project (Optional[str]) – Weights & Biases project name.

  • wandb_entity (Optional[str]) – Weights & Biases entity name.

  • wandb_exp_name (Optional[str]) – Weights & Biases experiment name.

Returns:

Configuration for finetuning.

Return type:

ConfigContainer
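
Finally, a sketch that combines checkpoint restore, packed sequences, and Weights & Biases logging; every path and project name below is a placeholder.

```python
from bridge.recipes.moonlight.moonlight_16b import moonlight_16b_finetune_config

cfg = moonlight_16b_finetune_config(
    tokenizer_path="/path/to/hf_tokenizer",        # placeholder
    pretrained_checkpoint="/ckpts/moonlight_16b",  # placeholder Megatron checkpoint
    peft="dora",
    packed_sequence=True,
    train_iters=1000,
    eval_interval=50,
    save_interval=50,
    wandb_project="moonlight-16b-finetune",        # placeholder
    wandb_exp_name="dora-packed-4096",
)
```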