bridge.recipes.deepseek.deepseek_v3#

Module Contents#

Classes#

DeepSeekV3CommonKwargs

Typed options accepted by DeepSeek V3 recipe helper functions.

Functions#

deepseek_v3_pretrain_config

Return a pre-training config for DeepSeek-V3.

deepseek_v3_pretrain_config_32nodes

Create a pre-training configuration for the DeepSeek-V3 (671B) model using a minimal number of nodes (32).

_deepseek_v3_common

Create a pre-training configuration for DeepSeek-V3 models using a given HuggingFace path.

API#

class bridge.recipes.deepseek.deepseek_v3.DeepSeekV3CommonKwargs#

Bases: typing_extensions.TypedDict

Typed options accepted by DeepSeek V3 recipe helper functions.


hf_path: str#

None

dir: Optional[str]#

None

name: str#

None

data_paths: Optional[List[str]]#

None

data_args_path: Optional[str]#

None

train_data_path: Optional[List[str]]#

None

valid_data_path: Optional[List[str]]#

None

test_data_path: Optional[List[str]]#

None

per_split_data_args_path: Optional[str]#

None

mock: bool#

None

tensor_parallelism: int#

None

pipeline_parallelism: int#

None

pipeline_parallelism_dtype: Optional[torch.dtype]#

None

virtual_pipeline_parallelism: Optional[int]#

None

context_parallelism: int#

None

expert_parallelism: int#

None

sequence_parallelism: bool#

None

use_megatron_fsdp: bool#

None

check_for_nan_in_grad: bool#

None

recompute_granularity: Optional[str]#

None

recompute_modules: Optional[List[str]]#

None

recompute_method: Optional[str]#

None

recompute_num_layers: Optional[int]#

None

mtp_num_layers: Optional[int]#

None

mtp_loss_scaling_factor: Optional[float]#

None

train_iters: int#

None

global_batch_size: int#

None

micro_batch_size: int#

None

seq_length: int#

None

lr: float#

None

min_lr: float#

None

lr_warmup_iters: int#

None

lr_decay_iters: Optional[int]#

None

eval_interval: int#

None

save_interval: int#

None

use_null_tokenizer: bool#

None

precision_config: Optional[Union[megatron.bridge.training.mixed_precision.MixedPrecisionConfig, str]]#

None

comm_overlap_config: Optional[megatron.bridge.training.comm_overlap.CommOverlapConfig]#

None

enable_deepep: bool#

None

apply_rope_fusion: bool#

None

layout: Optional[Union[str, List[List[str]]]]#

None
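
For illustration, a minimal sketch of collecting overrides in this TypedDict before forwarding them to a recipe helper. The import path assumes the module is importable under the megatron.bridge namespace (as the cross-references on this page suggest); the values are hypothetical, and passing a partial dict assumes the keys are declared not-required, the usual pattern for Unpack-style kwargs backed by defaults.

```python
from megatron.bridge.recipes.deepseek.deepseek_v3 import (
    DeepSeekV3CommonKwargs,
    deepseek_v3_pretrain_config,
)

# Hypothetical override set; any key omitted here falls back to the
# defaults documented on _deepseek_v3_common below.
overrides: DeepSeekV3CommonKwargs = {
    "name": "dsv3_smoke_test",
    "mock": True,          # synthetic data, so no data paths are needed
    "train_iters": 100,
}

config = deepseek_v3_pretrain_config(**overrides)
```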

bridge.recipes.deepseek.deepseek_v3.deepseek_v3_pretrain_config(
**user_kwargs: typing_extensions.Unpack[bridge.recipes.deepseek.deepseek_v3.DeepSeekV3CommonKwargs],
) → megatron.bridge.training.config.ConfigContainer#

Return a pre-training config for DeepSeek-V3.

See _deepseek_v3_common for the full list of parameters.
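
As a hedged example, the same recipe driven directly with keyword arguments; every keyword below is documented on _deepseek_v3_common, the values are illustrative, and the data path is a placeholder rather than a real dataset.

```python
from megatron.bridge.recipes.deepseek.deepseek_v3 import deepseek_v3_pretrain_config

# Illustrative run over a pre-tokenized dataset; the path is a placeholder.
config = deepseek_v3_pretrain_config(
    data_paths=["/data/dsv3/tokenized_text_document"],
    train_iters=10_000,
    global_batch_size=512,
    lr=3e-4,
    min_lr=3e-5,
)
```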

bridge.recipes.deepseek.deepseek_v3.deepseek_v3_pretrain_config_32nodes(
**user_kwargs: typing_extensions.Unpack[bridge.recipes.deepseek.deepseek_v3.DeepSeekV3CommonKwargs],
) → megatron.bridge.training.config.ConfigContainer#

Create a pre-training configuration for the DeepSeek-V3 (671B) model using a minimal number of nodes (32).

Returns:

Configuration for pre-training.

Return type:

ConfigContainer
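
A minimal usage sketch, again assuming the megatron.bridge import namespace; mock=True keeps the example self-contained by skipping real dataset paths.

```python
from megatron.bridge.recipes.deepseek.deepseek_v3 import (
    deepseek_v3_pretrain_config_32nodes,
)

# Accepts the same DeepSeekV3CommonKwargs keywords as
# deepseek_v3_pretrain_config; this variant's defaults target a
# minimal 32-node launch.
config = deepseek_v3_pretrain_config_32nodes(mock=True)
```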

bridge.recipes.deepseek.deepseek_v3._deepseek_v3_common(
hf_path: str,
dir: Optional[str] = None,
name: str = 'default',
data_paths: Optional[List[str]] = None,
data_args_path: Optional[str] = None,
train_data_path: Optional[List[str]] = None,
valid_data_path: Optional[List[str]] = None,
test_data_path: Optional[List[str]] = None,
per_split_data_args_path: Optional[str] = None,
mock: bool = False,
tensor_parallelism: int = 2,
pipeline_parallelism: int = 16,
pipeline_parallelism_dtype: Optional[torch.dtype] = torch.bfloat16,
virtual_pipeline_parallelism: Optional[int] = None,
context_parallelism: int = 1,
expert_parallelism: int = 64,
sequence_parallelism: bool = True,
use_megatron_fsdp: bool = False,
check_for_nan_in_grad: bool = True,
recompute_granularity: Optional[str] = 'selective',
recompute_modules: Optional[List[str]] = None,
recompute_method: Optional[str] = None,
recompute_num_layers: Optional[int] = None,
mtp_num_layers: Optional[int] = 1,
mtp_loss_scaling_factor: Optional[float] = 0.1,
train_iters: int = 1000000,
global_batch_size: int = 4096,
micro_batch_size: int = 1,
seq_length: int = 4096,
lr: float = 0.0003,
min_lr: float = 3e-05,
lr_warmup_iters: int = 2000,
lr_decay_iters: Optional[int] = None,
eval_interval: int = 2000,
save_interval: int = 2000,
use_null_tokenizer: bool = True,
precision_config: Optional[Union[megatron.bridge.training.mixed_precision.MixedPrecisionConfig, str]] = None,
comm_overlap_config: Optional[megatron.bridge.training.comm_overlap.CommOverlapConfig] = None,
enable_deepep: bool = False,
apply_rope_fusion: bool = False,
layout: Optional[Union[str, List[List[str]]]] = None,
) → megatron.bridge.training.config.ConfigContainer#

Create a pre-training configuration for DeepSeek-V3 models using a given HuggingFace path.
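
As a sizing aid, a minimal sketch of the GPU arithmetic implied by the default parallelism degrees above, following standard Megatron-LM conventions; the cluster shape is an illustrative assumption, not a value taken from this recipe, and expert_parallelism=64 additionally shards the MoE experts subject to Megatron-Core's divisibility constraints.

```python
# Illustrative GPU arithmetic for the defaults above. The cluster shape
# is an assumption, not a value from this recipe.
num_nodes, gpus_per_node = 256, 8   # assumption: 2048 GPUs in total
world_size = num_nodes * gpus_per_node

tensor_parallelism = 2              # recipe default
pipeline_parallelism = 16           # recipe default
context_parallelism = 1             # recipe default
model_parallel_size = (
    tensor_parallelism * pipeline_parallelism * context_parallelism
)                                   # 32 GPUs per model replica

assert world_size % model_parallel_size == 0
data_parallelism = world_size // model_parallel_size  # 2048 // 32 = 64

print(f"{data_parallelism} data-parallel replicas, "
      f"each spanning {model_parallel_size} GPUs")
```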