bridge.recipes.gemma.gemma2#

Module Contents#

Classes#

Gemma2CommonKwargs

Typed options accepted by Gemma2 recipe helper functions.

Gemma2FinetuneKwargs

Typed options accepted by Gemma2 finetuning recipe helper functions.

Functions#

gemma2_2b_pretrain_config

Return a pre-training config for Gemma2 2B.

gemma2_9b_pretrain_config

Return a pre-training config for Gemma2 9B.

gemma2_27b_pretrain_config

Return a pre-training config for Gemma2 27B.

_gemma2_common

Create a pre-training configuration for Gemma2 models.

gemma2_2b_finetune_config

Return a finetuning config for Gemma2 2B.

gemma2_9b_finetune_config

Return a finetuning config for Gemma2 9B.

gemma2_27b_finetune_config

Return a finetuning config for Gemma2 27B.

_gemma2_finetune_common

Create a common finetuning configuration for all Gemma2 models.

API#

class bridge.recipes.gemma.gemma2.Gemma2CommonKwargs#

Bases: typing_extensions.TypedDict

Typed options accepted by Gemma2 recipe helper functions.


hf_path: str#

None

dir: Optional[str]#

None

name: str#

None

data_paths: Optional[List[str]]#

None

data_args_path: Optional[str]#

None

train_data_path: Optional[List[str]]#

None

valid_data_path: Optional[List[str]]#

None

test_data_path: Optional[List[str]]#

None

per_split_data_args_path: Optional[str]#

None

mock: bool#

None

tensor_model_parallel_size: int#

None

pipeline_model_parallel_size: int#

None

pipeline_dtype: Optional[torch.dtype]#

None

virtual_pipeline_model_parallel_size: Optional[int]#

None

context_parallel_size: int#

None

sequence_parallel: bool#

None

use_megatron_fsdp: bool#

None

train_iters: int#

None

global_batch_size: int#

None

micro_batch_size: int#

None

seq_length: int#

None

lr: float#

None

min_lr: float#

None

lr_warmup_iters: int#

None

lr_decay_iters: Optional[int]#

None

eval_interval: int#

None

save_interval: int#

None

use_null_tokenizer: bool#

None

precision_config: Optional[Union[megatron.bridge.training.mixed_precision.MixedPrecisionConfig, str]]#

None

comm_overlap_config: Optional[megatron.bridge.training.comm_overlap.CommOverlapConfig]#

None
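
Example (illustrative): the options above map one-to-one onto the keyword arguments of the per-size pretrain helpers, so a single override dict can be reused across model sizes. A minimal sketch, assuming the module is importable as megatron.bridge.recipes.gemma.gemma2 (inferred from the return-type paths below) and that the TypedDict permits partial dicts.

```python
from megatron.bridge.recipes.gemma.gemma2 import (
    Gemma2CommonKwargs,
    gemma2_2b_pretrain_config,
    gemma2_9b_pretrain_config,
)

# Only the keys you want to override need to be present; everything else
# falls back to the defaults documented for _gemma2_common below.
overrides: Gemma2CommonKwargs = {
    "name": "gemma2_pretrain_smoke_test",
    "mock": True,                 # synthetic data for a quick dry run
    "global_batch_size": 64,
    "seq_length": 4096,
}

cfg_2b = gemma2_2b_pretrain_config(**overrides)
cfg_9b = gemma2_9b_pretrain_config(**overrides)
```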

class bridge.recipes.gemma.gemma2.Gemma2FinetuneKwargs#

Bases: typing_extensions.TypedDict

Typed options accepted by Gemma2 finetuning recipe helper functions.


hf_path: str#

None

dir: Optional[str]#

None

name: str#

None

pretrained_checkpoint: Optional[str]#

None

peft: Union[str, megatron.bridge.peft.base.PEFT, None]#

None

packed_sequence: bool#

None

train_iters: int#

None

global_batch_size: Optional[int]#

None

micro_batch_size: int#

None

seq_length: Optional[int]#

None

eval_interval: int#

None

save_interval: int#

None

finetune_lr: Optional[float]#

None

min_lr: float#

None

lr_warmup_iters: int#

None

lr_decay_iters: Optional[int]#

None

wandb_project: Optional[str]#

None

wandb_entity: Optional[str]#

None

wandb_exp_name: Optional[str]#

None

precision_config: Optional[Union[megatron.bridge.training.mixed_precision.MixedPrecisionConfig, str]]#

None
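
Example (illustrative): a sketch of a partial Gemma2FinetuneKwargs dict passed to one of the finetune helpers; the checkpoint path and W&B names are placeholders, not values taken from this module.

```python
from megatron.bridge.recipes.gemma.gemma2 import (
    Gemma2FinetuneKwargs,
    gemma2_9b_finetune_config,
)

overrides: Gemma2FinetuneKwargs = {
    "name": "gemma2_9b_lora",
    "pretrained_checkpoint": "/checkpoints/gemma2_9b",  # placeholder path
    "peft": "lora",
    "finetune_lr": 1e-4,
    "wandb_project": "gemma2-finetune",                 # placeholder project
    "wandb_exp_name": "gemma2_9b_lora_run0",            # placeholder run name
}

cfg = gemma2_9b_finetune_config(**overrides)
```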

bridge.recipes.gemma.gemma2.gemma2_2b_pretrain_config(
**user_kwargs: typing_extensions.Unpack[bridge.recipes.gemma.gemma2.Gemma2CommonKwargs],
) → megatron.bridge.training.config.ConfigContainer#

Return a pre-training config for Gemma2 2B.

Default parallelism: TP=2, PP=1
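
Example (illustrative): building the 2B pre-training config with a blended dataset while keeping the default TP=2, PP=1 layout. A minimal sketch; the tokenized data paths are placeholders.

```python
from megatron.bridge.recipes.gemma.gemma2 import gemma2_2b_pretrain_config

cfg = gemma2_2b_pretrain_config(
    name="gemma2_2b_pretrain",
    data_paths=[
        "/data/gemma2/tokenized_part0",  # placeholder path
        "/data/gemma2/tokenized_part1",  # placeholder path
    ],
    train_iters=300_000,
    global_batch_size=512,
    lr=3e-4,
    min_lr=3e-5,
)
# The default parallelism (TP=2, PP=1) applies unless
# tensor_model_parallel_size / pipeline_model_parallel_size are overridden.
```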

bridge.recipes.gemma.gemma2.gemma2_9b_pretrain_config(
**user_kwargs: typing_extensions.Unpack[bridge.recipes.gemma.gemma2.Gemma2CommonKwargs],
) → megatron.bridge.training.config.ConfigContainer#

Return a pre-training config for Gemma2 9B.

Default parallelism: TP=8, PP=1

bridge.recipes.gemma.gemma2.gemma2_27b_pretrain_config(
**user_kwargs: typing_extensions.Unpack[bridge.recipes.gemma.gemma2.Gemma2CommonKwargs],
) → megatron.bridge.training.config.ConfigContainer#

Return a pre-training config for Gemma2 27B.

Default parallelism: TP=8, PP=2
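
Example (illustrative): the 27B defaults (TP=8, PP=2) span two 8-GPU nodes. A sketch of a dry run on synthetic data; setting pipeline_dtype when PP > 1 is an assumption carried over from typical Megatron pipeline-parallel setups, not a requirement stated here.

```python
import torch

from megatron.bridge.recipes.gemma.gemma2 import gemma2_27b_pretrain_config

cfg = gemma2_27b_pretrain_config(
    mock=True,                      # synthetic data, no corpus needed
    pipeline_dtype=torch.bfloat16,  # assumed to be needed once PP > 1
    sequence_parallel=True,         # usually paired with large TP sizes
    micro_batch_size=1,
)
```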

bridge.recipes.gemma.gemma2._gemma2_common(
hf_path: str,
dir: Optional[str] = None,
name: str = 'default',
data_paths: Optional[List[str]] = None,
data_args_path: Optional[str] = None,
train_data_path: Optional[List[str]] = None,
valid_data_path: Optional[List[str]] = None,
test_data_path: Optional[List[str]] = None,
per_split_data_args_path: Optional[str] = None,
mock: bool = False,
tensor_model_parallel_size: int = 1,
pipeline_model_parallel_size: int = 1,
pipeline_dtype: Optional[torch.dtype] = None,
virtual_pipeline_model_parallel_size: Optional[int] = None,
context_parallel_size: int = 1,
sequence_parallel: bool = False,
use_megatron_fsdp: bool = False,
train_iters: int = 300000,
global_batch_size: int = 32,
micro_batch_size: int = 2,
seq_length: int = 4096,
lr: float = 0.0003,
min_lr: float = 3e-05,
lr_warmup_iters: int = 500,
lr_decay_iters: Optional[int] = None,
eval_interval: int = 500,
save_interval: int = 500,
use_null_tokenizer: bool = False,
precision_config: Optional[Union[megatron.bridge.training.mixed_precision.MixedPrecisionConfig, str]] = 'bf16_mixed',
comm_overlap_config: Optional[megatron.bridge.training.comm_overlap.CommOverlapConfig] = None,
) → megatron.bridge.training.config.ConfigContainer#

Create a pre-training configuration for Gemma2 models.
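
The per-size helpers above are presumably thin wrappers that fill in the model-specific hf_path and parallelism defaults before delegating here. A hypothetical sketch of an equivalent direct call for the 2B variant; the Hugging Face model id is assumed, not taken from this module.

```python
from megatron.bridge.recipes.gemma.gemma2 import _gemma2_common

# Hypothetical: roughly what gemma2_2b_pretrain_config() is assumed to expand to.
cfg = _gemma2_common(
    hf_path="google/gemma-2-2b",    # assumed HF model id for the 2B variant
    tensor_model_parallel_size=2,   # documented 2B default (TP=2)
    pipeline_model_parallel_size=1,
    seq_length=4096,
    mock=True,                      # synthetic data for illustration
)
```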

bridge.recipes.gemma.gemma2.gemma2_2b_finetune_config(
**user_kwargs: typing_extensions.Unpack[bridge.recipes.gemma.gemma2.Gemma2FinetuneKwargs],
) → megatron.bridge.training.config.ConfigContainer#

Return a finetuning config for Gemma2 2B.

Default configuration: 1 node, 8 GPUs

  • LoRA/DoRA: TP=1, PP=1, LR=1e-4

  • Full SFT: TP=1, PP=1, LR=5e-6
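
Example (illustrative): single-node LoRA finetuning of the 2B model, using the documented LoRA defaults; the checkpoint path is a placeholder.

```python
from megatron.bridge.recipes.gemma.gemma2 import gemma2_2b_finetune_config

cfg = gemma2_2b_finetune_config(
    pretrained_checkpoint="/checkpoints/gemma2_2b",  # placeholder path
    peft="lora",          # "dora" is assumed to be selectable by name as well
    finetune_lr=1e-4,     # documented LoRA/DoRA learning rate
    train_iters=100,
    micro_batch_size=1,
)
```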

bridge.recipes.gemma.gemma2.gemma2_9b_finetune_config(
**user_kwargs: typing_extensions.Unpack[bridge.recipes.gemma.gemma2.Gemma2FinetuneKwargs],
) → megatron.bridge.training.config.ConfigContainer#

Return a finetuning config for Gemma2 9B.

Default configuration: 1 node, 8 GPUs

  • LoRA/DoRA: TP=1, PP=1, LR=1e-4

  • Full SFT: TP=4, PP=1, LR=5e-6

bridge.recipes.gemma.gemma2.gemma2_27b_finetune_config(
**user_kwargs: typing_extensions.Unpack[bridge.recipes.gemma.gemma2.Gemma2FinetuneKwargs],
) → megatron.bridge.training.config.ConfigContainer#

Return a finetuning config for Gemma2 27B.

Default configuration: 2 nodes (SFT) or 1 node (LoRA), 8 GPUs per node

  • LoRA/DoRA: TP=4, PP=1, LR=1e-4

  • Full SFT: TP=8, PP=2, LR=5e-6
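
Example (illustrative): full-parameter SFT of the 27B model, which per the defaults above targets two 8-GPU nodes (TP=8, PP=2). Passing peft=None to select full SFT is an assumption based on the parameter's Optional type; the checkpoint path is a placeholder.

```python
from megatron.bridge.recipes.gemma.gemma2 import gemma2_27b_finetune_config

cfg = gemma2_27b_finetune_config(
    pretrained_checkpoint="/checkpoints/gemma2_27b",  # placeholder path
    peft=None,             # assumed to disable LoRA/DoRA, i.e. full SFT
    finetune_lr=5e-6,      # documented full-SFT learning rate
    global_batch_size=128,
)
```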

bridge.recipes.gemma.gemma2._gemma2_finetune_common(
hf_path: str,
dir: Optional[str] = None,
name: str = 'default',
tensor_model_parallel_size: int = 1,
pipeline_model_parallel_size: int = 1,
pipeline_dtype: Optional[torch.dtype] = None,
virtual_pipeline_model_parallel_size: Optional[int] = None,
context_parallel_size: int = 1,
sequence_parallel: bool = False,
pretrained_checkpoint: Optional[str] = None,
peft: Union[str, megatron.bridge.peft.base.PEFT, None] = 'lora',
packed_sequence: bool = False,
train_iters: int = 100,
global_batch_size: Optional[int] = None,
micro_batch_size: int = 1,
seq_length: Optional[int] = None,
eval_interval: int = 50,
save_interval: int = 100,
finetune_lr: Optional[float] = None,
min_lr: float = 0.0,
lr_warmup_iters: int = 10,
lr_decay_iters: Optional[int] = None,
wandb_project: Optional[str] = None,
wandb_entity: Optional[str] = None,
wandb_exp_name: Optional[str] = None,
precision_config: Optional[Union[megatron.bridge.training.mixed_precision.MixedPrecisionConfig, str]] = None,
) → megatron.bridge.training.config.ConfigContainer#

Create a common finetuning configuration for all Gemma2 models.
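
As with _gemma2_common, the per-size finetune helpers are presumably thin wrappers over this builder. A hypothetical sketch for the 9B full-SFT case with sequence packing and W&B logging; the HF model id, project name, and parallel sizes are assumptions based on the defaults documented above.

```python
from megatron.bridge.recipes.gemma.gemma2 import _gemma2_finetune_common

cfg = _gemma2_finetune_common(
    hf_path="google/gemma-2-9b",    # assumed HF model id for the 9B variant
    tensor_model_parallel_size=4,   # documented 9B full-SFT default (TP=4)
    peft=None,                      # full SFT rather than LoRA/DoRA
    packed_sequence=True,           # pack samples to cut padding overhead
    wandb_project="gemma2-sft",     # placeholder W&B project
)
```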