nemo_automodel.components.models.qwen3_moe.model
#
Module Contents#
Classes#
Data#
API#
- class nemo_automodel.components.models.qwen3_moe.model.Block(
- layer_idx: int,
- config: transformers.models.qwen3_moe.configuration_qwen3_moe.Qwen3MoeConfig,
- moe_config: nemo_automodel.components.moe.layers.MoEConfig,
- backend: nemo_automodel.components.moe.utils.BackendConfig,
- )#
Bases:
torch.nn.Module
Initialization
- forward(
- x: torch.Tensor,
- *,
- freqs_cis: torch.Tensor,
- attention_mask: torch.Tensor | None = None,
- padding_mask: torch.Tensor | None = None,
- **attn_kwargs: Any,
- )#
- _mlp(
- x: torch.Tensor,
- padding_mask: torch.Tensor | None,
- )#
- init_weights(buffer_device: torch.device)#
- class nemo_automodel.components.models.qwen3_moe.model.Qwen3MoeModel(
- config: transformers.models.qwen3_moe.configuration_qwen3_moe.Qwen3MoeConfig,
- backend: nemo_automodel.components.moe.utils.BackendConfig,
- *,
- moe_config: nemo_automodel.components.moe.layers.MoEConfig | None = None,
- )#
Bases:
torch.nn.Module
Initialization
- forward(
- input_ids: torch.Tensor,
- *,
- position_ids: torch.Tensor | None = None,
- attention_mask: torch.Tensor | None = None,
- padding_mask: torch.Tensor | None = None,
- **attn_kwargs: Any,
- )#
- init_weights(buffer_device: torch.device | None = None) None #
- class nemo_automodel.components.models.qwen3_moe.model.Qwen3MoeForCausalLM(
- config: transformers.models.qwen3_moe.configuration_qwen3_moe.Qwen3MoeConfig,
- moe_config: nemo_automodel.components.moe.layers.MoEConfig | None = None,
- backend: nemo_automodel.components.moe.utils.BackendConfig | None = None,
- **kwargs,
- )#
Bases:
torch.nn.Module, nemo_automodel.components.moe.fsdp_mixin.MoEFSDPSyncMixin
- classmethod from_config(
- config: transformers.models.qwen3_moe.configuration_qwen3_moe.Qwen3MoeConfig,
- moe_config: nemo_automodel.components.moe.layers.MoEConfig | None = None,
- backend: nemo_automodel.components.moe.utils.BackendConfig | None = None,
- **kwargs,
- )#
- classmethod from_pretrained(
- pretrained_model_name_or_path: str,
- *model_args,
- **kwargs,
- )#
- forward(
- input_ids: torch.Tensor,
- *,
- position_ids: torch.Tensor | None = None,
- attention_mask: torch.Tensor | None = None,
- padding_mask: torch.Tensor | None = None,
- **attn_kwargs: Any,
- )#
- initialize_weights(
- buffer_device: torch.device | None = None,
- dtype: torch.dtype = torch.bfloat16,
- )#
- nemo_automodel.components.models.qwen3_moe.model.ModelClass#
None