Model

MegatronBioBertModelType = TypeVar('MegatronBioBertModelType', bound=MegatronBioBertModel) module-attribute

A Megatron model that is, or extends, MegatronBioBertModel.

PositionEmbeddingKinds = Literal['learned_absolute', 'rope'] module-attribute

Kinds of supported positional embeddings.

BioBertConfig dataclass

Bases: MegatronBioNeMoTrainableModelConfig[MegatronBioBertModelType, MegatronLossType]

Config class for BioBert model, responsible for the partial configuration of Transformer models.

NOTE: do not use this config directly; define a child config that overrides items from this parent config.

configure_model() is ultimately called by the LightningModule using PyTorch Lightning (PTL) module hooks.
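
For example, a child config might look like the following sketch (the import paths, the `model_cls` field, and the chosen field values are illustrative assumptions based on this module's source path and on how `configure_model()` uses `self.model_cls`):

from dataclasses import dataclass
from typing import Type

# Assumed import path, inferred from the source file shown below.
from bionemo.llm.model.biobert.model import (
    BioBertConfig,
    MegatronBioBertModel,
    PositionEmbeddingKinds,
)


@dataclass
class MyBioBertConfig(BioBertConfig):
    """Hypothetical child config that overrides a handful of parent fields."""

    # Model class that configure_model() will instantiate (field assumed to live on the parent config).
    model_cls: Type[MegatronBioBertModel] = MegatronBioBertModel
    # Architecture overrides; any field not listed keeps the BioBertConfig default.
    num_layers: int = 12
    hidden_size: int = 768
    num_attention_heads: int = 12
    seq_length: int = 512
    position_embedding_type: PositionEmbeddingKinds = "rope"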

Source code in bionemo/llm/model/biobert/model.py
@dataclass
class BioBertConfig(
    MegatronBioNeMoTrainableModelConfig[MegatronBioBertModelType, MegatronLossType],
):
    """Config class for BioBert model, responsible for the partial configuration of Transformer models.

    NOTE: do not use this config directly; define a child config that overrides items from this parent config

    `configure_model()` is ultimately called by the LightningModule using PTL lightning module hooks.
    """

    # From megatron.core.models.gpt.bert_model.GPTModel
    fp16_lm_cross_entropy: bool = False
    apply_rope_fusion: bool = True
    parallel_output: bool = True
    bias_dropout_fusion: bool = True
    bias_activation_fusion: bool = True
    masked_softmax_fusion: bool = True
    persist_layer_norm: bool = True
    get_attention_mask_from_fusion: bool = True

    share_embeddings_and_output_weights: bool = False  # try True
    make_vocab_size_divisible_by: int = 128
    position_embedding_type: PositionEmbeddingKinds = "learned_absolute"
    rotary_base: int = 10000
    rotary_percent: float = 1.0
    seq_len_interpolation_factor: Optional[float] = None
    seq_length: int = 1024
    hidden_size: int = 512
    num_attention_heads: int = 8
    num_layers: int = 6
    init_method_std: float = 0.02
    biobert_spec_option: BiobertSpecOption = BiobertSpecOption.bert_layer_with_transformer_engine_spec

    optimizer_fn: Optional[Callable[["MegatronBioBertModel"], Optimizer]] = None
    # TODO (@skothenhill,@georgea) update to use the nemo2 checkpoint mixins
    #  support HF (requires weight interleaving on qkv layer) and nemo1 checkpoints ideally.
    # TODO (@skothenhill,@jstjohn) come up with a nice way of doing fine-tuning checkpoint loading,
    #  where some acceptable layers (eg lm_head) may or may not be absent from the model, and others
    #  (like a new head) may be new and missing from the initial checkpoint.
    nemo1_ckpt_path: Optional[str] = None

    initial_ckpt_path: Optional[str] = None
    initial_ckpt_skip_keys_with_these_prefixes: List[str] = field(default_factory=list)
    # Used if initializing from a checkpoint, set this to any fields you want to override rather than re-set.
    #  by default all fields will be overridden.
    override_parent_fields: List[str] = field(default_factory=lambda: _OVERRIDE_BIOBERT_CONFIG_DEFAULTS)
    return_embeddings: bool = False
    return_only_hidden_states: bool = False
    include_hiddens: bool = False  # Include hidden layers in the output of the model
    core_attention_override: Type[torch.nn.Module] | None = None

    # loss reduction class
    loss_reduction_class: Type[MegatronLossType] = BERTMLMLossWithReduction

    def configure_model(self, tokenizer: AutoTokenizer) -> MegatronBioBertModelType:  # noqa: D102
        vp_size = self.virtual_pipeline_model_parallel_size
        if vp_size:
            p_size = self.pipeline_model_parallel_size
            assert (
                self.num_layers // p_size
            ) % vp_size == 0, "Make sure the number of model chunks is the same across all pipeline stages."

        # The local specs all require the standard full attention mask. For Transformer Engine, only the
        #  NVTE_FLASH_ATTN=0 option requires this full attention mask.
        use_full_attention_mask: bool = "transformer_engine" not in self.biobert_spec_option

        do_next_sentence = False
        if self.model_cls is None:
            raise ValueError(
                f"You must supply `model_cls` to the {type(self)} for module to initialization in `configure_model`."
            )

        if self.initial_ckpt_path:
            self.load_settings_from_checkpoint(self.initial_ckpt_path)

        model = self.model_cls(
            self,
            transformer_layer_spec=get_biobert_spec(
                self.biobert_spec_option,
                qk_layernorm=self.qk_layernorm,
                core_attention=self.core_attention_override,
            ),
            num_tokentypes=2 if do_next_sentence else 0,
            vocab_size=get_vocab_size(self, tokenizer.vocab_size, self.make_vocab_size_divisible_by),
            max_sequence_length=self.seq_length,
            tokenizer=tokenizer,
            fp16_lm_cross_entropy=self.fp16_lm_cross_entropy,
            parallel_output=self.parallel_output,
            share_embeddings_and_output_weights=self.share_embeddings_and_output_weights,
            position_embedding_type=self.position_embedding_type,
            rotary_percent=self.rotary_percent,
            seq_len_interpolation_factor=self.seq_len_interpolation_factor,
            return_embeddings=self.return_embeddings,
            pre_process=parallel_state.is_pipeline_first_stage(),
            post_process=parallel_state.is_pipeline_last_stage(),  # set to False for inference
            add_binary_head=do_next_sentence,
            use_full_attention_mask=use_full_attention_mask,
            include_hiddens=self.include_hiddens,
        )
        # TODO (@skothenhill) this is a hack to load the old checkpoint.
        # This should be removed once we have a proper checkpoint conversion
        # see NeMo/nemo/collections/llm/gpt/model/mixtral.py for how we should do it.
        # We should eventually have an adapter for nemo1 checkpoints, HF checkpoints (at least for ESM2 @georgea)
        # and an adapter may also be the right way to handle expected missing/extra keys when importing
        #  a checkpoint for fine-tuning (eg ignore missing lm_head, if not there in model, etc).
        if self.nemo1_ckpt_path is not None:
            assert self.initial_ckpt_path is None, "Mutually exclusive checkpoint path used twice"
            te_mapping = "transformer_engine" in self.biobert_spec_option.value
            with tarfile.open(self.nemo1_ckpt_path, "r") as old_ckpt:
                ckpt_file = old_ckpt.extractfile("./model_weights.ckpt")
                if ckpt_file is None:
                    raise ValueError(f"Failure to read checkpoint file: {old_ckpt}/model_weights/ckpt")
                old_weights = torch.load(ckpt_file)
                new_state_dict_from_old = {}
                for k, v in old_weights.items():
                    new_key = nemo1_to_nemo2_biobert_key_mapping(k, new_model_prefix="", te_mapping=te_mapping)
                    new_state_dict_from_old[new_key] = v
                # TE adds non-null ._extra_state objects to layers, which store some kind of buffer bits
                #  so we need to allow those to pass through if we're loading from bionemo1 which did not
                #  use TE.
                model.load_state_dict(new_state_dict_from_old, strict=not te_mapping)
        if self.initial_ckpt_path is not None:
            assert self.nemo1_ckpt_path is None, "Mutually exclusive checkpoint path used twice"
            self.update_model_from_checkpoint(model, self.initial_ckpt_path)

        # TODO (@jstjohn) come up with a cleaner way in the biobert module to return hidden states.
        #  maybe a suite of options like hugging face has so a user can ask for several or only one thing.
        if self.return_only_hidden_states:
            # this applies the final layernorm in the encoder to the hidden states which was
            #  the default in nemo1.
            model.post_process = False
            model.encoder.post_process = True
            model.encoder.post_layer_norm = True
        return model

    def get_loss_reduction_class(self) -> Type[MegatronLossType]:  # noqa: D102
        # You could optionally return a different loss reduction class here based on the config settings.
        return self.loss_reduction_class
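
A rough sketch of how the pieces above are typically used (the config class is the hypothetical one from the earlier example, the tokenizer choice is illustrative, and `configure_model()` must be called after Megatron parallel state has been initialized):

from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer  # assumed path

tokenizer = AutoTokenizer("bert-base-uncased")   # illustrative tokenizer choice
config = MyBioBertConfig()                       # hypothetical child config from the earlier sketch
model = config.configure_model(tokenizer)        # builds (and optionally restores) the MegatronBioBertModel
loss_cls = config.get_loss_reduction_class()     # BERTMLMLossWithReduction unless overridden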

BioBertOutput

Bases: BioBertOutputCore

The megatron bionemo bert model inference type.

Source code in bionemo/llm/model/biobert/model.py
class BioBertOutput(BioBertOutputCore, total=False):
    """The megatron bionemo bert model inference type."""

    hidden_states: Tensor
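
Because `hidden_states` is declared with `total=False`, it is only present when the model was built with `include_hiddens=True`; a defensive access pattern (the model call here is hypothetical) might be:

output: BioBertOutput = model(input_ids=input_ids, attention_mask=attention_mask)  # hypothetical call
token_logits = output["token_logits"]        # always present (see BioBertOutputCore)
hidden_states = output.get("hidden_states")  # None unless the model was built with include_hiddens=True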

BioBertOutputCore

Bases: TypedDict

Keys always present in the bionemo bert model inference output.

Source code in bionemo/llm/model/biobert/model.py
class BioBertOutputCore(TypedDict):
    """Keys always present in the bionemo bert model inference output."""

    token_logits: Tensor
    binary_logits: Optional[Tensor]

MegatronBioBertModel

Bases: LanguageModule

Transformer language model.

Parameters:

config (TransformerConfig): transformer config. Required.
num_tokentypes (int): Set to 2 when args.bert_binary_head is True, and 0 otherwise. Required.
transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers. Required.
vocab_size (int): vocabulary size. Required.
max_sequence_length (int): maximum size of sequence. This is used for positional embedding. Required.
pre_process (bool): Include embedding layer (used with pipeline parallelism). Default: True.
post_process (bool): Include an output layer (used with pipeline parallelism). Default: True.
parallel_output (bool): Do not gather the outputs, keep them split across tensor parallel ranks. Default: True.
share_embeddings_and_output_weights (bool): When True, input embeddings and output logit weights are shared. Default: False.
position_embedding_type (PositionEmbeddingKinds): Position embedding type. Options: "learned_absolute", "rope". Default: "learned_absolute".
rotary_percent (float): Percent of the rotary dimension to use for rotary position embeddings. Ignored unless position_embedding_type is "rope". Default: 1.0 (100%).
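
Most users obtain this model through `BioBertConfig.configure_model()` rather than constructing it directly; a direct construction would roughly mirror that call, as sketched below (assumes Megatron parallel state is already initialized and that `config` and `tokenizer` are set up as in `configure_model()`; `get_biobert_spec` and `get_vocab_size` are the helpers used there):

model = MegatronBioBertModel(
    config,
    transformer_layer_spec=get_biobert_spec(config.biobert_spec_option),
    num_tokentypes=0,  # 2 only when a next-sentence (binary) head is used
    vocab_size=get_vocab_size(config, tokenizer.vocab_size, config.make_vocab_size_divisible_by),
    max_sequence_length=config.seq_length,
    tokenizer=tokenizer,
    pre_process=parallel_state.is_pipeline_first_stage(),
    post_process=parallel_state.is_pipeline_last_stage(),
    add_binary_head=False,
)
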
Source code in bionemo/llm/model/biobert/model.py
class MegatronBioBertModel(LanguageModule):
    """Transformer language model.

    Args:
        config: transformer config
        num_tokentypes: Set to 2 when args.bert_binary_head is True, and 0 otherwise. Defaults to 0.
        transformer_layer_spec: Specifies module to use for transformer layers
        vocab_size: vocabulary size
        max_sequence_length: maximum size of sequence. This is used for positional embedding
        pre_process: Include embedding layer (used with pipeline parallelism)
        post_process: Include an output layer (used with pipeline parallelism)
        parallel_output: Do not gather the outputs, keep them split across tensor parallel ranks
        share_embeddings_and_output_weights: When True, input embeddings and output logit weights are shared.
            Defaults to False.
        position_embedding_type: Position embedding type. Options ["learned_absolute", "rope"].
            Default is 'learned_absolute'.
        rotary_percent: Percent of rotary dimension to use for rotary position embeddings.
            Defaults to 1.0 (100%). Ignored unless position_embedding_type is 'rope'.
    """

    def __init__(  # noqa: D107
        self,
        config: TransformerConfig,
        num_tokentypes: int,
        transformer_layer_spec: ModuleSpec,
        vocab_size: int,
        max_sequence_length: int,
        tokenizer: Optional[AutoTokenizer] = None,
        pre_process: bool = True,
        post_process: bool = True,
        fp16_lm_cross_entropy: bool = False,
        parallel_output: bool = True,
        share_embeddings_and_output_weights: bool = False,
        position_embedding_type: PositionEmbeddingKinds = "learned_absolute",
        rotary_percent: float = 1.0,
        seq_len_interpolation_factor: Optional[float] = None,
        add_binary_head: bool = True,
        return_embeddings: bool = False,
        use_full_attention_mask: bool = False,
        include_hiddens: bool = False,
    ):
        # TODO (@jstjohn) come up with a cleaner way for this model to return a set of things the user wants.
        #  hidden states, embeddings, logits, etc. The defaults should work for training but we need to make it
        #  customizable and easy to tell how to make it work well for inference as well as trouble shooting.
        #  Also make sure that everything returned that the user wants gets transposed to the b,s,h format.
        super(MegatronBioBertModel, self).__init__(config=config)
        self.post_process = post_process
        self.add_binary_head = add_binary_head
        if return_embeddings:
            assert self.post_process, "only return embeddings on the last pipeline stage"
        # `b` = batch, `s` = sequence.
        # The old flash attention mechanism apparently wants you to use a b x 1 x s x s attention mask while
        #  the new one wants a b x 1 x 1 x s attention mask. This is a hack to allow us to switch between the two.
        self.use_full_attention_mask = use_full_attention_mask
        self.config: TransformerConfig = config
        self.transformer_layer_spec: ModuleSpec = transformer_layer_spec
        self.vocab_size = vocab_size
        self.max_sequence_length = max_sequence_length
        self.tokenizer = tokenizer
        self.pre_process = pre_process
        self.post_process = post_process
        self.fp16_lm_cross_entropy = fp16_lm_cross_entropy
        self.parallel_output = parallel_output
        self.share_embeddings_and_output_weights = share_embeddings_and_output_weights
        self.position_embedding_type = position_embedding_type
        self.add_binary_head = add_binary_head
        self.return_embeddings = return_embeddings
        self.include_hiddens = include_hiddens

        # megatron core pipelining currently depends on model type
        self.model_type = ModelType.encoder_or_decoder

        # Embeddings.
        if self.pre_process:
            self.embedding = LanguageModelEmbedding(
                config=self.config,
                vocab_size=self.vocab_size,
                max_sequence_length=self.max_sequence_length,
                position_embedding_type=position_embedding_type,
                num_tokentypes=num_tokentypes,
            )

        if self.position_embedding_type == "rope":
            self.rotary_pos_emb = RotaryEmbedding(
                kv_channels=self.config.kv_channels,
                rotary_percent=rotary_percent,
                rotary_interleaved=self.config.rotary_interleaved,
                # bug in megatron: they list the type as `float` but they default to `None` so it should be `Optional[float]`
                seq_len_interpolation_factor=seq_len_interpolation_factor,  # type: ignore
            )

        # Transformer.
        self.encoder = TransformerBlock(
            config=self.config,
            spec=self.transformer_layer_spec,
            pre_process=self.pre_process,
            post_process=self.post_process,  # NOTE: in bionemo1 this is hard-coded to True
        )

        # Output
        if post_process:
            # TODO: Make sure you are passing in the mpu_vocab_size properly
            self.lm_head = BertLMHead(
                config.hidden_size,
                config,
            )

            self.output_layer = tensor_parallel.ColumnParallelLinear(
                config.hidden_size,
                self.vocab_size,
                config=config,
                init_method=config.init_method,
                bias=True,
                skip_bias_add=False,
                gather_output=not self.parallel_output,
                skip_weight_param_allocation=pre_process and share_embeddings_and_output_weights,
            )

            self.binary_head = None
            if self.add_binary_head:
                # TODO: Should we switch this to TE?
                self.binary_head = get_linear_layer(
                    config.hidden_size, 2, config.init_method, config.perform_initialization
                )
                self.pooler = Pooler(config.hidden_size, config.init_method, config, config.sequence_parallel)

        if self.pre_process or self.post_process:
            self.setup_embeddings_and_output_layer()

    def bert_extended_attention_mask(self, attention_mask: Tensor) -> Tensor:
        """Creates the extended attention mask

        Converts the attention mask of dimension [batch size, 1, seq len] to [batch size, 1, seq len, seq len] and makes it binary

        Args:
            attention_mask (Tensor): The input attention mask

        Returns:
            Tensor: The extended binary attention mask
        """  # noqa: D415
        # We create a 3D attention mask from a 2D tensor mask.
        # [b, 1, s]
        attention_mask_b1s = attention_mask.unsqueeze(1)

        if self.use_full_attention_mask:
            # [b, s, 1]
            attention_mask_bs1 = attention_mask.unsqueeze(2)
            # [b, s, s]
            attention_mask_bss = attention_mask_b1s * attention_mask_bs1
            # [b, 1, s, s]
            extended_attention_mask = attention_mask_bss.unsqueeze(1)
        else:
            # Transformer Engine requires a 1x1xS attention mask which it internally
            #  converts into a 1xSxS mask.
            # [b, 1, 1, s]
            extended_attention_mask = attention_mask_b1s.unsqueeze(1)

        # Convert the attention mask to binary and flip the values from 0 to 1 and vice versa, so that
        #  the masked_fill(-1000) that Megatron applies internally results in masking out the
        #  pad positions.
        extended_attention_mask = extended_attention_mask < 0.5

        return extended_attention_mask

    def bert_position_ids(self, token_ids):  # noqa: D102
        # Create position ids
        seq_length = token_ids.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=token_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(token_ids)

        return position_ids

    def embedding_forward(
        self,
        input_ids: Tensor,
        position_ids: Tensor,
        tokentype_ids: Optional[Tensor] = None,
        attention_mask: Optional[Tensor] = None,
    ) -> Tensor:
        """Produce embeddings."""
        return self.embedding(input_ids=input_ids, position_ids=position_ids, tokentype_ids=tokentype_ids)

    def set_input_tensor(self, input_tensor: Tensor | list[Tensor]) -> None:
        """Sets input tensor to the model.

        See megatron.model.transformer.set_input_tensor()

        Args:
            input_tensor: Sets the input tensor for the model.

        Raises:
            ValueError: Iff the input tensor is a list that doesn't have exactly 1 tensor.
        """
        # This is usually handled in schedules.py but some inference code still gives us non-lists or None.
        if isinstance(input_tensor, list):
            if len(input_tensor) != 1:
                raise ValueError(f"input_tensor should only be length 1 for gpt/bert, not length: {len(input_tensor)}")
            single_input_tensor: Tensor = input_tensor[0]
        else:
            single_input_tensor = input_tensor
        self.encoder.set_input_tensor(single_input_tensor)

    def forward(
        self,
        input_ids: Tensor,
        attention_mask: Tensor,
        tokentype_ids: Optional[Tensor] = None,
        lm_labels: Optional[Tensor] = None,
        inference_params: Any | None = None,
    ) -> BioBertOutput | Tensor:
        """Forward function of BERT model

        Forward function of the BERT Model This function passes the input tensors
        through the embedding layer, and then the encoder and finally into the post
        processing layer (optional).

        It either returns the Loss values if labels are given or the final hidden units.
        """  # noqa: D415
        # TODO! If we upgrade to TE 1.7 why does bit flipping back to 1 help the loss in TE 1.7? It claimed that they now follow standards, did
        #  nemo/megatron flip again internally to be compatible with TE somewhere?
        #  change the following line to ~self.bert... and see if it helps if we upgrade to TE 1.7 and NeMo/Megatron have not compensated.
        extended_attention_mask = self.bert_extended_attention_mask(attention_mask)

        if parallel_state.is_pipeline_first_stage():
            using_input_ids: Optional[Tensor] = input_ids
            using_position_ids: Optional[Tensor] = self.bert_position_ids(input_ids)
        else:
            using_input_ids = None
            using_position_ids = None

        # Encoder embedding.
        if self.pre_process:
            encoder_input: Optional[Tensor] = self.embedding_forward(
                input_ids=using_input_ids,
                position_ids=using_position_ids,
                tokentype_ids=tokentype_ids,
                attention_mask=attention_mask,
            )
        else:
            # intermediate stage of pipeline
            # encoder will get hidden_states from encoder.input_tensor
            encoder_input = None

        # Rotary positional embeddings (Why not move this into BERT/GPTEmbedding?)
        rotary_pos_emb = None
        if self.position_embedding_type == "rope":
            rotary_seq_len = self.rotary_pos_emb.get_rotary_seq_len(
                inference_params, self.encoder, encoder_input, self.config
            )
            rotary_pos_emb = self.rotary_pos_emb(rotary_seq_len)

        # Run encoder.
        hidden_states = self.encoder(
            hidden_states=encoder_input,
            attention_mask=extended_attention_mask,
            inference_params=inference_params,
            rotary_pos_emb=rotary_pos_emb,
        )
        if not self.post_process:
            return hidden_states

        if self.add_binary_head:
            pooled_output = self.pooler(hidden_states, 0)

        if self.return_embeddings:
            embeddings = torch.transpose(hidden_states, 0, 1)
            masks = torch.sum(attention_mask, dim=1)
            # Collect masked embeddings.
            output = torch.zeros(
                size=(embeddings.shape[0], embeddings.shape[2]),
                dtype=embeddings.dtype,
                device=torch.cuda.current_device(),
            )
            for i, (embedding, mask) in enumerate(zip(embeddings, masks)):
                output[i, :] = torch.mean(embedding[1 : mask - 1], dim=0)
            return output

        # logits and loss
        output_weight = None
        if self.share_embeddings_and_output_weights:
            output_weight = self.shared_embedding_or_output_weight()

        hidden_states_after_lm_head = self.lm_head(hidden_states=hidden_states)
        logits, _ = self.output_layer(hidden_states_after_lm_head, weight=output_weight)

        binary_logits = None
        if self.binary_head is not None:
            binary_logits = self.binary_head(pooled_output)

        # [s b h] => [b s h]  # move batch to the first dimension after forward
        logits = logits.transpose(0, 1).contiguous()
        output = {"token_logits": logits, "binary_logits": binary_logits}
        if self.include_hiddens:
            output["hidden_states"] = hidden_states.transpose(0, 1).contiguous()  # [s b h] => [b s h]
        return output

bert_extended_attention_mask(attention_mask)

Creates the extended attention mask.

Converts the 2D attention mask of shape [batch size, seq len] to [batch size, 1, seq len, seq len] (or [batch size, 1, 1, seq len]) and makes it binary.

Parameters:

attention_mask (Tensor): The input attention mask. Required.

Returns:

Tensor: The extended binary attention mask.

Source code in bionemo/llm/model/biobert/model.py
def bert_extended_attention_mask(self, attention_mask: Tensor) -> Tensor:
    """Creates the extended attention mask

    Converts the attention mask of dimension [batch size, 1, seq len] to [batch size, 1, seq len, seq len] and makes it binary

    Args:
        attention_mask (Tensor): The input attention mask

    Returns:
        Tensor: The extended binary attention mask
    """  # noqa: D415
    # We create a 3D attention mask from a 2D tensor mask.
    # [b, 1, s]
    attention_mask_b1s = attention_mask.unsqueeze(1)

    if self.use_full_attention_mask:
        # [b, s, 1]
        attention_mask_bs1 = attention_mask.unsqueeze(2)
        # [b, s, s]
        attention_mask_bss = attention_mask_b1s * attention_mask_bs1
        # [b, 1, s, s]
        extended_attention_mask = attention_mask_bss.unsqueeze(1)
    else:
        # Transformer Engine requires a 1x1xS attention mask which it internally
        #  converts into a 1xSxS mask.
        # [b, 1, 1, s]
        extended_attention_mask = attention_mask_b1s.unsqueeze(1)

    # Convert the attention mask to binary and flip the values from 0 to 1 and vice versa, so that
    #  the masked_fill(-1000) that Megatron applies internally results in masking out the
    #  pad positions.
    extended_attention_mask = extended_attention_mask < 0.5

    return extended_attention_mask
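
A toy illustration of the shapes this produces (assumes `model` is an already constructed MegatronBioBertModel):

import torch

attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])  # [b, s]; 1 marks real tokens, 0 marks padding
extended = model.bert_extended_attention_mask(attention_mask)
# With use_full_attention_mask=True,  extended.shape == (2, 1, 4, 4).
# With use_full_attention_mask=False, extended.shape == (2, 1, 1, 4).
# The result is boolean and inverted: True marks the (padding) positions to be masked out.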

embedding_forward(input_ids, position_ids, tokentype_ids=None, attention_mask=None)

Produce embeddings.

Source code in bionemo/llm/model/biobert/model.py
def embedding_forward(
    self,
    input_ids: Tensor,
    position_ids: Tensor,
    tokentype_ids: Optional[Tensor] = None,
    attention_mask: Optional[Tensor] = None,
) -> Tensor:
    """Produce embeddings."""
    return self.embedding(input_ids=input_ids, position_ids=position_ids, tokentype_ids=tokentype_ids)

forward(input_ids, attention_mask, tokentype_ids=None, lm_labels=None, inference_params=None)

Forward function of the BERT model.

Passes the input tensors through the embedding layer, then through the encoder, and finally through the post-processing layer (optional).

It either returns the loss values if labels are given or the final hidden units.

Source code in bionemo/llm/model/biobert/model.py
def forward(
    self,
    input_ids: Tensor,
    attention_mask: Tensor,
    tokentype_ids: Optional[Tensor] = None,
    lm_labels: Optional[Tensor] = None,
    inference_params: Any | None = None,
) -> BioBertOutput | Tensor:
    """Forward function of BERT model

    Forward function of the BERT Model This function passes the input tensors
    through the embedding layer, and then the encoder and finally into the post
    processing layer (optional).

    It either returns the Loss values if labels are given or the final hidden units.
    """  # noqa: D415
    # TODO! If we upgrade to TE 1.7 why does bit flipping back to 1 help the loss in TE 1.7? It claimed that they now follow standards, did
    #  nemo/megatron flip again internally to be compatible with TE somewhere?
    #  change the following line to ~self.bert... and see if it helps if we upgrade to TE 1.7 and NeMo/Megatron have not compensated.
    extended_attention_mask = self.bert_extended_attention_mask(attention_mask)

    if parallel_state.is_pipeline_first_stage():
        using_input_ids: Optional[Tensor] = input_ids
        using_position_ids: Optional[Tensor] = self.bert_position_ids(input_ids)
    else:
        using_input_ids = None
        using_position_ids = None

    # Encoder embedding.
    if self.pre_process:
        encoder_input: Optional[Tensor] = self.embedding_forward(
            input_ids=using_input_ids,
            position_ids=using_position_ids,
            tokentype_ids=tokentype_ids,
            attention_mask=attention_mask,
        )
    else:
        # intermediate stage of pipeline
        # encoder will get hidden_states from encoder.input_tensor
        encoder_input = None

    # Rotary positional embeddings (Why not move this into BERT/GPTEmbedding?)
    rotary_pos_emb = None
    if self.position_embedding_type == "rope":
        rotary_seq_len = self.rotary_pos_emb.get_rotary_seq_len(
            inference_params, self.encoder, encoder_input, self.config
        )
        rotary_pos_emb = self.rotary_pos_emb(rotary_seq_len)

    # Run encoder.
    hidden_states = self.encoder(
        hidden_states=encoder_input,
        attention_mask=extended_attention_mask,
        inference_params=inference_params,
        rotary_pos_emb=rotary_pos_emb,
    )
    if not self.post_process:
        return hidden_states

    if self.add_binary_head:
        pooled_output = self.pooler(hidden_states, 0)

    if self.return_embeddings:
        embeddings = torch.transpose(hidden_states, 0, 1)
        masks = torch.sum(attention_mask, dim=1)
        # Collect masked embeddings.
        output = torch.zeros(
            size=(embeddings.shape[0], embeddings.shape[2]),
            dtype=embeddings.dtype,
            device=torch.cuda.current_device(),
        )
        for i, (embedding, mask) in enumerate(zip(embeddings, masks)):
            output[i, :] = torch.mean(embedding[1 : mask - 1], dim=0)
        return output

    # logits and loss
    output_weight = None
    if self.share_embeddings_and_output_weights:
        output_weight = self.shared_embedding_or_output_weight()

    hidden_states_after_lm_head = self.lm_head(hidden_states=hidden_states)
    logits, _ = self.output_layer(hidden_states_after_lm_head, weight=output_weight)

    binary_logits = None
    if self.binary_head is not None:
        binary_logits = self.binary_head(pooled_output)

    # [s b h] => [b s h]  # move batch to the first dimension after forward
    logits = logits.transpose(0, 1).contiguous()
    output = {"token_logits": logits, "binary_logits": binary_logits}
    if self.include_hiddens:
        output["hidden_states"] = hidden_states.transpose(0, 1).contiguous()  # [s b h] => [b s h]
    return output
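
A sketch of a single inference call on the last pipeline stage and how to read the resulting BioBertOutput (the input tensors are illustrative; real batches come from the data module):

model.eval()
with torch.no_grad():
    output = model(input_ids=input_ids, attention_mask=attention_mask)

token_logits = output["token_logits"]        # [batch, seq, vocab]; the vocab dim stays split across TP ranks when parallel_output=True
binary_logits = output["binary_logits"]      # None unless the model was built with add_binary_head=True
hidden_states = output.get("hidden_states")  # [batch, seq, hidden], present only when include_hiddens=True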

set_input_tensor(input_tensor)

Sets input tensor to the model.

See megatron.model.transformer.set_input_tensor()

Parameters:

input_tensor (Tensor | list[Tensor]): Sets the input tensor for the model. Required.

Raises:

ValueError: Iff the input tensor is a list that doesn't have exactly 1 tensor.

Source code in bionemo/llm/model/biobert/model.py
def set_input_tensor(self, input_tensor: Tensor | list[Tensor]) -> None:
    """Sets input tensor to the model.

    See megatron.model.transformer.set_input_tensor()

    Args:
        input_tensor: Sets the input tensor for the model.

    Raises:
        ValueError: Iff the input tensor is a list that doesn't have exactly 1 tensor.
    """
    # This is usually handled in schedules.py but some inference code still gives us non-lists or None.
    if isinstance(input_tensor, list):
        if len(input_tensor) != 1:
            raise ValueError(f"input_tensor should only be length 1 for gpt/bert, not length: {len(input_tensor)}")
        single_input_tensor: Tensor = input_tensor[0]
    else:
        single_input_tensor = input_tensor
    self.encoder.set_input_tensor(single_input_tensor)
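
For completeness, a sketch of how an intermediate pipeline stage receives activations (this is normally done by the Megatron pipeline schedule, not user code; the shapes are illustrative and follow the [s, b, h] layout the encoder expects):

prev_stage_hidden = torch.empty(128, 2, 512, device="cuda")  # [s, b, h] activations from the previous stage
model.set_input_tensor(prev_stage_hidden)
# The next forward() call will feed these activations into the encoder instead of computing embeddings.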