bridge.training.tokenizers.config#

Module Contents#

Classes#

TokenizerConfig

Configuration settings for the tokenizer.

API#

class bridge.training.tokenizers.config.TokenizerConfig#

Configuration settings for the tokenizer.

vocab_size: Optional[int]#

None

Size of vocab before EOD or padding.

vocab_file: Optional[str]#

None

Path to the vocab file.

merge_file: Optional[str]#

None

Path to the BPE merge file.
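
For GPT-2 style BPE, `vocab_file` and `merge_file` are used together. A minimal sketch (assuming `TokenizerConfig` is a dataclass accepting these fields as keyword arguments; the file paths are placeholders):

```python
from bridge.training.tokenizers.config import TokenizerConfig

# Placeholder paths; point these at downloaded GPT-2 assets.
bpe_config = TokenizerConfig(
    tokenizer_type="GPT2BPETokenizer",
    vocab_file="/path/to/gpt2-vocab.json",  # token-to-id mapping
    merge_file="/path/to/gpt2-merges.txt",  # BPE merge rules
)
```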

vocab_extra_ids: int#

0

Number of additional vocabulary tokens. They are used for span masking in the T5 model.
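
In T5-style span masking these extra ids conventionally surface as sentinel tokens. A short illustration of that convention (the `<extra_id_N>` naming follows T5 and is an assumption, not something this config guarantees):

```python
vocab_extra_ids = 100
# T5-style sentinel tokens appended to the vocabulary (illustrative).
sentinels = [f"<extra_id_{i}>" for i in range(vocab_extra_ids)]
print(sentinels[:3])  # ['<extra_id_0>', '<extra_id_1>', '<extra_id_2>']
```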

tokenizer_type: Optional[Literal['BertWordPieceLowerCase', 'BertWordPieceCase', 'GPT2BPETokenizer', 'SentencePieceTokenizer', 'GPTSentencePieceTokenizer', 'HuggingFaceTokenizer', 'Llama2Tokenizer', 'TikTokenizer', 'MultimodalTokenizer', 'NullTokenizer']]#

None

What type of tokenizer to use.
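
For quick smoke tests without any tokenizer assets, a null tokenizer can be selected by type alone. A sketch (pairing `NullTokenizer` with an explicit `vocab_size` follows Megatron-LM convention and is an assumption here):

```python
from bridge.training.tokenizers.config import TokenizerConfig

# Null tokenizer sketch: no vocab or model files required.
null_config = TokenizerConfig(
    tokenizer_type="NullTokenizer",
    vocab_size=131072,  # assumed to be required when no files supply one
)
```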

tokenizer_model: Optional[str]#

None

Path to the SentencePiece tokenizer model file.
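
A SentencePiece-based setup needs only the trained `.model` file. A minimal sketch (the path is a placeholder):

```python
from bridge.training.tokenizers.config import TokenizerConfig

# SentencePiece sketch; the .model path is a placeholder.
sp_config = TokenizerConfig(
    tokenizer_type="SentencePieceTokenizer",
    tokenizer_model="/path/to/tokenizer.model",
)
```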

tiktoken_pattern: Optional[str]#

None

Which tiktoken pattern to use. Options: [v1, v2]

tiktoken_num_special_tokens: int#

1000

Number of special tokens in the tiktoken tokenizer.

tiktoken_special_tokens: Optional[list[str]]#

None

List of tiktoken special tokens; must include ["&lt;unk&gt;", "&lt;s&gt;", "&lt;/s&gt;"].
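
The three tiktoken fields above are typically set together. A sketch (the `tokenizer_model` pairing and the vocabulary path are assumptions borrowed from Megatron-LM's TikTokenizer; values are illustrative):

```python
from bridge.training.tokenizers.config import TokenizerConfig

# TikToken sketch; path and values are illustrative.
tt_config = TokenizerConfig(
    tokenizer_type="TikTokenizer",
    tokenizer_model="/path/to/tokenizer_vocab.json",  # assumed vocab source
    tiktoken_pattern="v2",             # split-regex version: "v1" or "v2"
    tiktoken_num_special_tokens=1000,  # reserved special-token slots
    tiktoken_special_tokens=["<unk>", "<s>", "</s>"],  # required entries
)
```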

tokenizer_prompt_format: Optional[str]#

None

special_tokens: Optional[list[str]]#

None

image_tag_type: Optional[str]#

None

padded_vocab_size: Optional[int]#

None
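
`padded_vocab_size` is usually derived rather than set by hand: the raw vocabulary is rounded up so embedding shards divide evenly across tensor-parallel ranks. A sketch of the Megatron-style padding rule (the divisor and tensor-parallel parameters are assumptions, not fields of this class):

```python
def pad_vocab_size(
    orig_vocab_size: int,
    make_vocab_size_divisible_by: int = 128,  # assumed Megatron-style knob
    tensor_model_parallel_size: int = 1,
) -> int:
    """Round the raw vocab size up to a hardware-friendly multiple."""
    multiple = make_vocab_size_divisible_by * tensor_model_parallel_size
    return ((orig_vocab_size + multiple - 1) // multiple) * multiple

# GPT-2's 50257-token vocab pads to 50304 when the divisor is 128.
assert pad_vocab_size(50257) == 50304
```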