Source code for nemo.collections.asr.parts.utils.rnnt_utils

# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple, Union

import torch


@dataclass
class Hypothesis:
    """Hypothesis class for beam search algorithms.

    score: A float score obtained from an AbstractRNNTDecoder module's score_hypothesis method.

    y_sequence: Either a sequence of integer ids pointing to some vocabulary, or a packed torch.Tensor
        behaving in the same manner. dtype must be torch.Long in the latter case.

    dec_state: A list (or list of list) of LSTM-RNN decoder states. Can be None.

    dec_out: (Optional) A list of torch.Tensor decoder outputs cached while extending the hypothesis.

    text: (Optional) A decoded string after processing via CTC / RNN-T decoding (removing the CTC / RNNT
        `blank` tokens, and optionally merging word-pieces). Should be used as the decoded string for
        Word Error Rate calculation.

    timestep: (Optional) A list of integer indices representing the index in the decoding process at
        which each token appeared. Should be of the same length as the number of non-blank tokens.

    alignments: (Optional) Represents the CTC / RNNT token alignments as integer tokens along an axis of
        time T (for CTC) or Time x Target (T x U) (for RNNT). For CTC, represented as a single list of
        integer indices. For RNNT, represented as a nested list of lists of integer indices: the outer
        list represents the Time dimension (T), the inner list the Target dimension (U).
        The set of valid indices **includes** the CTC / RNNT blank token in order to represent alignments.

    length: Represents the length of the sequence (the original length without padding); defaults to 0.

    y: (Unused) A list of torch.Tensors representing the list of hypotheses.

    lm_state: (Unused) A dictionary state cache used by an external Language Model.

    lm_scores: (Unused) Score of the external Language Model.

    tokens: (Optional) A list of decoded tokens (can be characters or word-pieces).

    last_token: (Optional) A token or batch of tokens which was predicted in the last step.
    """

    score: float
    y_sequence: Union[List[int], torch.Tensor]
    text: Optional[str] = None
    dec_out: Optional[List[torch.Tensor]] = None
    dec_state: Optional[Union[List[List[torch.Tensor]], List[torch.Tensor]]] = None
    timestep: Union[List[int], torch.Tensor] = field(default_factory=list)
    alignments: Optional[Union[List[int], List[List[int]]]] = None
    length: Union[int, torch.Tensor] = 0
    y: Optional[List[torch.Tensor]] = None
    lm_state: Optional[Union[Dict[str, Any], List[Any]]] = None
    lm_scores: Optional[torch.Tensor] = None
    tokens: Optional[Union[List[int], torch.Tensor]] = None
    last_token: Optional[torch.Tensor] = None
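
# Illustrative usage sketch (not part of the module): how a decoding loop might
# construct a partial hypothesis and then extend it by one non-blank token.
# All token ids, timesteps, and scores below are made-up values.
def _example_extend_hypothesis() -> Hypothesis:
    hyp = Hypothesis(score=0.0, y_sequence=[], timestep=[])

    # Suppose the decoder emitted token id 42 at time index 3 with log-prob -0.5.
    hyp.y_sequence.append(42)
    hyp.timestep.append(3)
    hyp.score += -0.5

    return hyp
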
@dataclass
class NBestHypotheses:
    """List of N best hypotheses."""

    n_best_hypotheses: Optional[List[Hypothesis]]
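
# Illustrative sketch (not part of the module): a beam search over a batch
# typically yields one NBestHypotheses per utterance, ranked best-first by score.
def _example_wrap_n_best(beam_results: List[Hypothesis]) -> NBestHypotheses:
    ranked = sorted(beam_results, key=lambda h: h.score, reverse=True)
    return NBestHypotheses(n_best_hypotheses=ranked)
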
def is_prefix(x: List[int], pref: List[int]) -> bool:
    """
    Obtained from https://github.com/espnet/espnet.

    Check if pref is a prefix of x.

    Args:
        x: Label ID sequence.
        pref: Prefix label ID sequence.

    Returns:
        Whether pref is a prefix of x.
    """
    if len(pref) >= len(x):
        return False

    for i in range(len(pref)):
        if pref[i] != x[i]:
            return False

    return True


def select_k_expansions(
    hyps: List[Hypothesis], logps: torch.Tensor, beam_size: int, gamma: float, beta: int,
) -> List[List[Tuple[int, float]]]:
    """
    Obtained from https://github.com/espnet/espnet.

    Return the K best candidate expansions for each hypothesis in a list of hypotheses.
    Candidates are selected according to the extended hypotheses' probabilities and a
    prune-by-value method, where K is equal to beam_size + beta.

    Args:
        hyps: Hypotheses.
        logps: Log-probabilities for hypotheses expansions.
        beam_size: Beam size.
        gamma: Allowed logp difference for the prune-by-value method.
        beta: Number of additional candidates to store.

    Returns:
        k_expansions: Best K expansion candidates for each hypothesis, as (token id, score) pairs.
    """
    k_expansions = []

    for i, hyp in enumerate(hyps):
        # Score every candidate token expansion of this hypothesis.
        hyp_i = [(int(k), hyp.score + float(logp)) for k, logp in enumerate(logps[i])]
        k_best_exp_idx, k_best_exp = max(hyp_i, key=lambda x: x[1])

        # Prune-by-value: drop candidates scoring more than gamma below the best
        # expansion, then keep the top beam_size + beta of the remainder, best-first.
        expansions = sorted(
            filter(lambda x: (k_best_exp - gamma) <= x[1], hyp_i), key=lambda x: x[1], reverse=True,
        )[: beam_size + beta]

        if len(expansions) > 0:
            k_expansions.append(expansions)
        else:
            k_expansions.append([(k_best_exp_idx, k_best_exp)])

    return k_expansions
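
# Illustrative sketch (not part of the module): pruning candidate expansions for
# two partial hypotheses over a toy 3-token vocabulary. All scores are made up.
def _example_select_k_expansions() -> List[List[Tuple[int, float]]]:
    hyps = [
        Hypothesis(score=-1.0, y_sequence=[1]),
        Hypothesis(score=-2.0, y_sequence=[1, 2]),
    ]
    # The first hypothesis is a strict prefix of the second.
    assert is_prefix(hyps[1].y_sequence, hyps[0].y_sequence)

    # One row of token log-probs per hypothesis: shape (num_hyps, vocab_size).
    logps = torch.log_softmax(torch.tensor([[2.0, 1.0, 0.1], [0.5, 0.5, 3.0]]), dim=-1)

    # Keep at most beam_size + beta = 3 expansions per hypothesis, discarding any
    # candidate scoring more than gamma below that hypothesis' best expansion.
    return select_k_expansions(hyps, logps, beam_size=2, gamma=2.5, beta=1)
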