NVIDIA Clara Train 3.1

Bring your own InferencePipeline

You can also cascade or chain your inferences in any order by defining your own InferencePipeline. You need to put your custom code in the <workspace>/lib folder.

The following interface is provided to get/run other inferences.


class InferencePipeline(object):

    @staticmethod
    def get_model_config(name):
        """Gets model configuration given name."""

    @staticmethod
    def init_inference(config: ModelConfig, triton_config):
        """Initializes an inference object"""

    @staticmethod
    def run_inference(name, inference, data, config: ModelConfig, triton_config):
        """Runs inference to get result"""

    @staticmethod
    def run_transformer(data, transformers_config, log_prefix='POST', ignore=None):
        """Runs transforms given transform configs"""

    @abstractmethod
    def pipeline(self, name, data, config: ModelConfig, triton_config):
        """Defines a pipeline"""
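For orientation, here is a minimal sketch of a subclass that only overrides pipeline() and delegates to the static helpers above. The model name 'my_model' and the pass-through logic are purely illustrative; see the full example below for a complete pipeline.

from nvmidl.apps.aas.configs.modelconfig import ModelConfig
from nvmidl.apps.aas.inference.inference_pipeline import InferencePipeline


class MinimalPipeline(InferencePipeline):
    def pipeline(self, name, data, config: ModelConfig, triton_config):
        # Look up an already-loaded AIAA model (illustrative name) and create its Inferer
        model_config = InferencePipeline.get_model_config('my_model')
        inference = InferencePipeline.init_inference(model_config, triton_config)

        # Run that model's pre-transforms, inference, and post-transforms on the input data
        result = InferencePipeline.run_transformer(data, model_config.get_pre_transforms(), log_prefix='PRE')
        result = InferencePipeline.run_inference('my_model', inference, result, model_config, triton_config)
        result = InferencePipeline.run_transformer(result, model_config.get_post_transforms(), log_prefix='POST')
        return result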

An example custom_inference_pipeline.py is provided below. Users pass in the segmentation models they want to run on the image; this pipeline runs all of them and unions the segmentation masks together.


import logging

import numpy as np

from ai4med.common.medical_image import MedicalImage
from ai4med.common.transform_ctx import TransformContext
from nvmidl.apps.aas.configs.modelconfig import ModelConfig, ModelType
from nvmidl.apps.aas.inference.inference import Inference
from nvmidl.apps.aas.inference.inference_pipeline import InferencePipeline
from nvmidl.apps.aas.utils.aiaa_exception import AIAAError, AIAAException


class CustomInferencePipeline(InferencePipeline):
    # HINT:: You can pass any additional init params through config_aiaa.json
    def __init__(self):
        self._inferences = dict()

    def _init_context(self, models, triton_config):
        logger = logging.getLogger(__name__)
        logger.info('Init Context for: {}'.format(models))

        # Create and cache an Inferer for loaded models
        # (you can close the Inferer if you don't want to cache it for any reason)
        for name in models:
            if self._inferences.get(name):
                logger.info('Inference for {} is pre-loaded; Using the existing one!'.format(name))
                continue

            config = InferencePipeline.get_model_config(name)
            if config is None:
                raise AIAAException(AIAAError.INFERENCE_ERROR, 'Referenced Model does not exist')

            self._inferences[name] = (config, InferencePipeline.init_inference(config, triton_config))

        logger.info('PreLoaded Inferences: {}'.format(self._inferences.keys()))

    def pipeline(self, name, data, config: ModelConfig, triton_config):
        logger = logging.getLogger(__name__)
        logger.info('Run Inference Pipeline for: {}'.format(name))

        if config.get_type() != ModelType.PIPELINE:
            raise AIAAException(AIAAError.INFERENCE_ERROR, 'Only Pipeline Models are supported for this Inference')

        # Better to use Clara Transformers (or transforms extended from them) to take full benefit of MedicalImage
        assert isinstance(data, TransformContext)
        transform_ctx: TransformContext = data

        img: MedicalImage = transform_ctx.get_image('image')
        shape_fmt = img.get_shape_format()
        logger.info('Shape Format: {}; Current Shape: {}'.format(shape_fmt, img.get_data().shape))

        input_params = transform_ctx['params']
        logger.info('Input Params: {}'.format(input_params))

        # Get model names from input parameters
        models = input_params.get('models')
        logger.info('Input Models: {}'.format(models))
        if models is None or len(models) == 0:
            raise AIAAException(AIAAError.INFERENCE_ERROR, 'Input Params do not have any models to run')

        # Init all referenced Inferences
        self._init_context(models, triton_config)

        # WRITE ANY OF YOUR CUSTOM LOGIC HERE
        # You can run pre-transforms or post-transforms for an existing model too
        results = []
        count = 0
        for model in models:
            i = self._inferences[model]
            model_config: ModelConfig = i[0]
            inference: Inference = i[1]

            pre_transformers = model_config.get_pre_transforms()
            post_transformers = model_config.get_post_transforms()

            # HINT:: Add or Modify anything like this..
            # HINT:: You can also send a list of transformers to ignore e.g. ignore=['LoadNifti']
            # pre_transformers = [t for t in pre_transformers if t['name'] != 'LoadNifti']

            result = InferencePipeline.run_transformer(
                data.copy(), pre_transformers, log_prefix='PRE-{}'.format(count), ignore=['LoadNifti'])

            # HINT:: You can also chain a previous model's output as input to the next model (depending on use-case)
            result = InferencePipeline.run_inference(model, inference, result, model_config, triton_config)
            result = InferencePipeline.run_transformer(result, post_transformers, log_prefix='POST-{}'.format(count))

            results.append(result)
            count += 1

        # Do your own way to combine multiple masks here..
        result_mask = None
        result_params = dict()
        for i, result in enumerate(results):
            model = models[i]
            mask = result['model']
            logger.info('{} Mask: {}'.format(model, mask.shape))

            # Merge Results Mask
            if result_mask is None:
                result_mask = mask
            else:
                result_mask = np.logical_or(result_mask, mask)

            # Merge Results Params
            result_params[model] = result.get('result_params')

        logger.info('Result Mask: {}'.format(result_mask.shape))
        logger.info('Result Params: {}'.format(result_params))

        # Set Result Image and Result Params
        # Use new_image to copy over properties
        transform_ctx.set_image('model', img.new_image(result_mask, shape_fmt))
        transform_ctx['result_params'] = result_params

        return transform_ctx
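For intuition, the mask-merging step above reduces to an element-wise logical OR of the individual model masks. Below is a tiny standalone sketch of that step; the array shapes and values are made up for illustration.

import numpy as np

# Two illustrative binary masks, e.g. one per model (shapes/values are made up)
spleen_mask = np.array([[0, 1], [0, 0]], dtype=np.uint8)
liver_mask = np.array([[0, 0], [1, 1]], dtype=np.uint8)

# Union of the masks, as done in the pipeline above
union = np.logical_or(spleen_mask, liver_mask).astype(np.uint8)
print(union)  # [[0 1]
              #  [1 1]]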

An example config_aiaa.json for this pipeline is shown below:


{ "version": "3", "type": "pipeline", "labels": [ ], "description": "Multiple models running in pipeline to solve some xyz use case", "pre_transforms": [ { "name": "LoadNifti", "args": { "fields": "image", "as_closest_canonical": "false" } } ], "inference": { "name": "custom_inference_pipeline.CustomInferencePipeline" }, "post_transforms": [], "writer": { "name": "WriteNifti", "args": { "field": "model", "dtype": "uint8" } } }

You load it the same way you would load an AIAA model, but without “data” specified. Example:


curl -X PUT "http://127.0.0.1:$LOCAL_PORT/admin/model/custom_pipeline" -F "config=@config_aiaa.json"

Use the /inference API to run it. Note that for this pipeline to run, you need to load the referenced models into AIAA first. For example, to run both the spleen and liver models, do the following:


# Upload spleen model to AIAA from NGC
curl -X PUT "http://127.0.0.1:$LOCAL_PORT/admin/model/clara_ct_seg_spleen_amp" \
     -H "accept: application/json" \
     -H "Content-Type: application/json" \
     -d '{"path":"nvidia/med/clara_ct_seg_spleen_amp","version":"1"}'

# Upload liver model to AIAA from NGC
curl -X PUT "http://127.0.0.1:$LOCAL_PORT/admin/model/clara_ct_seg_liver_and_tumor_amp" \
     -H "accept: application/json" \
     -H "Content-Type: application/json" \
     -d '{"path":"nvidia/med/clara_ct_seg_liver_and_tumor_amp","version":"1"}'

# Run Liver and Spleen model
curl -X POST "http://127.0.0.1:$LOCAL_PORT/v1/inference?model=custom_pipeline&output=image" \
     -H "accept: multipart/form-data" \
     -H "Content-Type: multipart/form-data" \
     -F 'params={"models": ["clara_ct_seg_liver_and_tumor_amp", "clara_ct_seg_spleen_amp"]}' \
     -F "image=@image.nii.gz;type=application/gzip" -o output.nii.gz
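If you prefer calling the API from Python instead of curl, a minimal sketch using the requests library is shown below. It assumes the server is reachable at 127.0.0.1 on port 5000 (substitute your $LOCAL_PORT), that custom_pipeline is already loaded as shown above, and that the server returns the result image directly when output=image.

import requests

# Assumption: AIAA listens on 127.0.0.1:5000 and custom_pipeline is loaded as shown above
url = 'http://127.0.0.1:5000/v1/inference'
params = {'model': 'custom_pipeline', 'output': 'image'}

# Same request payload as the curl example: which models to run, plus the input image
data = {'params': '{"models": ["clara_ct_seg_liver_and_tumor_amp", "clara_ct_seg_spleen_amp"]}'}
files = {'image': ('image.nii.gz', open('image.nii.gz', 'rb'), 'application/gzip')}

response = requests.post(url, params=params, data=data, files=files)
response.raise_for_status()

# Save the returned segmentation mask
with open('output.nii.gz', 'wb') as f:
    f.write(response.content)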

© Copyright 2020, NVIDIA. Last updated on Feb 2, 2023.