Bring your own InferencePipeline

You can also cascade or chain your inferences in any order by defining your own InferencePipeline. You need to put your custom code in the <AIAA workspace>/lib folder.

The following interface is provided to get/run other inferences.


class InferencePipeline:

    @staticmethod
    def get_model_config(name):
        """Gets model configuration given name."""

    @staticmethod
    def init_inference(config: ModelConfig, backend: str):
        """Initializes an inference object"""

    @staticmethod
    def run_inference(name, inference, data, config: ModelConfig, triton_config):
        """Runs inference to get result"""

    @staticmethod
    def run_transformer(data, transformers_config, log_prefix='POST'):
        """Runs transforms given transform configs"""

    @abstractmethod
    def pipeline(self, name, data, config: ModelConfig, triton_config):
        """Defines a pipeline"""

This example pipeline runs all of the segmentation models passed to it and takes the union of their segmentation masks.


import logging
from typing import List, Optional

import numpy as np

from aiaa.configs.modelconfig import ModelConfig, ModelType
from aiaa.inference.inference import Inference
from aiaa.inference.inference_pipeline import InferencePipeline
from aiaa.utils.aiaa_exception import AIAAError, AIAAException


class CustomInferencePipeline(InferencePipeline):
    def __init__(self, models: Optional[List] = None, backend='TRITON'):
        # HINT:: You can pass any additional init params through config_aiaa.json
        self._inferences = dict()
        self.models = models
        self.backend = backend

    def _init_context(self, models, backend):
        logger = logging.getLogger(__name__)
        logger.info('Init Context for: {}'.format(models))

        # Create and cache inference object for loaded models
        for name in models:
            if self._inferences.get(name):
                logger.info('Inference for {} is pre-loaded; Using the existing one!'.format(name))
                continue

            config = InferencePipeline.get_model_config(name)
            if config is None:
                raise AIAAException(AIAAError.INFERENCE_ERROR, f'Model {name} does not exist or is not loaded in AIAA.')
            if config.get_type() != ModelType.SEGMENTATION:
                raise AIAAException(AIAAError.INFERENCE_ERROR, f'Model {name} is not of type {ModelType.SEGMENTATION}')

            self._inferences[name] = (config, InferencePipeline.init_inference(config, backend))

        logger.info('PreLoaded Inferences: {}'.format(self._inferences.keys()))

    def pipeline(self, name, data, config: ModelConfig, triton_config):
        logger = logging.getLogger(__name__)
        logger.info('Run Inference Pipeline for: {}'.format(name))

        if config.get_type() != ModelType.PIPELINE:
            raise AIAAException(AIAAError.INFERENCE_ERROR, f'Config needs to specify model type as "{ModelType.PIPELINE}"')

        input_params = data.get('params')
        if input_params is None:
            raise AIAAException(AIAAError.INFERENCE_ERROR, 'Missing input params.')
        logger.info('Input Params: {}'.format(input_params))

        # Get backend from input parameters
        backend = input_params.get('backend', self.backend)
        logger.info('Backend: {}'.format(backend))
        if backend == 'TRITON' and triton_config is None:
            raise AIAAException(AIAAError.INFERENCE_ERROR,
                                'Triton config is none. Please make sure you run AIAA with Triton backend.')

        # Get model names from input parameters
        models = input_params.get('models', self.models)
        logger.info('Input Models: {}'.format(models))
        if models is None or len(models) == 0:
            raise AIAAException(AIAAError.INFERENCE_ERROR, 'Input params do not have any models to run')

        # Init all referred Inferences
        self._init_context(models, backend)

        # WRITE ANY CUSTOM LOGIC HERE
        results = []
        count = 0
        for model in models:
            i = self._inferences[model]
            model_config: ModelConfig = i[0]
            inference: Inference = i[1]

            pre_transformers = model_config.get_pre_transforms()
            post_transformers = model_config.get_post_transforms()

            # We exclude common pre-transforms
            common_pre_transformers = config.get_pre_transforms()
            pre_transformers = [t for t in pre_transformers if t not in common_pre_transformers]

            # HINT:: You can run pre-transforms or post-transforms for an existing model
            result = InferencePipeline.run_transformer(data.copy(), pre_transformers, log_prefix='PRE-{}'.format(count))

            # HINT:: You can also chain the previous model's output as input to the next model (depending on use-case)
            result = InferencePipeline.run_inference(model, inference, result, model_config, triton_config)

            # We exclude common post-transforms
            common_post_transformers = config.get_post_transforms()
            post_transformers = [t for t in post_transformers if t not in common_post_transformers]

            result = InferencePipeline.run_transformer(result, post_transformers, log_prefix='POST-{}'.format(count))

            # Here we assume these models only have one output
            results.append(result[model_config.get_inference_output()])
            count += 1

        result_mask = None
        for result in results:
            # Union
            if result_mask is None:
                result_mask = result
            else:
                result_mask = np.logical_or(result_mask, result)

        logger.info('Result Mask: {}'.format(result_mask.shape))

        # Store result back into data (we assume this pipeline has only one output)
        data[config.get_inference_output()] = result_mask.astype(np.float)

        return data

    def close(self):
        # Release any resources
        pass

Let’s save this custom inference pipeline as custom_inference_pipeline.py and copy it into the <AIAA workspace>/lib folder.
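As a quick illustration, the copy step could look like the sketch below. The /aiaa_workspace path is an assumption; substitute the actual <AIAA workspace> folder you started AIAA with.

# Minimal sketch: copy the custom pipeline into the AIAA workspace's lib folder.
# '/aiaa_workspace' is a hypothetical path -- replace it with your actual <AIAA workspace>.
import shutil

shutil.copy('custom_inference_pipeline.py', '/aiaa_workspace/lib/')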

An example config_aiaa.json for this pipeline is shown below:


{ "version": 1, "type": "pipeline", "labels": [ ], "description": "This pipeline will run multiple models on the input image and union their results", "pre_transforms": [ { "name": "monai.transforms.LoadImaged", "args": { "keys": "image" } }, { "name": "monai.transforms.AddChanneld", "args": { "keys": "image" } }, { "name": "monai.transforms.Spacingd", "args": { "keys": "image", "pixdim": [ 1.0, 1.0, 1.0 ] } } ], "inference": { "input": "image", "output": "pred", "AIAA": { "name": "custom_inference_pipeline.CustomInferencePipeline" }, "TRITON": { "name": "custom_inference_pipeline.CustomInferencePipeline" } }, "post_transforms": [ { "name": "aiaa.transforms.Restored", "args": { "keys": "pred", "ref_image": "image" } }, { "name": "aiaa.transforms.ExtremePointsd", "args": { "keys": "pred", "result": "result", "points": "points" } }, { "name": "aiaa.transforms.BoundingBoxd", "args": { "keys": "pred", "result": "result", "bbox": "bbox" } } ], "writer": { "name": "aiaa.transforms.Writer", "args": { "image": "pred", "json": "result" } } }

We first need to load the custom pipeline into AIAA. This is done the same way as loading an AIAA model, but without the “data” field specified. Example:


curl -X PUT "http://127.0.0.1:$AIAA_PORT/admin/model/custom_pipeline" -F "config=@config_aiaa.json"
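To confirm the pipeline was registered, you can list the models currently loaded in AIAA. The sketch below assumes your AIAA version exposes the /v1/models listing endpoint and that the server is reachable at the host/port shown; adjust both to your setup.

# Minimal check: list loaded models and confirm 'custom_pipeline' appears.
# Host/port are assumptions -- match the $AIAA_PORT you started the server with.
import requests

AIAA_SERVER = 'http://127.0.0.1:5000'

resp = requests.get(f'{AIAA_SERVER}/v1/models')
resp.raise_for_status()
print(resp.json())  # 'custom_pipeline' should be listed among the loaded models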

Use the /inference API to run the custom inference pipeline. Note that for this cascade pipeline to run, you need to load the referenced models into AIAA first. For example, if you want to run both the spleen model and your own model, you can do the following:


# Load spleen model to AIAA from NGC
curl -X PUT "http://127.0.0.1:$AIAA_PORT/admin/model/clara_pt_spleen_ct_segmentation" \
     -H "accept: application/json" \
     -H "Content-Type: application/json" \
     -d '{"path":"<NGC Model Path>","version":"1"}'

# Load your own model to AIAA
curl -X PUT "http://127.0.0.1:$AIAA_PORT/admin/model/my_own_model" \
     -F "config=@my_own_model/config_aiaa.json;type=application/json" \
     -F "data=@my_own_model/model.ts"

# Run both models
curl -X POST "http://127.0.0.1:$AIAA_PORT/v1/inference?model=custom_pipeline&output=image" \
     -H "accept: multipart/form-data" \
     -H "Content-Type: multipart/form-data" \
     -F 'params={"models": ["clara_pt_spleen_ct_segmentation", "my_own_model"]}' \
     -F "image=@image.nii.gz;type=application/gzip" \
     -o output.nii.gz -f
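If you prefer calling the API from Python instead of curl, the same /v1/inference request can be sketched with the requests library. The host, port, model names, and file names below are assumptions that mirror the curl example above; they are not part of the AIAA API itself.

# Minimal sketch of the /v1/inference call from the curl example above, using 'requests'.
# Host/port, model names, and file names are assumptions -- adjust them to your setup.
import json
import requests

AIAA_SERVER = 'http://127.0.0.1:5000'

params = {'models': ['clara_pt_spleen_ct_segmentation', 'my_own_model']}

with open('image.nii.gz', 'rb') as f:
    response = requests.post(
        f'{AIAA_SERVER}/v1/inference',
        params={'model': 'custom_pipeline', 'output': 'image'},
        files={
            'params': (None, json.dumps(params)),                # form field with the pipeline parameters
            'image': ('image.nii.gz', f, 'application/gzip'),     # the input image to segment
        },
    )

response.raise_for_status()
with open('output.nii.gz', 'wb') as out:
    out.write(response.content)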
