Program Listing for File multiai_inference.hpp
↰ Return to documentation for file (gxf_extensions/multiai_inference/multiai_inference.hpp)
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NVIDIA_GXF_EXTENSIONS_MULTIAI_INFERENCE_HPP_
#define NVIDIA_GXF_EXTENSIONS_MULTIAI_INFERENCE_HPP_
#include <algorithm>
#include <iostream>
#include <iterator>
#include <map>
#include <memory>
#include <mutex>
#include <numeric>
#include <string>
#include <vector>
#include "gxf/core/entity.hpp"
#include "gxf/core/gxf.h"
#include "gxf/core/parameter.hpp"
#include "gxf/cuda/cuda_stream.hpp"
#include "gxf/cuda/cuda_stream_id.hpp"
#include "gxf/cuda/cuda_stream_pool.hpp"
#include "gxf/multimedia/video.hpp"
#include "gxf/std/allocator.hpp"
#include "gxf/std/clock.hpp"
#include "gxf/std/codelet.hpp"
#include "gxf/std/parameter_parser_std.hpp"
#include "gxf/std/receiver.hpp"
#include "gxf/std/tensor.hpp"
#include "gxf/std/timestamp.hpp"
#include "gxf/std/transmitter.hpp"
#include <holoinfer.hpp>
#include <holoinfer_utils.hpp>
namespace HoloInfer = holoscan::inference;
namespace nvidia {
namespace holoscan {
namespace multiai {
class MultiAIInference : public gxf::Codelet {
 public:
  // Codelet lifecycle: set up the inference context, run inference on each tick, release resources.
  gxf_result_t start() override;
  gxf_result_t tick() override;
  gxf_result_t stop() override;
  gxf_result_t registerInterface(gxf::Registrar* registrar) override;

 private:
  // Inference configuration parameters exposed to the GXF application.
  gxf::Parameter<HoloInfer::Mappings> model_path_map_;          // model keyword -> model file path
  gxf::Parameter<HoloInfer::MultiMappings> pre_processor_map_;  // model keyword -> input tensor names
  gxf::Parameter<HoloInfer::Mappings> inference_map_;           // model keyword -> output tensor name
  gxf::Parameter<bool> is_engine_path_;                         // model paths point to pre-built engine files
  gxf::Parameter<std::vector<std::string> > in_tensor_names_;
  gxf::Parameter<std::vector<std::string> > out_tensor_names_;
  gxf::Parameter<gxf::Handle<gxf::Allocator> > allocator_;
  gxf::Parameter<bool> infer_on_cpu_;
  gxf::Parameter<bool> parallel_inference_;
  gxf::Parameter<bool> enable_fp16_;
  gxf::Parameter<std::string> backend_;
  gxf::Parameter<HoloInfer::GXFReceivers> receivers_;
  gxf::Parameter<HoloInfer::GXFTransmitters> transmitter_;
  gxf::Parameter<bool> input_on_cuda_;
  gxf::Parameter<bool> output_on_cuda_;
  gxf::Parameter<bool> transmit_on_cuda_;

  // Inference runtime state.
  std::unique_ptr<HoloInfer::InferContext> holoscan_infer_context_;
  std::shared_ptr<HoloInfer::MultiAISpecs> multiai_specs_;
  gxf::PrimitiveType data_type_ = gxf::PrimitiveType::kFloat32;
  std::map<std::string, std::vector<int> > dims_per_tensor_;
  const std::string module_{"Multi AI Inference Codelet"};
};
} // namespace multiai
} // namespace holoscan
} // namespace nvidia
#endif  // NVIDIA_GXF_EXTENSIONS_MULTIAI_INFERENCE_HPP_
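
The implementations of the lifecycle methods declared above live in the accompanying source file, which is not part of this listing. As a rough illustration only, the sketch below shows how the declared parameters would typically be exposed through the standard GXF Registrar::parameter() pattern; the parameter keys, headlines, and descriptions are assumptions made for illustration and may differ from the actual source.

#include "multiai_inference.hpp"

namespace nvidia {
namespace holoscan {
namespace multiai {

// Illustrative sketch only: registers a subset of the codelet's parameters.
// The remaining parameters would follow the same pattern.
gxf_result_t MultiAIInference::registerInterface(gxf::Registrar* registrar) {
  gxf::Expected<void> result;
  result &= registrar->parameter(model_path_map_, "model_path_map",
                                 "Model path map",
                                 "Map of model keyword to model file path (assumed key name).");
  result &= registrar->parameter(backend_, "backend",
                                 "Inference backend",
                                 "Backend used to run inference (assumed key name).");
  result &= registrar->parameter(allocator_, "allocator",
                                 "Allocator",
                                 "Allocator used for output tensors (assumed key name).");
  // ... remaining parameters registered similarly ...
  return gxf::ToResultCode(result);
}

}  // namespace multiai
}  // namespace holoscan
}  // namespace nvidia

In an application graph, these keys would then be set from the GXF/Holoscan configuration that wires the receivers, transmitter, and allocator into the codelet.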