Program Listing for File multi_inference.hpp

Return to the documentation for file ``morpheus/_lib/include/morpheus/messages/multi_inference.hpp``

Copy
Copied!
            

/* * SPDX-FileCopyrightText: Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "morpheus/messages/memory/tensor_memory.hpp" #include "morpheus/messages/meta.hpp" #include "morpheus/messages/multi.hpp" #include "morpheus/messages/multi_tensor.hpp" #include "morpheus/objects/tensor_object.hpp" #include "morpheus/types.hpp" // for TensorIndex #include <memory> #include <string> namespace morpheus { /****** Component public implementations********************/ /****** MultiInferenceMessage*******************************/ #pragma GCC visibility push(default) class MultiInferenceMessage : public DerivedMultiMessage<MultiInferenceMessage, MultiTensorMessage> { public: MultiInferenceMessage(const MultiInferenceMessage& other) = default; MultiInferenceMessage(std::shared_ptr<MessageMeta> meta, TensorIndex mess_offset = 0, TensorIndex mess_count = -1, std::shared_ptr<TensorMemory> memory = nullptr, TensorIndex offset = 0, TensorIndex count = -1, std::string id_tensor_name = "seq_ids"); const TensorObject get_input(const std::string& name) const; TensorObject get_input(const std::string& name); void set_input(const std::string& name, const TensorObject& value); }; /****** MultiInferenceMessageInterfaceProxy****************/ struct MultiInferenceMessageInterfaceProxy : public MultiTensorMessageInterfaceProxy { static std::shared_ptr<MultiInferenceMessage> 
init(std::shared_ptr<MessageMeta> meta, TensorIndex mess_offset, TensorIndex mess_count, std::shared_ptr<TensorMemory> memory, TensorIndex offset, TensorIndex count, std::string id_tensor_name); }; #pragma GCC visibility pop// end of group } // namespace morpheus

© Copyright 2023, NVIDIA. Last updated on Apr 11, 2023.