/* OpDLInference.h — public/src/operator/include/OpDLInference.h */
/*
* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LicenseRef-NvidiaProprietary
*
* NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
* property and proprietary rights in and to this material, related
* documentation and any modifications thereto. Any use, reproduction,
* disclosure or distribution of this material and related documentation
* without an express license agreement from NVIDIA CORPORATION or
* its affiliates is strictly prohibited.
*/
#ifndef PVA_SOLUTIONS_OPDLINFERENCE_H
#define PVA_SOLUTIONS_OPDLINFERENCE_H
#include <PvaOperator.h>
#include <PvaOperatorTypes.h>
#include <cupva_host_scheduling.h>
#include <nvcv/Status.h>
#include <nvcv/Tensor.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Fixed capacity of the input/output arrays in the create- and submit-parameter
 * structs below; numInReqs/numInTensors (and the output counterparts) must not
 * exceed these bounds. */
#define PVA_DL_INFERENCE_MAX_NUM_INPUTS 8
#define PVA_DL_INFERENCE_MAX_NUM_OUTPUTS 8
/**
 * Parameters for creating a PVA DL-inference operator via pvaDLInferenceCreate().
 *
 * Only the first numInReqs / numOutReqs entries of the requirement arrays are
 * meaningful; counts must be in [0, PVA_DL_INFERENCE_MAX_NUM_*].
 *
 * NOTE(review): pointer ownership/lifetime (whether the operator copies the
 * requirements and the network name, or retains the caller's pointers past
 * creation) is not visible in this header — confirm against the implementation.
 */
typedef struct PvaDLInferenceCreateParamRec
{
NVCVTensorRequirements *inReqs[PVA_DL_INFERENCE_MAX_NUM_INPUTS];   /**< Tensor requirements for each network input. */
NVCVTensorRequirements *outReqs[PVA_DL_INFERENCE_MAX_NUM_OUTPUTS]; /**< Tensor requirements for each network output. */
int32_t numInReqs;  /**< Number of valid entries in inReqs. */
int32_t numOutReqs; /**< Number of valid entries in outReqs. */
char *networkName;  /**< Name identifying the network to run (NUL-terminated). */
} PvaDLInferenceCreateParams;
/**
 * Per-submission parameters for pvaDLInferenceSubmit().
 *
 * Only the first numInTensors / numOutTensors handles are meaningful; counts
 * must be in [0, PVA_DL_INFERENCE_MAX_NUM_*]. Presumably these counts should
 * match the numInReqs/numOutReqs used at creation — verify against the
 * implementation.
 */
typedef struct PvaDLInferenceSubmitParamRec
{
NVCVTensorHandle inTensors[PVA_DL_INFERENCE_MAX_NUM_INPUTS];   /**< Input tensor handles for this submission. */
NVCVTensorHandle outTensors[PVA_DL_INFERENCE_MAX_NUM_OUTPUTS]; /**< Output tensor handles to receive results. */
int32_t numInTensors;  /**< Number of valid entries in inTensors. */
int32_t numOutTensors; /**< Number of valid entries in outTensors. */
} PvaDLInferenceSubmitParams;
/**
 * Creates a DL-inference operator instance.
 *
 * @param[out] handle Receives the handle of the newly created operator.
 * @param[in]  params Creation parameters (tensor requirements, network name).
 *
 * @return NVCVStatus result code. NOTE(review): specific error codes (e.g. for
 *         NULL arguments or out-of-range counts) are not documented here —
 *         confirm against the implementation.
 */
NVCVStatus pvaDLInferenceCreate(NVCVOperatorHandle *handle, PvaDLInferenceCreateParams *params);
/**
 * Submits an inference job on the given operator.
 *
 * @param[in] handle Operator handle previously obtained from pvaDLInferenceCreate().
 * @param[in] stream CUPVA stream on which the work is scheduled. NOTE(review):
 *                   whether the call is asynchronous with respect to the caller
 *                   is not visible in this header — confirm with the CUPVA
 *                   scheduling documentation/implementation.
 * @param[in] params Input/output tensor handles for this submission.
 *
 * @return NVCVStatus result code.
 */
NVCVStatus pvaDLInferenceSubmit(NVCVOperatorHandle handle, cupvaStream_t stream, PvaDLInferenceSubmitParams *params);
#ifdef __cplusplus
}
#endif
#endif /* PVA_SOLUTIONS_OPDLINFERENCE_H */