NVIDIA DeepStream SDK API Reference
6.4 Release
gstnvinfer_property_parser.h
Go to the documentation of this file.
1
12
#ifndef __GST_NVINFER_PROPERTY_PARSER_H__
#define __GST_NVINFER_PROPERTY_PARSER_H__

#include <glib.h>

#include "nvdsinfer_context.h"
#include "gstnvinfer.h"

/* Default values applied to the per-class detection attributes when the
 * corresponding key is absent from the configuration file. */
#define DEFAULT_PRE_CLUSTER_THRESHOLD 0.2
#define DEFAULT_POST_CLUSTER_THRESHOLD 0.0
#define DEFAULT_EPS 0.0
#define DEFAULT_GROUP_THRESHOLD 0
#define DEFAULT_MIN_BOXES 0
#define DEFAULT_DBSCAN_MIN_SCORE 0
#define DEFAULT_NMS_IOU_THRESHOLD 0.3
#define DEFAULT_TOP_K -1

/* Name of the main configuration-file group parsed by this module. */
#define CONFIG_GROUP_PROPERTY "property"

/* Deprecated/legacy group name. */
#define CONFIG_GROUP_INFER_PARSE_FUNC "parse-func"

/* Keys of the [property] group: plugin behavior. */
#define CONFIG_GROUP_INFER_UNIQUE_ID "gie-unique-id"
#define CONFIG_GROUP_INFER_PROCESS_MODE "process-mode"
#define CONFIG_GROUP_INFER_INTERVAL "interval"
#define CONFIG_GROUP_INFER_LABEL "labelfile-path"
#define CONFIG_GROUP_INFER_GPU_ID "gpu-id"
#define CONFIG_GROUP_INFER_SECONDARY_REINFER_INTERVAL "secondary-reinfer-interval"
#define CONFIG_GROUP_INFER_OUTPUT_TENSOR_META "output-tensor-meta"
/* Skip copying inference output from device to host: 0 = no, 1 = yes. */
#define CONFIG_GROUP_INFER_DISABLE_OUTPUT_HOST_COPY "disable-output-host-copy"

#define CONFIG_GROUP_INFER_AUTO_INCREASE_MEMORY "auto-inc-mem"
#define CONFIG_GROUP_INFER_MAX_GPU_MEMORY_PERCENTAGE "max-gpu-mem-per"

/* DLA (Deep Learning Accelerator) selection. */
#define CONFIG_GROUP_INFER_ENABLE_DLA "enable-dla"
#define CONFIG_GROUP_INFER_USE_DLA_CORE "use-dla-core"

/* Tensor dump / overwrite debugging hooks. */
#define CONFIG_GROUP_INFER_DUMP_INPUT_TENSOR "dump-input-tensor"
#define CONFIG_GROUP_INFER_DUMP_OUTPUT_TENSOR "dump-output-tensor"
#define CONFIG_GROUP_INFER_OVERWRITE_INPUT_TENSOR "overwrite-input-tensor"
#define CONFIG_GROUP_INFER_OVERWRITE_OUTPUT_TENSOR "overwrite-output-tensor"
#define CONFIG_GROUP_INFER_INPUT_TENSOR_FILE "ip-tensor-file"
#define CONFIG_GROUP_INFER_OUTPUT_TENSOR_FILES "op-tensor-files"

/* Inference engine / model configuration. */
#define CONFIG_GROUP_INFER_BATCH_SIZE "batch-size"
#define CONFIG_GROUP_INFER_TENSOR_META_POOL_SIZE "tensor-meta-pool-size"
#define CONFIG_GROUP_INFER_NETWORK_MODE "network-mode"
#define CONFIG_GROUP_INFER_MODEL_ENGINE "model-engine-file"
#define CONFIG_GROUP_INFER_INT8_CALIBRATION_FILE "int8-calib-file"
#define CONFIG_GROUP_INFER_WORKSPACE_SIZE "workspace-size"

#define CONFIG_GROUP_INFER_OUTPUT_BLOB_NAMES "output-blob-names"
#define CONFIG_GROUP_INFER_IS_CLASSIFIER_LEGACY "is-classifier"
#define CONFIG_GROUP_INFER_NETWORK_TYPE "network-type"
#define CONFIG_GROUP_INFER_FORCE_IMPLICIT_BATCH_DIM "force-implicit-batch-dim"
#define CONFIG_GROUP_INFER_INFER_DIMENSIONS "infer-dims"
#define CONFIG_GROUP_INFER_OUTPUT_IO_FORMATS "output-io-formats"
#define CONFIG_GROUP_INFER_LAYER_DEVICE_PRECISION "layer-device-precision"

/* Input preprocessing. */
#define CONFIG_GROUP_INFER_MODEL_COLOR_FORMAT "model-color-format"
#define CONFIG_GROUP_INFER_SCALE_FACTOR "net-scale-factor"
#define CONFIG_GROUP_INFER_OFFSETS "offsets"
#define CONFIG_GROUP_INFER_MEANFILE "mean-file"
#define CONFIG_GROUP_INFER_MAINTAIN_ASPECT_RATIO "maintain-aspect-ratio"
#define CONFIG_GROUP_INFER_SYMMETRIC_PADDING "symmetric-padding"
#define CONFIG_GROUP_INFER_SCALING_FILTER "scaling-filter"
#define CONFIG_GROUP_INFER_SCALING_COMPUTE_HW "scaling-compute-hw"
#define CONFIG_GROUP_INFER_NET_INPUT_ORDER "network-input-order"
#define CONFIG_GROUP_INFER_INPUT_FROM_META "input-tensor-from-meta"

/* Custom library / parser hooks. */
#define CONFIG_GROUP_INFER_CUSTOM_LIB_PATH "custom-lib-path"
#define CONFIG_GROUP_INFER_CUSTOM_PARSE_BBOX_FUNC "parse-bbox-func-name"
#define CONFIG_GROUP_INFER_CUSTOM_PARSE_BBOX_IM_FUNC "parse-bbox-instance-mask-func-name"
#define CONFIG_GROUP_INFER_CUSTOM_ENGINE_CREATE_FUNC "engine-create-func-name"
#define CONFIG_GROUP_INFER_CUSTOM_PARSE_CLASSIFIER_FUNC "parse-classifier-func-name"
#define CONFIG_GROUP_INFER_CUSTOM_NETWORK_CONFIG "custom-network-config"

/* Caffe model files. */
#define CONFIG_GROUP_INFER_MODEL "model-file"
#define CONFIG_GROUP_INFER_PROTO "proto-file"

/* UFF model files. */
#define CONFIG_GROUP_INFER_UFF "uff-file"
#define CONFIG_GROUP_INFER_UFF_INPUT_ORDER "uff-input-order"
#define CONFIG_GROUP_INFER_UFF_INPUT_DIMENSIONS_LEGACY "input-dims"
#define CONFIG_GROUP_INFER_UFF_INPUT_DIMENSIONS_LEGACY_V2 "uff-input-dims"
#define CONFIG_GROUP_INFER_UFF_INPUT_BLOB_NAME "uff-input-blob-name"

/* TAO/TLT encoded models. */
#define CONFIG_GROUP_INFER_TLT_ENCODED_MODEL "tlt-encoded-model"
#define CONFIG_GROUP_INFER_TLT_MODEL_KEY "tlt-model-key"

/* ONNX model file. */
#define CONFIG_GROUP_INFER_ONNX "onnx-file"

/* Detector output handling. */
#define CONFIG_GROUP_INFER_NUM_DETECTED_CLASSES "num-detected-classes"
#define CONFIG_GROUP_INFER_ENABLE_DBSCAN "enable-dbscan"
#define CONFIG_GROUP_INFER_CLUSTER_MODE "cluster-mode"
#define CONFIG_GROUP_INFER_CROP_OBJECTS_TO_ROI_BOUNDARY "crop-objects-to-roi-boundary"

/* Classifier output handling. */
#define CONFIG_GROUP_INFER_CLASSIFIER_TYPE "classifier-type"
#define CONFIG_GROUP_INFER_CLASSIFIER_THRESHOLD "classifier-threshold"
#define CONFIG_GROUP_INFER_CLASSIFIER_ASYNC_MODE "classifier-async-mode"

/* Segmentation output handling. */
#define CONFIG_GROUP_INFER_SEGMENTATION_THRESHOLD "segmentation-threshold"
#define CONFIG_GROUP_INFER_SEGMENTATION_OUTPUT_ORDER "segmentation-output-order"

/* Instance segmentation. */
#define CONFIG_GROUP_INFER_OUTPUT_INSTANCE_MASK "output-instance-mask"

/* Size filters for input objects in secondary (per-object) mode. */
#define CONFIG_GROUP_INFER_INPUT_OBJECT_MIN_WIDTH "input-object-min-width"
#define CONFIG_GROUP_INFER_INPUT_OBJECT_MIN_HEIGHT "input-object-min-height"
#define CONFIG_GROUP_INFER_INPUT_OBJECT_MAX_WIDTH "input-object-max-width"
#define CONFIG_GROUP_INFER_INPUT_OBJECT_MAX_HEIGHT "input-object-max-height"

/* Operate-on / filter-out selection for cascaded inference. */
#define CONFIG_GROUP_INFER_GIE_ID_FOR_OPERATION "operate-on-gie-id"
#define CONFIG_GROUP_INFER_CLASS_IDS_FOR_OPERATION "operate-on-class-ids"
#define CONFIG_GROUP_INFER_CLASS_IDS_FOR_FILTERING "filter-out-class-ids"

/* Per-class attribute groups: [class-attrs-<class-id>] with the keys below. */
#define CONFIG_GROUP_INFER_CLASS_ATTRS_PREFIX "class-attrs-"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_THRESHOLD "threshold"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_PRE_CLUSTER_THRESHOLD "pre-cluster-threshold"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_POST_CLUSTER_THRESHOLD "post-cluster-threshold"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_EPS "eps"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_GROUP_THRESHOLD "group-threshold"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_MIN_BOXES "minBoxes"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_DBSCAN_MIN_SCORE "dbscan-min-score"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_ROI_TOP_OFFSET "roi-top-offset"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_ROI_BOTTOM_OFFSET "roi-bottom-offset"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_DETECTED_MIN_WIDTH "detected-min-w"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_DETECTED_MIN_HEIGHT "detected-min-h"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_DETECTED_MAX_WIDTH "detected-max-w"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_DETECTED_MAX_HEIGHT "detected-max-h"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_BORDER_COLOR "border-color"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_BG_COLOR "bg-color"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_NMS_IOU_THRESHOLD "nms-iou-threshold"
#define CONFIG_GROUP_INFER_CLASS_ATTRS_TOP_K "topk"

/**
 * Parses the nvinfer configuration file, populating both the GstNvInfer
 * element state and the NvDsInferContext initialization parameters.
 *
 * @param nvinfer       GstNvInfer element instance to configure.
 * @param init_params   Inference-context init params to populate.
 * @param cfg_file_path Path to the configuration file.
 * @return TRUE on success, FALSE on failure (standard gboolean
 *         convention; confirm exact failure semantics in implementation).
 */
gboolean gst_nvinfer_parse_config_file (GstNvInfer *nvinfer,
    NvDsInferContextInitParams *init_params, const gchar *cfg_file_path);

/**
 * Parses only the NvDsInferContext-related keys from the configuration
 * file into @params, without requiring a GstNvInfer instance.
 *
 * @param params        Inference-context init params to populate.
 * @param cfg_file_path Path to the configuration file.
 * @return TRUE on success, FALSE on failure.
 */
gboolean gst_nvinfer_parse_context_params (NvDsInferContextInitParams *params,
    const gchar *cfg_file_path);

#endif /*__GST_NVINFER_PROPERTY_PARSER_H__*/
GstNvInfer
G_BEGIN_DECLS typedef struct _GstNvInfer GstNvInfer
Definition:
gstnvinfer.h:44
gstnvinfer.h
nvdsinfer_context.h
Copyright (c) 2018-2020, NVIDIA CORPORATION.
_NvDsInferContextInitParams
Holds the initialization parameters required for the NvDsInferContext interface.
Definition:
nvdsinfer_context.h:233
gst_nvinfer_parse_config_file
gboolean gst_nvinfer_parse_config_file(GstNvInfer *nvinfer, NvDsInferContextInitParams *init_params, const gchar *cfg_file_path)
gst_nvinfer_parse_context_params
gboolean gst_nvinfer_parse_context_params(NvDsInferContextInitParams *params, const gchar *cfg_file_path)
Advance Information | Subject to Change | Generated by NVIDIA | Mon Dec 11 2023 17:51:24 | PR-09318-R32