Tool/software:
gst-launch-1.0 v4l2src device=/dev/video2 io-mode=2 ! image/jpeg,width=640,height=480,framerate=30/1 ! jpegdec ! videoconvert ! video/x-raw,format=NV12 ! tee name=input_tee input_tee. ! queue ! tiovxmultiscaler name=split src_0::roi-startx=0 src_0::roi-starty=0 src_0::roi-width=640 src_0::roi-height=480 target=0 ! queue ! video/x-raw,width=320,height=320 ! tiovxdlpreproc model=/opt/model_zoo/TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320 out-pool-size=4 ! application/x-tensor-tiovx ! tidlinferer target=1 model=/opt/model_zoo/TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320 ! post.tensor split. ! queue ! video/x-raw,width=480,height=480 ! post.sink tidlpostproc name=post model=/opt/model_zoo/TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320 alpha=0.4 viz-threshold=0.6 top-N=5 display-model=true ! tee name=detection_tee detection_tee. ! queue ! videoconvert ! autovideosink sync=false detection_tee. ! queue ! videoconvert ! v4l2h264enc extra-controls="encode,h264_i_frame_period=10,speed_preset=ultrafast" ! h264parse ! splitmuxsink location=video%02d.mkv max-size-time=10000000000 max-files=3 muxer=matroskamux post.text ! queue ! filesink location=output.yaml sync=true
This is a working pipeline for object detection that performs the following tasks:
- Captures video from a camera (v4l2src).
- Processes the video for object detection using tiovxmultiscaler, tidlinferer, and tidlpostproc.
- Displays the video output on a screen (autovideosink).
- Saves the processed video using splitmuxsink.
- Stores detection data in a YAML file.
I am trying to port this pipeline into a structured C program that uses element factories to create the elements and the gst_element_link methods to link them. My code is included below. I am struggling to set the tiovxmultiscaler properties correctly, and I also do not know how to implement the post.tensor and post.sink connections. I tried using pad pointers and also setting them as properties of postproc (that is how the code below is written), but tidlpostproc does not have a property named "tensor".
If you have sample C code for a similar object-detection pipeline, that would be very helpful; I can then adapt it to fit my requirements.
#include <gst/gst.h>
#include <glib.h>
int main(int argc, char *argv[]) {
GstElement *pipeline, *source, *jpeg_caps, *jpegdec, *videoconvert, *nv12_caps;
GstElement *scaler_split, *detect_queue ,*sink_queue, *detect_caps, *sink_caps ,*preproc, *tensor_caps, *inference, *postproc, *fakesink;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
gboolean terminate = FALSE;
gst_init(&argc, &argv);
// Create elements
pipeline = gst_pipeline_new("video-pipeline");
source = gst_element_factory_make("v4l2src", "source");
jpeg_caps = gst_element_factory_make("capsfilter", "jpeg_caps");
jpegdec = gst_element_factory_make("jpegdec", "jpegdec");
videoconvert = gst_element_factory_make("videoconvert", "videoconvert");
nv12_caps = gst_element_factory_make("capsfilter", "nv12_caps");
scaler_split = gst_element_factory_make("tiovxmultiscaler", "scaler_split");
detect_queue = gst_element_factory_make("queue","detect_queue");
sink_queue = gst_element_factory_make("queue", "sink_queue");
detect_caps = gst_element_factory_make("capsfilter","detect_caps");
sink_caps = gst_element_factory_make("capsfilter","sink_caps");
preproc = gst_element_factory_make("tiovxdlpreproc","preproc");
tensor_caps = gst_element_factory_make("capsfilter","tensor_caps");
inference = gst_element_factory_make("tidlinferer","inference");
postproc = gst_element_factory_make("tidlpostproc","postproc");
fakesink = gst_element_factory_make("fakesink","fakesink");
if (!pipeline || !source || !jpeg_caps || !jpegdec || !videoconvert || !nv12_caps || !tee ||
!input_queue || !scaler_split || !detect_queue || !detect_caps || !preproc || !tensor_caps || !inference
|| !scaler_split || !sink_queue || !sink_caps || !postproc || !fakesink) {
g_printerr("Not all elements could be created.\n");
return -1;
}
// Configure elements
g_object_set(G_OBJECT(source),
"device", "/dev/video18",
"io-mode", 2,
NULL);
// JPEG caps
GstCaps *jpeg_caps_val = gst_caps_new_simple("image/jpeg",
"width", G_TYPE_INT, 640,
"height", G_TYPE_INT, 480,
"framerate", GST_TYPE_FRACTION, 30, 1,
NULL);
g_object_set(G_OBJECT(jpeg_caps), "caps", jpeg_caps_val, NULL);
gst_caps_unref(jpeg_caps_val);
// NV12 caps
GstCaps *nv12_caps_val = gst_caps_new_simple("video/x-raw",
"format", G_TYPE_STRING, "NV12",
"width", G_TYPE_INT, 640,
"height", G_TYPE_INT, 480,
NULL);
g_object_set(G_OBJECT(nv12_caps), "caps", nv12_caps_val, NULL);
gst_caps_unref(nv12_caps_val);
//detect_caps
GstCaps *detect_caps_val = gst_caps_new_simple("video/x-raw",
"width", G_TYPE_INT, 320,
"height", G_TYPE_INT, 320,
NULL);
g_object_set(G_OBJECT(detect_caps),"caps", detect_caps_val, NULL);
gst_caps_unref(detect_caps_val);
//preproc
g_object_set(G_OBJECT(preproc),"model","/opt/model_zoo/TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320",
"out-pool-size",4,
NULL);
//tensor_caps
GstCaps *tensor_caps_val = gst_caps_new_simple("application/x-tensor-tiovx",
NULL);
g_object_set(G_OBJECT(tensor_caps),"caps",tensor_caps_val,NULL);
gst_caps_unref(tensor_caps_val);
//inference
g_object_set(G_OBJECT(inference),"target", 1,
"model","/opt/model_zoo/TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320",
NULL);
//sink caps
GstCaps *sink_caps_val = gst_caps_new_simple("video/x-raw",
"width",G_TYPE_INT, 480,
"height",G_TYPE_INT,480,
NULL);
g_object_set(G_OBJECT(sink_caps),"caps",sink_caps_val,NULL);
gst_caps_unref(sink_caps_val);
//postproc
g_object_set(G_OBJECT(postproc),"model","/opt/model_zoo/TFL-OD-2020-ssdLite-mobDet-DSP-coco-320x320",
"alpha",0.4,
"viz-threshold", 0.6,
"top-N",5,
"display-model",TRUE,
"tensor", inference,
"sink", sink_caps,
NULL);
// Add elements to pipeline
gst_bin_add_many(GST_BIN(pipeline),
source, jpeg_caps, jpegdec, videoconvert, nv12_caps,
scaler_split,detect_queue, detect_caps, preproc, tensor_caps, inference,
scaler_split,sink_queue, sink_caps, postproc,fakesink,
NULL);
// Link elements
if (!gst_element_link_many(source, jpeg_caps, jpegdec, videoconvert, nv12_caps,NULL)) {
g_printerr("Source nv12_caps link failed.\n");
return -1;
}
if (!gst_element_link_many(scaler_split, detect_queue, detect_caps,preproc,tensor_caps,inference,NULL)){
g_printerr("scaler postproc.tensor link failed!");
return -1;
}
if(!gst_element_link_many(scaler_split, sink_queue,sink_caps, postproc,fakesink,NULL)){
g_printerr("scaler postproc.sink link failed!");
return -1;
}
// Start pipeline
ret = gst_element_set_state(pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr("Unable to set the pipeline to the playing state.\n");
gst_object_unref(pipeline);
return -1;
}
// Bus message handling
bus = gst_element_get_bus(pipeline);
do {
msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_ERROR | GST_MESSAGE_EOS | GST_MESSAGE_STATE_CHANGED);
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE(msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error(msg, &err, &debug_info);
g_printerr("Error received from element %s: %s\n",
GST_OBJECT_NAME(msg->src), err->message);
g_printerr("Debugging information: %s\n",
debug_info ? debug_info : "none");
g_clear_error(&err);
g_free(debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print("End of stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
if (GST_MESSAGE_SRC(msg) == GST_OBJECT(pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed(msg, &old_state, &new_state, &pending_state);
g_print("Pipeline state changed from %s to %s\n",
gst_element_state_get_name(old_state),
gst_element_state_get_name(new_state));
}
break;
default:
g_printerr("Unexpected message received.\n");
break;
}
gst_message_unref(msg);
}
} while (!terminate);
// Cleanup
gst_object_unref(bus);
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);
return 0;
}