Compare commits

..

1 Commits

Author SHA1 Message Date
svcmobrel-release
5c1f0868fd Updating prebuilts and/or headers
ed8273ff6102bb0b4fa7975a401b12b3e95a7187 - nvbufsurface.h
7af73b80b2f930ab91431d66cd84ec794da9e117 - v4l2_nv_extensions.h
d27a433ddeaefb9f42d0312c23472514b0cd6a45 - gst-nvcustomevent.h
21a860247c06670e4619b8eaae1d92db31bdd3e8 - gst-v4l2/gstv4l2.c
e8e973c103725b65232d32817e0305d12d6ff309 - gst-v4l2/gstv4l2h264enc.c
49a66f0ce02abc71f33e096a65645ddedf5c7f46 - gst-v4l2/gstv4l2bufferpool.c
9f726e4439379bb399f29c68736242f21dab3dd0 - gst-v4l2/gstv4l2allocator.c
65de802e5f162aa04518b7ade5841cc3ced01111 - gst-v4l2/Makefile
02d142337f4b96fcb0c9f2405a3cbe90c5917cca - gst-v4l2/gstv4l2vp9enc.c
dc1a3f7292873f1f71dc27300f97f3ab918ed79f - gst-v4l2/gstv4l2h265enc.c
d29e3a719400c3cb27314366d48ec792a3c12363 - gst-v4l2/gstv4l2h265enc.h
c81eacb7d88c4fb839506dd70055e30d7a9feeec - gst-v4l2/v4l2-utils.h
b1cd923335aa60985ff9866fba91a2068e8671c7 - gst-v4l2/LICENSE.gst-nvvideo4linux2
73b03969d7ae0a8adb374c93999c43af88ea93b2 - gst-v4l2/v4l2_calls.c
d89a680415f6ff5acec2571cde0fce9054d8e81f - gst-v4l2/gstv4l2vp9enc.h
b52a5ee4c739818736b9a3683442df285ebe9eda - gst-v4l2/gstv4l2videodec.c
3f7cafe5beb4395caf2e1591bf0a835e5076031a - gst-v4l2/gstv4l2object.h
d5952b0286c34bf13fbf5e09fe552ced0da49368 - gst-v4l2/gstv4l2videodec.h
398c24d1eef98ec9003a06587bc3784050602cd2 - gst-v4l2/gstv4l2h26xparser.c
39fcb2f599e6906ab0fd7ab9a46fef3ea58a8cab - gst-v4l2/gstv4l2vp8enc.h
cbc84dccd2506afa4c8f03849c95bb28c83ef4a3 - gst-v4l2/gstv4l2av1enc.h
a002edef13a3bbbdc41e42a7fca40e574ad1bb3e - gst-v4l2/v4l2-utils.c
c2099692cdb374440c2a040cb6ad01bbc1549ce5 - gst-v4l2/gstv4l2h26xparser.h
99d65d620807b5ba1ca29a838e032940c9b019cc - gst-v4l2/sei_parse.c
b827fd6cb1e3b8ecebd6a07f8556e846e26cba17 - gst-v4l2/gstv4l2allocator.h
489fde70531590e94d1d211a42f10f81ae68d2b9 - gst-v4l2/gstv4l2videoenc.h
4e79cf75c4fa29791e1f5141318dc8aec13a7835 - gst-v4l2/nalutils.h
71be284b547ee68fb0e2cd14b0aeb14734a915a1 - gst-v4l2/gstv4l2bufferpool.h
5ecd059e5ef9be4014eface37e5e2f7598960f4e - gst-v4l2/nalutils.c
5948d70c07e87f9b1dc403789dcbed6acfa47ad9 - gst-v4l2/gstv4l2av1enc.c
bb104683f5e4f7402e3f765a891e149edc794e02 - gst-v4l2/gstv4l2h264enc.h
9681f7b98dfdfbc4d845f9ce7f11c3692b923195 - gst-v4l2/gstv4l2videoenc.c
807bc9859585a540b0f85e98f147756aab24e1bd - gst-v4l2/gstv4l2vp8enc.c
884e5b97b9fa8d07b6153e6efe6999884922b813 - gst-v4l2/gstv4l2object.c
20c4f7c0cb89c83256650bc3353ed82154cf3a9d - gst-v4l2/gst/gst-i18n-plugin.h
e864ee6647f3572b144403d799f68152e9900da1 - gst-v4l2/gst/gettext.h
499a9feb17ceabf1f1443923dffa1e0180bf5972 - gst-v4l2/gst/glib-compat-private.h
72a34a694337f8f6da3bb94c9faced6730cbd2fc - gst-v4l2/ext/types-compat.h
1636366b5a062e4bc1791b7bc3012ccf5635b363 - gst-v4l2/ext/v4l2-controls.h
a745675b051a2b8434a430c80fde3f245864ca89 - gst-v4l2/ext/v4l2-common.h
522ab8fc8531a2c758b9278d29642f5b763fd3e7 - gst-v4l2/ext/videodev2.h

Change-Id: I3770af2d1c63a6193ccfb47a0ec190f5d241a331
2024-09-06 00:00:25 -07:00
22 changed files with 3720 additions and 1196 deletions

View File

@@ -1,39 +1,43 @@
Updating prebuilts and/or headers
77c130ed3990efc3a31750d9380ded85e2b660d1 - v4l2_nv_extensions.h
c2b683c77f90fbca8b8c2e0b59efee45db468369 - nvbufsurface.h
83e21353d1fe20cba4bd630c3b41c1615b8268ed - nvbuf_utils.h
80b3faf4a2e03de49089b320f0cf005d9a0a54ad - gst-v4l2/gstv4l2bufferpool.c
5948d70c07e87f9b1dc403789dcbed6acfa47ad9 - gst-v4l2/gstv4l2av1enc.c
a6f39a3f80f770833a35db7bf41e2ae5de9b6ace - gst-v4l2/sei_parse.c
d89a680415f6ff5acec2571cde0fce9054d8e81f - gst-v4l2/gstv4l2vp9enc.h
4311b3fbc6e5675353491a6fab52577ed36f499d - gst-v4l2/gstv4l2.c
b724de78f364b0855abfbbaf6fda9ae51ecbfd00 - gst-v4l2/gstv4l2videoenc.c
39fcb2f599e6906ab0fd7ab9a46fef3ea58a8cab - gst-v4l2/gstv4l2vp8enc.h
71be284b547ee68fb0e2cd14b0aeb14734a915a1 - gst-v4l2/gstv4l2bufferpool.h
c773f1e03097c888a3fda59ace02ea622e101d13 - gst-v4l2/Makefile
0eabbf0521068ee26f5c620698aac456a2b1d265 - gst-v4l2/gstv4l2object.c
ed8273ff6102bb0b4fa7975a401b12b3e95a7187 - nvbufsurface.h
7af73b80b2f930ab91431d66cd84ec794da9e117 - v4l2_nv_extensions.h
d27a433ddeaefb9f42d0312c23472514b0cd6a45 - gst-nvcustomevent.h
21a860247c06670e4619b8eaae1d92db31bdd3e8 - gst-v4l2/gstv4l2.c
e8e973c103725b65232d32817e0305d12d6ff309 - gst-v4l2/gstv4l2h264enc.c
b827fd6cb1e3b8ecebd6a07f8556e846e26cba17 - gst-v4l2/gstv4l2allocator.h
49a66f0ce02abc71f33e096a65645ddedf5c7f46 - gst-v4l2/gstv4l2bufferpool.c
9f726e4439379bb399f29c68736242f21dab3dd0 - gst-v4l2/gstv4l2allocator.c
65de802e5f162aa04518b7ade5841cc3ced01111 - gst-v4l2/Makefile
02d142337f4b96fcb0c9f2405a3cbe90c5917cca - gst-v4l2/gstv4l2vp9enc.c
85ff961e6bdfb02907033709ee001bc250af8e03 - gst-v4l2/gstv4l2object.h
fbdc964b443c64094f5b3f6e2bcd29697bc27694 - gst-v4l2/gstv4l2videodec.h
c81eacb7d88c4fb839506dd70055e30d7a9feeec - gst-v4l2/v4l2-utils.h
cbc84dccd2506afa4c8f03849c95bb28c83ef4a3 - gst-v4l2/gstv4l2av1enc.h
dc1a3f7292873f1f71dc27300f97f3ab918ed79f - gst-v4l2/gstv4l2h265enc.c
d29e3a719400c3cb27314366d48ec792a3c12363 - gst-v4l2/gstv4l2h265enc.h
bb104683f5e4f7402e3f765a891e149edc794e02 - gst-v4l2/gstv4l2h264enc.h
c81eacb7d88c4fb839506dd70055e30d7a9feeec - gst-v4l2/v4l2-utils.h
b1cd923335aa60985ff9866fba91a2068e8671c7 - gst-v4l2/LICENSE.gst-nvvideo4linux2
c08d733da85d44332a0b7b6a9183308d307d160c - gst-v4l2/gstv4l2videodec.c
a002edef13a3bbbdc41e42a7fca40e574ad1bb3e - gst-v4l2/v4l2-utils.c
605f3b6fd4cc1f0e790f5ab50c9e2d87dfea9523 - gst-v4l2/gstv4l2videoenc.h
73b03969d7ae0a8adb374c93999c43af88ea93b2 - gst-v4l2/v4l2_calls.c
d89a680415f6ff5acec2571cde0fce9054d8e81f - gst-v4l2/gstv4l2vp9enc.h
b52a5ee4c739818736b9a3683442df285ebe9eda - gst-v4l2/gstv4l2videodec.c
3f7cafe5beb4395caf2e1591bf0a835e5076031a - gst-v4l2/gstv4l2object.h
d5952b0286c34bf13fbf5e09fe552ced0da49368 - gst-v4l2/gstv4l2videodec.h
398c24d1eef98ec9003a06587bc3784050602cd2 - gst-v4l2/gstv4l2h26xparser.c
39fcb2f599e6906ab0fd7ab9a46fef3ea58a8cab - gst-v4l2/gstv4l2vp8enc.h
cbc84dccd2506afa4c8f03849c95bb28c83ef4a3 - gst-v4l2/gstv4l2av1enc.h
a002edef13a3bbbdc41e42a7fca40e574ad1bb3e - gst-v4l2/v4l2-utils.c
c2099692cdb374440c2a040cb6ad01bbc1549ce5 - gst-v4l2/gstv4l2h26xparser.h
99d65d620807b5ba1ca29a838e032940c9b019cc - gst-v4l2/sei_parse.c
b827fd6cb1e3b8ecebd6a07f8556e846e26cba17 - gst-v4l2/gstv4l2allocator.h
489fde70531590e94d1d211a42f10f81ae68d2b9 - gst-v4l2/gstv4l2videoenc.h
4e79cf75c4fa29791e1f5141318dc8aec13a7835 - gst-v4l2/nalutils.h
71be284b547ee68fb0e2cd14b0aeb14734a915a1 - gst-v4l2/gstv4l2bufferpool.h
5ecd059e5ef9be4014eface37e5e2f7598960f4e - gst-v4l2/nalutils.c
5948d70c07e87f9b1dc403789dcbed6acfa47ad9 - gst-v4l2/gstv4l2av1enc.c
bb104683f5e4f7402e3f765a891e149edc794e02 - gst-v4l2/gstv4l2h264enc.h
9681f7b98dfdfbc4d845f9ce7f11c3692b923195 - gst-v4l2/gstv4l2videoenc.c
807bc9859585a540b0f85e98f147756aab24e1bd - gst-v4l2/gstv4l2vp8enc.c
ed77613908dddf791481ea198dfd75f988684226 - gst-v4l2/gstv4l2allocator.c
4a047575250eb3ccb6db1947ed36e9562fe000af - gst-v4l2/gstv4l2h265enc.c
499a9feb17ceabf1f1443923dffa1e0180bf5972 - gst-v4l2/gst/glib-compat-private.h
884e5b97b9fa8d07b6153e6efe6999884922b813 - gst-v4l2/gstv4l2object.c
20c4f7c0cb89c83256650bc3353ed82154cf3a9d - gst-v4l2/gst/gst-i18n-plugin.h
e864ee6647f3572b144403d799f68152e9900da1 - gst-v4l2/gst/gettext.h
522ab8fc8531a2c758b9278d29642f5b763fd3e7 - gst-v4l2/ext/videodev2.h
a745675b051a2b8434a430c80fde3f245864ca89 - gst-v4l2/ext/v4l2-common.h
1636366b5a062e4bc1791b7bc3012ccf5635b363 - gst-v4l2/ext/v4l2-controls.h
499a9feb17ceabf1f1443923dffa1e0180bf5972 - gst-v4l2/gst/glib-compat-private.h
72a34a694337f8f6da3bb94c9faced6730cbd2fc - gst-v4l2/ext/types-compat.h
1636366b5a062e4bc1791b7bc3012ccf5635b363 - gst-v4l2/ext/v4l2-controls.h
a745675b051a2b8434a430c80fde3f245864ca89 - gst-v4l2/ext/v4l2-common.h
522ab8fc8531a2c758b9278d29642f5b763fd3e7 - gst-v4l2/ext/videodev2.h

235
gst-nvcustomevent.h Normal file
View File

@@ -0,0 +1,235 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/**
* @file
* <b>NVIDIA GStreamer: Custom Events</b>
*
* @b Description: This file specifies the NVIDIA GStreamer custom
* event functions.
*
*/
/**
* @defgroup gstreamer_nvevent Events: Custom Events API
*
* Specifies GStreamer custom event functions.
*
* @ingroup gst_mess_evnt_qry
* @{
*/
#ifndef __GST_NVCUSTOMEVENT_H__
#define __GST_NVCUSTOMEVENT_H__
#include <gst/gst.h>
#ifdef __cplusplus
extern "C" {
#endif
#define FLAG(name) GST_EVENT_TYPE_##name
/** Defines supported types of custom events. */
typedef enum {
/** Specifies a custom event to indicate decoder drop frame interval update
of a particular stream. */
GST_NVEVENT_DEC_DROP_FRAME_INTERVAL_UPDATE
= GST_EVENT_MAKE_TYPE (500, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate decoder skip frame update
of a particular stream. */
GST_NVEVENT_DEC_SKIP_FRAME_UPDATE
= GST_EVENT_MAKE_TYPE (501, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to enable decoder low-latency-mode
of a particular stream. */
GST_NVEVENT_DEC_ENABLE_LOW_LATENCY_MODE
= GST_EVENT_MAKE_TYPE (502, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate encoder bitrate update
of a particular stream. */
GST_NVEVENT_ENC_BITRATE_UPDATE
= GST_EVENT_MAKE_TYPE (503, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate encoder force IDR frame
of a particular stream. */
GST_NVEVENT_ENC_FORCE_IDR
= GST_EVENT_MAKE_TYPE (504, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate encoder force Intra frame
of a particular stream. */
GST_NVEVENT_ENC_FORCE_INTRA
= GST_EVENT_MAKE_TYPE (505, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate iframe interval update
of a particular stream. */
GST_NVEVENT_ENC_IFRAME_INTERVAL_UPDATE
= GST_EVENT_MAKE_TYPE (506, FLAG(DOWNSTREAM) | FLAG(SERIALIZED))
} GstNvCustomEventType;
#undef FLAG
/**
* Creates a new "nv-dec-drop-frame-interval-update" event.
*
* @param[out] stream_id Stream ID of the stream for which decoder-drop-frame-interval is to be sent
* @param[out] interval The decoder drop-frame interval obtained corresponding to stream ID for the event.
*/
GstEvent * gst_nvevent_dec_drop_frame_interval_update (gchar* stream_id, guint interval);
/**
* Parses a "nv-dec-drop-frame-interval-update" event received on the sinkpad.
*
* @param[in] event The event received on the sinkpad
* when the stream ID sends a dec-drop-frame-interval-update event.
* @param[out] stream_id A pointer to the parsed stream ID for which
* the event is sent.
* @param[out] interval A pointer to the parsed interval
* corresponding to stream ID for the event.
*/
void gst_nvevent_parse_dec_drop_frame_interval_update (GstEvent * event, gchar** stream_id, guint *interval);
/**
* Creates a new "nv-dec-skip-frame-update" event.
*
* @param[out] stream_id Stream ID of the stream for which decoder-skip-frame-update is to be sent
* @param[out] frame_type The decoder frame-type to be skipped obtained corresponding to stream ID for the event.
*/
GstEvent * gst_nvevent_dec_skip_frame_update (gchar* stream_id, guint frame_type);
/**
* Parses a "nv-dec-skip-frame-update" event received on the sinkpad.
*
* @param[in] event The event received on the sinkpad
* when the stream ID sends a skip-frame-update event.
* @param[out] stream_id A pointer to the parsed stream ID for which
* the event is sent.
* @param[out] frame_type A pointer to the parsed frame_type
* corresponding to stream ID for the event.
*/
void gst_nvevent_parse_dec_skip_frame_update (GstEvent * event, gchar** stream_id, guint *frame_type);
/**
 * Creates a new "nv-dec-enable-low-latency-mode" event.
 *
 * @param[in] stream_id Stream ID of the stream for which decoder-low-latency-mode is to be sent.
 * @param[in] enable The decoder low latency mode to be enabled corresponding to stream ID for the event.
 */
GstEvent * gst_nvevent_dec_enable_low_latency_mode (gchar* stream_id, gint enable);
/**
 * Parses a "nv-dec-enable-low-latency-mode" event received on the sinkpad.
 *
 * @param[in] event The event received on the sinkpad
 *            when the stream ID sends an enable-low-latency-mode event.
 * @param[out] stream_id A pointer to the parsed stream ID for which
 *             the event is sent.
 * @param[out] enable A pointer to the parsed enable flag
 *             corresponding to stream ID for the event.
 */
void gst_nvevent_parse_dec_enable_low_latency_mode (GstEvent * event, gchar** stream_id, gint *enable);
/**
* Creates a new "nv-enc-bitrate-update" event.
*
* @param[out] stream_id Stream ID of the stream for which encoder-bitrate-update is to be sent
* @param[out] bitrate The encoder bitrate to be set corresponding to stream ID for the event.
*/
GstEvent * gst_nvevent_enc_bitrate_update (gchar* stream_id, guint bitrate);
/**
* Parses a "nv-enc-bitrate-update" event received on the sinkpad.
*
* @param[in] event The event received on the sinkpad
* when the stream ID sends a bitrate-update event.
* @param[out] stream_id A pointer to the parsed stream ID for which
* the event is sent.
* @param[out] bitrate A pointer to the parsed bitrate value
* corresponding to stream ID for the event.
*/
void gst_nvevent_parse_enc_bitrate_update (GstEvent * event, gchar** stream_id, guint *bitrate);
/**
* Creates a new "nv-enc-force-idr" event.
*
* @param[out] stream_id Stream ID of the stream for which encoder-force-idr is to be sent
* @param[out] force The encoder force IDR frame corresponding to stream ID for the event.
*/
GstEvent * gst_nvevent_enc_force_idr (gchar* stream_id, gint force);
/**
* Parses a "nv-enc-force-idr" event received on the sinkpad.
*
* @param[in] event The event received on the sinkpad
* when the stream ID sends a force-idr event.
* @param[out] stream_id A pointer to the parsed stream ID for which
* the event is sent.
* @param[out] force A pointer to the parsed force value
* corresponding to stream ID for the event.
*/
void gst_nvevent_parse_enc_force_idr (GstEvent * event, gchar** stream_id, gint *force);
/**
* Creates a new "nv-enc-force-intra" event.
*
* @param[out] stream_id Stream ID of the stream for which encoder-force-intra is to be sent
* @param[out] force The encoder force Intra frame corresponding to stream ID for the event.
*/
GstEvent * gst_nvevent_enc_force_intra (gchar* stream_id, gint force);
/**
* Parses a "nv-enc-force-intra" event received on the sinkpad.
*
* @param[in] event The event received on the sinkpad
* when the stream ID sends a force-intra event.
* @param[out] stream_id A pointer to the parsed stream ID for which
* the event is sent.
* @param[out] force A pointer to the parsed force value
* corresponding to stream ID for the event.
*/
void gst_nvevent_parse_enc_force_intra (GstEvent * event, gchar** stream_id, gint *force);
/**
 * Creates a new "nv-enc-iframeinterval-update" event.
 *
 * @param[in] stream_id Stream ID of the stream for which encoder-iframeinterval-update is to be sent.
 * @param[in] interval The encoder iframeinterval to be set corresponding to stream ID for the event.
 */
GstEvent * gst_nvevent_enc_iframeinterval_update (gchar* stream_id, guint interval);
/**
 * Parses a "nv-enc-iframeinterval-update" event received on the sinkpad.
 *
 * @param[in] event The event received on the sinkpad
 *            when the stream ID sends an iframeinterval-update event.
 * @param[out] stream_id A pointer to the parsed stream ID for which
 *             the event is sent.
 * @param[out] interval A pointer to the parsed interval value
 *             corresponding to stream ID for the event.
 */
void gst_nvevent_parse_enc_iframeinterval_update (GstEvent * event, gchar** stream_id, guint *interval);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@@ -1,6 +1,6 @@
###############################################################################
#
# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
@@ -19,7 +19,6 @@ NVDS_VERSION:=6.0
ifeq ($(TARGET_DEVICE),aarch64)
GST_INSTALL_DIR?=/usr/lib/aarch64-linux-gnu/gstreamer-1.0/
LIB_INSTALL_DIR?=/usr/lib/aarch64-linux-gnu/tegra/
INCLUDES += -I/usr/src/jetson_multimedia_api/include/
CFLAGS:=
else
GST_INSTALL_DIR?=/opt/nvidia/deepstream/deepstream-$(NVDS_VERSION)/lib/gst-plugins/
@@ -27,10 +26,11 @@ else
CFLAGS:= -DUSE_V4L2_TARGET_NV_CODECSDK=1 -DUSE_V4L2_TARGET_NV_X86=1 -DUSE_V4L2_GST_HEADER_VER_1_8
endif
LIBS:= -lnvbufsurface -lnvbufsurftransform -lgstnvdsseimeta
LIBS:= -lnvbufsurface -lnvbufsurftransform -lgstnvdsseimeta -lgstnvcustomhelper
SRCS := $(wildcard *.c)
INCLUDES += -I./ -I../
INCLUDES += -I/usr/src/jetson_multimedia_api/include/
PKGS := gstreamer-1.0 \
gstreamer-base-1.0 \

View File

@@ -2,7 +2,7 @@
*
* Copyright (C) 2001-2002 Ronald Bultje <rbultje@ronald.bitfreak.net>
* 2006 Edgard Lima <edgard.lima@gmail.com>
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
*
* gstv4l2.c: plugin for v4l2 elements
*
@@ -291,6 +291,76 @@ GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
#else
/* Returns TRUE when the SoC reported by the device-tree info file is one
 * that carries a HW VP8 encoder (tegra186 / tegra210), FALSE otherwise
 * or on any I/O failure. */
static gboolean
gst_v4l2_has_vp8_encoder(void)
{
  gboolean ret = FALSE;
  int fd = -1;
  long len = -1;
  struct stat statbuf;
  char info[128];

  if (access (V4L2_DEVICE_PATH_TEGRA_INFO, F_OK) == 0) {
    /* Fix: the previous version ignored stat/open/read failures and could
     * read from fd -1 and scan an uninitialized buffer. */
    if (stat(V4L2_DEVICE_PATH_TEGRA_INFO, &statbuf) != 0)
      return FALSE;
    if (statbuf.st_size > 0 && statbuf.st_size < 128)
    {
      fd = open(V4L2_DEVICE_PATH_TEGRA_INFO, O_RDONLY);
      if (fd < 0)
        return FALSE;
      if (read(fd, info, statbuf.st_size) != statbuf.st_size)
      {
        /* Short read or error: do not scan bytes that were never filled. */
        close(fd);
        return FALSE;
      }
      /* Stop 8 bytes early so the 8-byte "tegraXXX" comparisons below
       * never read past the bytes actually read into info[]. */
      len = statbuf.st_size - 8;
      for (int i = 0; i < len; i ++)
      {
        if (strncmp(&info[i], "tegra", 5) == 0)
        {
          if (strncmp(&info[i], "tegra186", 8) == 0 ||
              strncmp(&info[i], "tegra210", 8) == 0)
            ret = TRUE;
          break;
        }
      }
      close(fd);
    }
  }
  return ret;
}
/* Returns FALSE when the board identifies as a Jetson Orin Nano module
 * (p3767-0003/0004/0005), which has no HW video encoders; TRUE otherwise.
 * On any I/O failure the default of TRUE (encoder assumed present) is kept. */
static gboolean
gst_v4l2_is_v4l2_nvenc_present(void)
{
  gboolean ret = TRUE;
  int fd = -1;
  long len = -1;
  struct stat statbuf;
  char info[128];

  if (access (V4L2_DEVICE_PATH_TEGRA_INFO, F_OK) == 0) {
    /* Fix: the previous version ignored stat/open/read failures and could
     * read from fd -1 and scan an uninitialized buffer. */
    if (stat(V4L2_DEVICE_PATH_TEGRA_INFO, &statbuf) != 0)
      return ret;
    if (statbuf.st_size > 0 && statbuf.st_size < 128)
    {
      fd = open(V4L2_DEVICE_PATH_TEGRA_INFO, O_RDONLY);
      if (fd < 0)
        return ret;
      if (read(fd, info, statbuf.st_size) != statbuf.st_size)
      {
        /* Short read or error: keep the default answer. */
        close(fd);
        return ret;
      }
      /* Stop 10 bytes early: the code below reads info[i .. i+9]
       * ("p3767" plus the 4-digit suffix at offset i+6). */
      len = statbuf.st_size - 10;
      for (int i = 0; i < len; i ++)
      {
        if (strncmp(&info[i], "p3767", 5) == 0)
        {
          /*
            Jetson Orin Nano 8GB (P3767-0003) Commercial module
            Jetson Orin Nano 4GB (P3767-0004) Commercial module
            Jetson Orin Nano 8GB with SD card slot (P3767-0005) For the Developer Kit only
          */
          if (strncmp(&info[i + 6], "0003", 4) == 0 ||
              strncmp(&info[i + 6], "0004", 4) == 0 ||
              strncmp(&info[i + 6], "0005", 4) == 0)
            ret = FALSE;
          break;
        }
      }
      close(fd);
    }
  }
  return ret;
}
static gboolean
plugin_init (GstPlugin * plugin)
{
@@ -301,17 +371,20 @@ plugin_init (GstPlugin * plugin)
GST_DEBUG_CATEGORY_INIT (v4l2_debug, "v4l2", 0, "V4L2 API calls");
#ifndef USE_V4L2_TARGET_NV_X86
int ret_val = -1;
ret_val = system("lsmod | grep 'nvgpu' > /dev/null");
if (ret_val == -1) {
int igpu = -1, dgpu = -1;
igpu = system("lsmod | grep 'nvgpu' > /dev/null");
dgpu = system("modprobe -D -q nvidia | grep 'dkms' > /dev/null");
if (igpu == -1 || dgpu == -1)
return FALSE;
}
else if (ret_val == 0) {
is_cuvid = FALSE;
}
else {
else if (dgpu == 0)
is_cuvid = TRUE;
}
else
is_cuvid = FALSE;
if (getenv("AARCH64_DGPU"))
is_cuvid = TRUE;
else if (getenv("AARCH64_IGPU"))
is_cuvid = FALSE;
#endif
if (is_cuvid == TRUE)
@@ -345,6 +418,10 @@ plugin_init (GstPlugin * plugin)
NULL,
NULL);
} else {
if (!gst_v4l2_is_v4l2_nvenc_present()) {
// Orin Nano does not have HW encoders, so early return here.
return ret;
}
gst_v4l2_h264_enc_register(plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC_ALT,
@@ -359,11 +436,13 @@ plugin_init (GstPlugin * plugin)
if (is_cuvid == FALSE) {
if (access (V4L2_DEVICE_PATH_NVENC, F_OK) == 0) {
gst_v4l2_vp8_enc_register (plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC,
NULL,
NULL);
if (gst_v4l2_has_vp8_encoder()) {
gst_v4l2_vp8_enc_register (plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC,
NULL,
NULL);
}
gst_v4l2_vp9_enc_register (plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC,

View File

@@ -1,7 +1,7 @@
/*
* Copyright (C) 2014 Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -1265,10 +1265,11 @@ gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
gsize maxsize, psize;
if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
maxsize = group->planes[i].length;
struct v4l2_pix_format_mplane *pix = &obj->format.fmt.pix_mp;
maxsize = pix->plane_fmt[i].sizeimage;
psize = size[i];
} else {
maxsize = group->planes[i].length;
maxsize = obj->format.fmt.pix.sizeimage;
psize = img_size;
}

View File

@@ -3,7 +3,7 @@
* Copyright (C) 2001-2002 Ronald Bultje <rbultje@ronald.bitfreak.net>
* 2006 Edgard Lima <edgard.lima@gmail.com>
* 2009 Texas Instruments, Inc - http://www.ti.com/
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
*
* gstv4l2bufferpool.c V4L2 buffer pool class
*
@@ -35,7 +35,6 @@
#include <sys/mman.h>
#include <string.h>
#include <unistd.h>
#include "gst/video/video.h"
#include "gst/video/gstvideometa.h"
#include "gst/video/gstvideopool.h"
@@ -135,6 +134,32 @@ done:
return valid;
}
/* Copy/scale the first surface of @src into @dest using the process-default
 * NvBufSurfTransform session with nearest-neighbour filtering over the
 * full frame. Returns the library status code. */
static NvBufSurfTransform_Error CopySurfTransform(NvBufSurface* src, NvBufSurface* dest)
{
  NvBufSurfTransform_Error status;
  /* Fix: zero-initialize so any NvBufSurfTransformParams / rect fields
   * that are not set explicitly below are not handed to the library as
   * uninitialized garbage. */
  NvBufSurfTransformParams transformParams = {0};
  NvBufSurfTransformRect srcRect = {0};
  NvBufSurfTransformRect destRect = {0};

  status = NvBufSurfTransformSetDefaultSession();
  if (status != NvBufSurfTransformError_Success)
  {
    return status;
  }

  /* Full-frame rectangles taken from surface 0 of each batch. */
  srcRect.top = srcRect.left = 0;
  destRect.top = destRect.left = 0;
  srcRect.width = src->surfaceList[0].width;
  srcRect.height = src->surfaceList[0].height;
  destRect.width = dest->surfaceList[0].width;
  destRect.height = dest->surfaceList[0].height;

  transformParams.src_rect = &srcRect;
  transformParams.dst_rect = &destRect;
  transformParams.transform_flag = NVBUFSURF_TRANSFORM_FILTER;
  transformParams.transform_flip = NvBufSurfTransform_None;
  transformParams.transform_filter = NvBufSurfTransformInter_Nearest;

  status = NvBufSurfTransform(src, dest, &transformParams);
  return status;
}
static GstFlowReturn
gst_v4l2_buffer_pool_copy_buffer (GstV4l2BufferPool * pool, GstBuffer * dest,
GstBuffer * src)
@@ -290,10 +315,11 @@ gst_v4l2_buffer_pool_copy_buffer (GstV4l2BufferPool * pool, GstBuffer * dest,
return GST_FLOW_ERROR;
}
if (NvBufSurfaceCopy(src_bufsurf, dst_bufsurf) != 0)
if (CopySurfTransform(src_bufsurf, dst_bufsurf) != NvBufSurfTransformError_Success)
{
g_print("ERROR in BufSurfacecopy \n");
return GST_FLOW_ERROR;
GST_ERROR_OBJECT(src, "ERROR in BufSurfacecopy \n");
gst_buffer_unmap(src, &inmap);
return GST_FLOW_ERROR;
}
gst_buffer_unmap(src, &inmap);
}
@@ -842,8 +868,14 @@ gst_v4l2_buffer_pool_set_config (GstBufferPool * bpool, GstStructure * config)
}
/* Always update the config to ensure the configured size matches */
gst_buffer_pool_config_set_params (config, caps, obj->info.size, min_buffers,
max_buffers);
if ((!strcmp (obj->videodev, V4L2_DEVICE_PATH_NVENC) || !strcmp (obj->videodev, V4L2_DEVICE_PATH_NVENC_ALT)) &&
(obj->mode == GST_V4L2_IO_DMABUF_IMPORT)) {
gst_buffer_pool_config_set_params (config, caps, sizeof (NvBufSurface), min_buffers,
max_buffers);
}
else
gst_buffer_pool_config_set_params (config, caps, obj->info.size, min_buffers,
max_buffers);
#ifdef USE_V4L2_TARGET_NV
/* Need to adjust the size to 0th plane's size since we will only output
v4l2 memory associated with 0th plane. */

View File

@@ -214,6 +214,8 @@ v4l2_profile_from_string (const gchar * profile)
v4l2_profile = V4L2_MPEG_VIDEO_H265_PROFILE_MAIN10;
} else if (g_str_equal (profile, "mainstillpicture")) {
v4l2_profile = V4L2_MPEG_VIDEO_H265_PROFILE_MAINSTILLPICTURE;
} else if (g_str_equal (profile, "frext")) {
v4l2_profile = V4L2_MPEG_VIDEO_H265_PROFILE_FREXT;
} else {
GST_WARNING ("Unsupported profile string '%s'", profile);
}
@@ -230,6 +232,8 @@ v4l2_profile_to_string (gint v4l2_profile)
return "main10";
case V4L2_MPEG_VIDEO_H265_PROFILE_MAINSTILLPICTURE:
return "mainstillpicture";
case V4L2_MPEG_VIDEO_H265_PROFILE_FREXT:
return "frext";
default:
GST_WARNING ("Unsupported V4L2 profile %i", v4l2_profile);
break;
@@ -427,6 +431,8 @@ gst_v4l2_videnc_profile_get_type (void)
"GST_V4L2_H265_VIDENC_MAIN_PROFILE", "Main"},
{V4L2_MPEG_VIDEO_H265_PROFILE_MAIN10,
"GST_V4L2_H265_VIDENC_MAIN10_PROFILE", "Main10"},
{V4L2_MPEG_VIDEO_H265_PROFILE_FREXT,
"GST_V4L2_H265_VIDENC_FREXT_PROFILE", "FREXT"},
{0, NULL, NULL}
};

View File

@@ -0,0 +1,924 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "nalutils.h"
#include "gstv4l2h26xparser.h"
#include <gst/base/gstbytereader.h>
#include <gst/base/gstbitreader.h>
#include <string.h>
#include <math.h>
GST_DEBUG_CATEGORY_STATIC (h26x_parser_debug);
#define GST_CAT_DEFAULT h26x_parser_debug
static gboolean initialized = FALSE;
#define INITIALIZE_DEBUG_CATEGORY \
if (!initialized) { \
GST_DEBUG_CATEGORY_INIT (h26x_parser_debug, "codecparsers_h26x", 0, \
"h26x parser library"); \
initialized = TRUE; \
}
/**** Default scaling_lists according to Table 7-2 *****/
static const guint8 default_4x4_intra[16] = {
6, 13, 13, 20, 20, 20, 28, 28, 28, 28, 32, 32,
32, 37, 37, 42
};
static const guint8 default_4x4_inter[16] = {
10, 14, 14, 20, 20, 20, 24, 24, 24, 24, 27, 27,
27, 30, 30, 34
};
static const guint8 default_8x8_intra[64] = {
6, 10, 10, 13, 11, 13, 16, 16, 16, 16, 18, 18,
18, 18, 18, 23, 23, 23, 23, 23, 23, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27,
27, 27, 27, 27, 27, 29, 29, 29, 29, 29, 29, 29, 31, 31, 31, 31, 31, 31, 33,
33, 33, 33, 33, 36, 36, 36, 36, 38, 38, 38, 40, 40, 42
};
static const guint8 default_8x8_inter[64] = {
9, 13, 13, 15, 13, 15, 17, 17, 17, 17, 19, 19,
19, 19, 19, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 24, 24, 24,
24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27, 27, 27, 28,
28, 28, 28, 28, 30, 30, 30, 30, 32, 32, 32, 33, 33, 35
};
/***** Utils ****/
#define EXTENDED_SAR 255
/* Decode the single-byte H.264 NAL unit header at nalu->offset, filling
 * in type, ref_idc, idr_pic_flag, header_bytes and extension_type.
 * Returns FALSE when fewer than 1 byte is available. */
static gboolean
h264_parse_nalu_header (H264NalUnit * nalu)
{
  const guint8 *hdr = nalu->data + nalu->offset;

  /* The NAL header is exactly one byte; bail out if it is missing. */
  if (nalu->size < 1)
    return FALSE;

  nalu->type = hdr[0] & 0x1f;           /* nal_unit_type: low 5 bits  */
  nalu->ref_idc = (hdr[0] & 0x60) >> 5; /* nal_ref_idc: bits 5..6     */
  nalu->idr_pic_flag = (nalu->type == 5) ? 1 : 0;
  nalu->header_bytes = 1;
  nalu->extension_type = H264_NAL_EXTENSION_NONE;

  GST_DEBUG ("Nal type %u, ref_idc %u", nalu->type, nalu->ref_idc);
  return TRUE;
}
/* Overwrite @dst_sps with a field-by-field (shallow) copy of @src_sps,
 * clearing whatever @dst_sps previously held first.
 * Returns FALSE (after a GLib critical warning) if either pointer is NULL. */
static gboolean
h264_sps_copy (H264SPS * dst_sps, const H264SPS * src_sps)
{
  g_return_val_if_fail (dst_sps != NULL, FALSE);
  g_return_val_if_fail (src_sps != NULL, FALSE);

  /* Release anything the destination still owns, then struct-assign.
   * NOTE(review): this is a shallow copy — if H264SPS holds owned
   * pointers they end up shared with @src_sps; confirm against the
   * H264SPS definition. */
  h264_sps_clear (dst_sps);
  *dst_sps = *src_sps;

  return TRUE;
}
/* Parse an H.264 scaling-list syntax structure into the six 4x4 and six
 * 8x8 matrices, applying the fallback rules for lists that are absent
 * from the bitstream (default lists per Table 7-2, or inheritance from
 * the previously parsed list / the caller-supplied fallbacks).
 * @nr: bitstream reader positioned at the scaling-list syntax
 * @n_lists: number of lists actually signalled in the stream
 * Returns FALSE on a bitstream read error (via the READ_* macros, which
 * jump to the error: label). */
static gboolean
h264_parser_parse_scaling_list (NalReader * nr,
    guint8 scaling_lists_4x4[6][16], guint8 scaling_lists_8x8[6][64],
    const guint8 fallback_4x4_inter[16], const guint8 fallback_4x4_intra[16],
    const guint8 fallback_8x8_inter[64], const guint8 fallback_8x8_intra[64],
    guint8 n_lists)
{
  guint i;

  static const guint8 *default_lists[12] = {
    default_4x4_intra, default_4x4_intra, default_4x4_intra,
    default_4x4_inter, default_4x4_inter, default_4x4_inter,
    default_8x8_intra, default_8x8_inter,
    default_8x8_intra, default_8x8_inter,
    default_8x8_intra, default_8x8_inter
  };

  GST_DEBUG ("parsing scaling lists");

  for (i = 0; i < 12; i++) {
    gboolean use_default = FALSE;

    if (i < n_lists) {
      guint8 scaling_list_present_flag;

      READ_UINT8 (nr, scaling_list_present_flag, 1);
      if (scaling_list_present_flag) {
        guint8 *scaling_list;
        guint size;
        guint j;
        guint8 last_scale, next_scale;

        /* Lists 0-5 are the 4x4 matrices, 6-11 the 8x8 ones. */
        if (i < 6) {
          scaling_list = scaling_lists_4x4[i];
          size = 16;
        } else {
          scaling_list = scaling_lists_8x8[i - 6];
          size = 64;
        }

        /* Delta-coded list: each entry is last_scale + delta (mod 256);
         * a next_scale of 0 freezes the value for the remaining entries. */
        last_scale = 8;
        next_scale = 8;
        for (j = 0; j < size; j++) {
          if (next_scale != 0) {
            gint32 delta_scale;

            READ_SE (nr, delta_scale);
            next_scale = (last_scale + delta_scale) & 0xff;
          }
          if (j == 0 && next_scale == 0) {
            /* Use default scaling lists (7.4.2.1.1.1) */
            memcpy (scaling_list, default_lists[i], size);
            break;
          }
          last_scale = scaling_list[j] =
              (next_scale == 0) ? last_scale : next_scale;
        }
      } else
        use_default = TRUE;
    } else
      use_default = TRUE;

    if (use_default) {
      /* Absent list: inherit from the caller-provided fallback or from
       * the matching previously-resolved list. */
      switch (i) {
        case 0:
          memcpy (scaling_lists_4x4[0], fallback_4x4_intra, 16);
          break;
        case 1:
          memcpy (scaling_lists_4x4[1], scaling_lists_4x4[0], 16);
          break;
        case 2:
          memcpy (scaling_lists_4x4[2], scaling_lists_4x4[1], 16);
          break;
        case 3:
          memcpy (scaling_lists_4x4[3], fallback_4x4_inter, 16);
          break;
        case 4:
          memcpy (scaling_lists_4x4[4], scaling_lists_4x4[3], 16);
          break;
        case 5:
          memcpy (scaling_lists_4x4[5], scaling_lists_4x4[4], 16);
          break;
        case 6:
          memcpy (scaling_lists_8x8[0], fallback_8x8_intra, 64);
          break;
        case 7:
          memcpy (scaling_lists_8x8[1], fallback_8x8_inter, 64);
          break;
        case 8:
          memcpy (scaling_lists_8x8[2], scaling_lists_8x8[0], 64);
          break;
        case 9:
          memcpy (scaling_lists_8x8[3], scaling_lists_8x8[1], 64);
          break;
        case 10:
          memcpy (scaling_lists_8x8[4], scaling_lists_8x8[2], 64);
          break;
        case 11:
          memcpy (scaling_lists_8x8[5], scaling_lists_8x8[3], 64);
          break;
        default:
          break;
      }
    }
  }
  return TRUE;

error:
  GST_WARNING ("error parsing scaling lists");
  return FALSE;
}
H264NalParser *
h264_nal_parser_new (void)
{
H264NalParser *nalparser;
nalparser = g_slice_new0 (H264NalParser);
INITIALIZE_DEBUG_CATEGORY;
return nalparser;
}
/* Release a parser created by h264_nal_parser_new(): clear every SPS
 * slot, then free the parser struct itself. */
void
h264_nal_parser_free (H264NalParser * nalparser)
{
  guint i;

  for (i = 0; i < H264_MAX_SPS_COUNT; i++)
    h264_sps_clear (&nalparser->sps[i]);
  g_slice_free (H264NalParser, nalparser);
  /* Fix: removed the dead "nalparser = NULL;" store — assigning to the
   * by-value parameter never affected the caller's pointer. */
}
/* Locate the next NAL unit in @data starting at @offset and fill @nalu
 * with its start-code offset, payload offset and parsed header fields.
 * "Unchecked" because it does not look for the NEXT start code, so
 * nalu->size extends to the end of the buffer (except for the 1-byte
 * end-of-seq / end-of-stream NALs). */
H264ParserResult
h264_parser_identify_nalu_unchecked (H264NalParser * nalparser,
    const guint8 * data, guint offset, gsize size, H264NalUnit * nalu)
{
  gint off1;

  memset (nalu, 0, sizeof (*nalu));

  /* Need room for a 3-byte start code plus at least the 1-byte header. */
  if (size < offset + 4) {
    GST_DEBUG ("Can't parse, buffer has too small size %" G_GSIZE_FORMAT
        ", offset %u", size, offset);
    return H264_PARSER_ERROR;
  }

  off1 = scan_for_start_codes (data + offset, size - offset);

  if (off1 < 0) {
    GST_DEBUG ("No start code prefix in this buffer");
    return H264_PARSER_NO_NAL;
  }

  /* Start code ends exactly at the last byte: no header byte follows. */
  if (offset + off1 == size - 1) {
    GST_DEBUG ("Missing data to identify nal unit");
    return H264_PARSER_ERROR;
  }

  nalu->sc_offset = offset + off1;
  nalu->offset = offset + off1 + 3;   /* payload begins after the 3-byte start code */
  nalu->data = (guint8 *) data;       /* borrowed, not copied */
  nalu->size = size - nalu->offset;

  if (!h264_parse_nalu_header (nalu)) {
    GST_WARNING ("error parsing \"NAL unit header\"");
    nalu->size = 0;
    return H264_PARSER_BROKEN_DATA;
  }

  nalu->valid = TRUE;

  /* sc might have 2 or 3 0-bytes: for SPS/PPS/AU-delimiter, fold a
   * preceding zero byte into the start-code offset. */
  if (nalu->sc_offset > 0 && data[nalu->sc_offset - 1] == 00
      && (nalu->type == H264_NAL_SPS || nalu->type == H264_NAL_PPS
          || nalu->type == H264_NAL_AU_DELIMITER))
    nalu->sc_offset--;

  /* These two NAL types are exactly one byte long. */
  if (nalu->type == H264_NAL_SEQ_END ||
      nalu->type == H264_NAL_STREAM_END) {
    GST_DEBUG ("end-of-seq or end-of-stream nal found");
    nalu->size = 1;
    return H264_PARSER_OK;
  }

  return H264_PARSER_OK;
}
/* Like the _unchecked variant, but additionally requires the *next*
 * start code to be present so nalu->size can be set to the exact NAL
 * length (trailing zero bytes excluded). */
H264ParserResult
h264_parser_identify_nalu (H264NalParser * nalparser,
    const guint8 * data, guint offset, gsize size, H264NalUnit * nalu)
{
  H264ParserResult res;
  gint off2;

  res =
      h264_parser_identify_nalu_unchecked (nalparser, data, offset, size,
      nalu);
  if (res != H264_PARSER_OK)
    goto beach;

  /* The two NALs are exactly 1 byte size and are placed at the end of an AU,
   * there is no need to wait for the following */
  if (nalu->type == H264_NAL_SEQ_END ||
      nalu->type == H264_NAL_STREAM_END)
    goto beach;

  off2 = scan_for_start_codes (data + nalu->offset, size - nalu->offset);
  if (off2 < 0) {
    GST_DEBUG ("Nal start %d, No end found", nalu->offset);
    return H264_PARSER_NO_NAL_END;
  }

  /* Mini performance improvement:
   * We could have a way to store how many 0s were skipped to avoid
   * parsing them again on the next NAL */
  while (off2 > 0 && data[nalu->offset + off2 - 1] == 00)
    off2--;

  nalu->size = off2;
  /* header byte plus at least one payload byte */
  if (nalu->size < 2)
    return H264_PARSER_BROKEN_DATA;

  GST_DEBUG ("Complete nal found. Off: %d, Size: %d", nalu->offset, nalu->size);

beach:
  return res;
}
/**
 * h264_parser_parse_sps:
 * @nalparser: parser context (currently unused here, see note below)
 * @nalu: the SPS NAL unit to parse
 * @sps: (out): parsed sequence parameter set
 * @parse_vui_params: forwarded to h264_parse_sps()
 *
 * Parse @nalu into @sps.
 *
 * Returns: %H264_PARSER_OK on success, an error code otherwise.
 */
H264ParserResult
h264_parser_parse_sps (H264NalParser * nalparser, H264NalUnit * nalu,
    H264SPS * sps, gboolean parse_vui_params)
{
  H264ParserResult res = h264_parse_sps (nalu, sps, parse_vui_params);

  /* NOTE(review): an unreachable copy of the upstream SPS-caching code
   * (storing into nalparser->sps[sps->id] and nalparser->last_sps) sat
   * after an unconditional return here; it has been removed as dead
   * code.  If any caller relies on nalparser->last_sps being populated,
   * the caching must be restored instead — verify against users of
   * #H264NalParser. */
  (void) nalparser;

  return res;
}
/* Parse seq_parameter_set_data() (Rec. ITU-T H.264 section 7.3.2.1.1)
 * from @nr into @sps.  Returns FALSE on any bitstream error. */
static gboolean
h264_parse_sps_data (NalReader * nr, H264SPS * sps,
    gboolean parse_vui_params)
{
  gint width, height;
  /* horizontal / vertical chroma subsampling factors indexed by
   * chroma_format_idc (0 = mono, 1 = 4:2:0, 2 = 4:2:2, 3 = 4:4:4) */
  guint subwc[] = { 1, 2, 2, 1 };
  guint subhc[] = { 1, 2, 1, 1 };

  memset (sps, 0, sizeof (*sps));

  /* set default values for fields that might not be present in the bitstream
     and have valid defaults */
  sps->extension_type = H264_NAL_EXTENSION_NONE;
  sps->chroma_format_idc = 1;
  /* "flat" default scaling lists: 6 x 16 and 6 x 64 entries of value 16 */
  memset (sps->scaling_lists_4x4, 16, 96);
  memset (sps->scaling_lists_8x8, 16, 384);

  READ_UINT8 (nr, sps->profile_idc, 8);
  READ_UINT8 (nr, sps->constraint_set0_flag, 1);
  READ_UINT8 (nr, sps->constraint_set1_flag, 1);
  READ_UINT8 (nr, sps->constraint_set2_flag, 1);
  READ_UINT8 (nr, sps->constraint_set3_flag, 1);
  READ_UINT8 (nr, sps->constraint_set4_flag, 1);
  READ_UINT8 (nr, sps->constraint_set5_flag, 1);

  /* skip reserved_zero_2bits */
  if (!_skip (nr, 2))
    goto error;

  READ_UINT8 (nr, sps->level_idc, 8);
  READ_UE_MAX (nr, sps->id, H264_MAX_SPS_COUNT - 1);

  /* profiles that carry the extended chroma / bit-depth fields */
  if (sps->profile_idc == 100 || sps->profile_idc == 110 ||
      sps->profile_idc == 122 || sps->profile_idc == 244 ||
      sps->profile_idc == 44 || sps->profile_idc == 83 ||
      sps->profile_idc == 86 || sps->profile_idc == 118 ||
      sps->profile_idc == 128) {
    READ_UE_MAX (nr, sps->chroma_format_idc, 3);
    if (sps->chroma_format_idc == 3)
      READ_UINT8 (nr, sps->separate_colour_plane_flag, 1);
    READ_UE_MAX (nr, sps->bit_depth_luma_minus8, 6);
    READ_UE_MAX (nr, sps->bit_depth_chroma_minus8, 6);
    READ_UINT8 (nr, sps->qpprime_y_zero_transform_bypass_flag, 1);
    READ_UINT8 (nr, sps->scaling_matrix_present_flag, 1);
    if (sps->scaling_matrix_present_flag) {
      guint8 n_lists;

      /* 8 lists unless 4:4:4, which carries 12 */
      n_lists = (sps->chroma_format_idc != 3) ? 8 : 12;
      if (!h264_parser_parse_scaling_list (nr,
              sps->scaling_lists_4x4, sps->scaling_lists_8x8,
              default_4x4_inter, default_4x4_intra,
              default_8x8_inter, default_8x8_intra, n_lists))
        goto error;
    }
  }

  READ_UE_MAX (nr, sps->log2_max_frame_num_minus4, 12);
  /* derived value: MaxFrameNum */
  sps->max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);

  READ_UE_MAX (nr, sps->pic_order_cnt_type, 2);
  if (sps->pic_order_cnt_type == 0) {
    READ_UE_MAX (nr, sps->log2_max_pic_order_cnt_lsb_minus4, 12);
  } else if (sps->pic_order_cnt_type == 1) {
    guint i;

    READ_UINT8 (nr, sps->delta_pic_order_always_zero_flag, 1);
    READ_SE (nr, sps->offset_for_non_ref_pic);
    READ_SE (nr, sps->offset_for_top_to_bottom_field);
    READ_UE_MAX (nr, sps->num_ref_frames_in_pic_order_cnt_cycle, 255);
    for (i = 0; i < sps->num_ref_frames_in_pic_order_cnt_cycle; i++)
      READ_SE (nr, sps->offset_for_ref_frame[i]);
  }

  READ_UE (nr, sps->num_ref_frames);
  READ_UINT8 (nr, sps->gaps_in_frame_num_value_allowed_flag, 1);
  READ_UE (nr, sps->pic_width_in_mbs_minus1);
  READ_UE (nr, sps->pic_height_in_map_units_minus1);
  READ_UINT8 (nr, sps->frame_mbs_only_flag, 1);

  if (!sps->frame_mbs_only_flag)
    READ_UINT8 (nr, sps->mb_adaptive_frame_field_flag, 1);

  READ_UINT8 (nr, sps->direct_8x8_inference_flag, 1);
  READ_UINT8 (nr, sps->frame_cropping_flag, 1);
  if (sps->frame_cropping_flag) {
    READ_UE (nr, sps->frame_crop_left_offset);
    READ_UE (nr, sps->frame_crop_right_offset);
    READ_UE (nr, sps->frame_crop_top_offset);
    READ_UE (nr, sps->frame_crop_bottom_offset);
  }

  /* calculate ChromaArrayType */
  if (!sps->separate_colour_plane_flag)
    sps->chroma_array_type = sps->chroma_format_idc;

  /* Calculate width and height */
  width = (sps->pic_width_in_mbs_minus1 + 1);
  width *= 16;
  height = (sps->pic_height_in_map_units_minus1 + 1);
  /* map units are 16 or 32 pixels tall depending on field/frame coding */
  height *= 16 * (2 - sps->frame_mbs_only_flag);
  GST_LOG ("initial width=%d, height=%d", width, height);

  /* negative values mean the gint computations above overflowed */
  if (width < 0 || height < 0) {
    GST_WARNING ("invalid width/height in SPS");
    goto error;
  }

  sps->width = width;
  sps->height = height;

  if (sps->frame_cropping_flag) {
    const guint crop_unit_x = subwc[sps->chroma_format_idc];
    const guint crop_unit_y =
        subhc[sps->chroma_format_idc] * (2 - sps->frame_mbs_only_flag);

    width -= (sps->frame_crop_left_offset + sps->frame_crop_right_offset)
        * crop_unit_x;
    height -= (sps->frame_crop_top_offset + sps->frame_crop_bottom_offset)
        * crop_unit_y;

    sps->crop_rect_width = width;
    sps->crop_rect_height = height;
    sps->crop_rect_x = sps->frame_crop_left_offset * crop_unit_x;
    sps->crop_rect_y = sps->frame_crop_top_offset * crop_unit_y;

    GST_LOG ("crop_rectangle x=%u y=%u width=%u, height=%u", sps->crop_rect_x,
        sps->crop_rect_y, width, height);
  }

  /* framerate is not parsed in this stripped-down variant */
  sps->fps_num_removed = 0;
  sps->fps_den_removed = 1;

  return TRUE;

error:
  return FALSE;
}
/**
 * h264_parse_sps:
 * @nalu: the SPS NAL unit
 * @sps: (out): parsed sequence parameter set
 * @parse_vui_params: forwarded to the payload parser
 *
 * Parse the payload of @nalu as seq_parameter_set_data().
 *
 * Returns: %H264_PARSER_OK on success, %H264_PARSER_ERROR otherwise;
 * sps->valid reflects the outcome either way.
 */
H264ParserResult
h264_parse_sps (H264NalUnit * nalu, H264SPS * sps,
    gboolean parse_vui_params)
{
  NalReader nr;

  INITIALIZE_DEBUG_CATEGORY;
  GST_DEBUG ("parsing SPS");

  /* payload starts right after the NAL header */
  init_nal (&nr, nalu->data + nalu->offset + nalu->header_bytes,
      nalu->size - nalu->header_bytes);

  if (h264_parse_sps_data (&nr, sps, parse_vui_params)) {
    sps->valid = TRUE;
    return H264_PARSER_OK;
  }

  GST_WARNING ("error parsing \"Sequence parameter set\"");
  sps->valid = FALSE;
  return H264_PARSER_ERROR;
}
/* Release any resources held by @sps.  In this stripped-down parser the
 * SPS owns no dynamic memory, so this only validates the pointer. */
void
h264_sps_clear (H264SPS * sps)
{
  g_return_if_fail (sps != NULL);
}
/************************** H265 *****************************/

/* Parse the 2-byte HEVC NAL unit header at nalu->offset, filling in
 * type / layer_id / temporal_id_plus1 and header_bytes. */
static gboolean
h265_parse_nalu_header (H265NalUnit * nalu)
{
  guint8 *data = nalu->data + nalu->offset;
  GstBitReader br;

  if (nalu->size < 2)
    return FALSE;

  /* NOTE(review): @data already points past nalu->offset, so the
   * available length here looks like it should be nalu->size rather
   * than nalu->size - nalu->offset.  Harmless in practice since only
   * _unchecked reads follow, but verify against the upstream parser. */
  gst_bit_reader_init (&br, data, nalu->size - nalu->offset);

  /* skip the forbidden_zero_bit */
  gst_bit_reader_skip_unchecked (&br, 1);

  nalu->type = gst_bit_reader_get_bits_uint8_unchecked (&br, 6);
  nalu->layer_id = gst_bit_reader_get_bits_uint8_unchecked (&br, 6);
  nalu->temporal_id_plus1 = gst_bit_reader_get_bits_uint8_unchecked (&br, 3);
  nalu->header_bytes = 2;

  return TRUE;
}
/****** Parsing functions *****/

/* Parse profile_tier_level() (Rec. ITU-T H.265 section 7.3.3) into @ptl.
 * @maxNumSubLayersMinus1: sps_max_sub_layers_minus1 supplied by the caller.
 * Returns FALSE on bitstream error or an invalid sub-layer count. */
static gboolean
h265_parse_profile_tier_level (H265ProfileTierLevel * ptl,
    NalReader * nr, guint8 maxNumSubLayersMinus1)
{
  guint i, j;
  GST_DEBUG ("parsing \"ProfileTierLevel parameters\"");

  /* The sub_layer_* arrays in H265ProfileTierLevel hold 6 entries (the
   * spec limits sps_max_sub_layers_minus1 to 0..6), but the value is
   * read from a 3-bit field and a corrupt stream can deliver 7.
   * Reject it here to avoid out-of-bounds writes in the loops below. */
  if (maxNumSubLayersMinus1 > 6)
    goto error;

  READ_UINT8 (nr, ptl->profile_space, 2);
  READ_UINT8 (nr, ptl->tier_flag, 1);
  READ_UINT8 (nr, ptl->profile_idc, 5);

  for (j = 0; j < 32; j++)
    READ_UINT8 (nr, ptl->profile_compatibility_flag[j], 1);

  READ_UINT8 (nr, ptl->progressive_source_flag, 1);
  READ_UINT8 (nr, ptl->interlaced_source_flag, 1);
  READ_UINT8 (nr, ptl->non_packed_constraint_flag, 1);
  READ_UINT8 (nr, ptl->frame_only_constraint_flag, 1);
  READ_UINT8 (nr, ptl->max_12bit_constraint_flag, 1);
  READ_UINT8 (nr, ptl->max_10bit_constraint_flag, 1);
  READ_UINT8 (nr, ptl->max_8bit_constraint_flag, 1);
  READ_UINT8 (nr, ptl->max_422chroma_constraint_flag, 1);
  READ_UINT8 (nr, ptl->max_420chroma_constraint_flag, 1);
  READ_UINT8 (nr, ptl->max_monochrome_constraint_flag, 1);
  READ_UINT8 (nr, ptl->intra_constraint_flag, 1);
  READ_UINT8 (nr, ptl->one_picture_only_constraint_flag, 1);
  READ_UINT8 (nr, ptl->lower_bit_rate_constraint_flag, 1);
  READ_UINT8 (nr, ptl->max_14bit_constraint_flag, 1);

  /* skip the reserved zero bits */
  if (!_skip (nr, 34))
    goto error;

  READ_UINT8 (nr, ptl->level_idc, 8);

  for (j = 0; j < maxNumSubLayersMinus1; j++) {
    READ_UINT8 (nr, ptl->sub_layer_profile_present_flag[j], 1);
    READ_UINT8 (nr, ptl->sub_layer_level_present_flag[j], 1);
  }

  /* reserved_zero_2bits alignment up to 8 sub-layers */
  if (maxNumSubLayersMinus1 > 0) {
    for (i = maxNumSubLayersMinus1; i < 8; i++)
      if (!_skip (nr, 2))
        goto error;
  }

  for (i = 0; i < maxNumSubLayersMinus1; i++) {
    if (ptl->sub_layer_profile_present_flag[i]) {
      READ_UINT8 (nr, ptl->sub_layer_profile_space[i], 2);
      READ_UINT8 (nr, ptl->sub_layer_tier_flag[i], 1);
      READ_UINT8 (nr, ptl->sub_layer_profile_idc[i], 5);

      for (j = 0; j < 32; j++)
        READ_UINT8 (nr, ptl->sub_layer_profile_compatibility_flag[i][j], 1);

      READ_UINT8 (nr, ptl->sub_layer_progressive_source_flag[i], 1);
      READ_UINT8 (nr, ptl->sub_layer_interlaced_source_flag[i], 1);
      READ_UINT8 (nr, ptl->sub_layer_non_packed_constraint_flag[i], 1);
      READ_UINT8 (nr, ptl->sub_layer_frame_only_constraint_flag[i], 1);

      if (!_skip (nr, 44))
        goto error;
    }

    if (ptl->sub_layer_level_present_flag[i])
      READ_UINT8 (nr, ptl->sub_layer_level_idc[i], 8);
  }

  return TRUE;

error:
  GST_WARNING ("error parsing \"ProfileTierLevel Parameters\"");
  return FALSE;
}
/**
 * h265_parser_new:
 *
 * Allocate a zero-initialized #H265Parser.
 * Release it with h265_parser_free().
 *
 * Returns: (transfer full): a new parser instance
 */
H265Parser *
h265_parser_new (void)
{
  INITIALIZE_DEBUG_CATEGORY;
  return g_slice_new0 (H265Parser);
}
/**
 * h265_parser_free:
 * @parser: the #H265Parser to free (NULL is ignored)
 *
 * Release a parser allocated with h265_parser_new().
 */
void
h265_parser_free (H265Parser * parser)
{
  /* Tolerate NULL like g_free() does.  The former "parser = NULL;" only
   * cleared the local pointer copy and was removed as a dead store. */
  if (parser == NULL)
    return;

  g_slice_free (H265Parser, parser);
}
/* Locate and validate the first H.265 NAL unit at/after @offset in
 * @data.  Does not require the end of the NAL to be present:
 * nalu->size is provisionally everything up to the end of the buffer. */
H265ParserResult
h265_parser_identify_nalu_unchecked (H265Parser * parser,
    const guint8 * data, guint offset, gsize size, H265NalUnit * nalu)
{
  gint off1;

  memset (nalu, 0, sizeof (*nalu));

  /* need a 3-byte start code plus at least one header byte */
  if (size < offset + 4) {
    GST_DEBUG ("Can't parse, buffer has too small size %" G_GSIZE_FORMAT
        ", offset %u", size, offset);
    return H265_PARSER_ERROR;
  }

  off1 = scan_for_start_codes (data + offset, size - offset);

  if (off1 < 0) {
    GST_DEBUG ("No start code prefix in this buffer");
    return H265_PARSER_NO_NAL;
  }

  /* start code found but no byte follows it: NAL header unreadable */
  if (offset + off1 == size - 1) {
    GST_DEBUG ("Missing data to identify nal unit");
    return H265_PARSER_ERROR;
  }

  nalu->sc_offset = offset + off1;
  /* sc might have 2 or 3 0-bytes */
  if (nalu->sc_offset > 0 && data[nalu->sc_offset - 1] == 00)
    nalu->sc_offset--;

  nalu->offset = offset + off1 + 3;
  nalu->data = (guint8 *) data;
  /* provisional size: up to the end of the buffer */
  nalu->size = size - nalu->offset;

  if (!h265_parse_nalu_header (nalu)) {
    GST_WARNING ("error parsing \"NAL unit header\"");
    nalu->size = 0;
    return H265_PARSER_BROKEN_DATA;
  }

  nalu->valid = TRUE;

  if (nalu->type == H265_NAL_EOS || nalu->type == H265_NAL_EOB) {
    GST_DEBUG ("end-of-seq or end-of-stream nal found");
    nalu->size = 2;             /* EOS/EOB are exactly the 2-byte header */
    return H265_PARSER_OK;
  }

  return H265_PARSER_OK;
}
/* Like the _unchecked variant, but additionally requires the *next*
 * start code to be present so nalu->size can be set to the exact NAL
 * length (trailing zero bytes excluded). */
H265ParserResult
h265_parser_identify_nalu (H265Parser * parser,
    const guint8 * data, guint offset, gsize size, H265NalUnit * nalu)
{
  H265ParserResult res;
  gint off2;

  res =
      h265_parser_identify_nalu_unchecked (parser, data, offset, size,
      nalu);
  if (res != H265_PARSER_OK)
    goto beach;

  /* The two NALs are exactly 2 bytes size and are placed at the end of an AU,
   * there is no need to wait for the following */
  if (nalu->type == H265_NAL_EOS || nalu->type == H265_NAL_EOB)
    goto beach;

  off2 = scan_for_start_codes (data + nalu->offset, size - nalu->offset);
  if (off2 < 0) {
    GST_DEBUG ("Nal start %d, No end found", nalu->offset);
    return H265_PARSER_NO_NAL_END;
  }

  /* Mini performance improvement:
   * We could have a way to store how many 0s were skipped to avoid
   * parsing them again on the next NAL */
  while (off2 > 0 && data[nalu->offset + off2 - 1] == 00)
    off2--;

  nalu->size = off2;
  /* 2-byte header plus at least one payload byte */
  if (nalu->size < 3)
    return H265_PARSER_BROKEN_DATA;

  GST_DEBUG ("Complete nal found. Off: %d, Size: %d", nalu->offset, nalu->size);

beach:
  return res;
}
/* Identify a NAL in "hevc" (length-prefixed, ISO/IEC 14496-15) stream
 * format: the NAL size is carried in the first @nal_length_size bytes
 * instead of being delimited by start codes. */
H265ParserResult
h265_parser_identify_nalu_hevc (H265Parser * parser,
    const guint8 * data, guint offset, gsize size, guint8 nal_length_size,
    H265NalUnit * nalu)
{
  GstBitReader br;

  memset (nalu, 0, sizeof (*nalu));

  if (size < offset + nal_length_size) {
    GST_DEBUG ("Can't parse, buffer has too small size %" G_GSIZE_FORMAT
        ", offset %u", size, offset);
    return H265_PARSER_ERROR;
  }

  size = size - offset;
  gst_bit_reader_init (&br, data + offset, size);

  /* read the big-endian length prefix */
  nalu->size = gst_bit_reader_get_bits_uint32_unchecked (&br,
      nal_length_size * 8);
  nalu->sc_offset = offset;
  nalu->offset = offset + nal_length_size;

  /* NOTE(review): nalu->size + nal_length_size can wrap for corrupt
   * length prefixes near G_MAXUINT — verify callers bound @size. */
  if (size < nalu->size + nal_length_size) {
    nalu->size = 0;
    return H265_PARSER_NO_NAL_END;
  }

  nalu->data = (guint8 *) data;

  if (!h265_parse_nalu_header (nalu)) {
    GST_WARNING ("error parsing \"NAL unit header\"");
    nalu->size = 0;
    return H265_PARSER_BROKEN_DATA;
  }

  /* too short to even hold the 2-byte NAL header */
  if (nalu->size < 2)
    return H265_PARSER_BROKEN_DATA;

  nalu->valid = TRUE;

  return H265_PARSER_OK;
}
/**
 * h265_parser_parse_sps:
 * @parser: parser context
 * @nalu: the SPS NAL unit to parse
 * @sps: (out): parsed sequence parameter set
 * @parse_vui_params: forwarded to h265_parse_sps()
 *
 * Parse @nalu into @sps.
 *
 * Returns: %H265_PARSER_OK on success, an error code otherwise.
 */
H265ParserResult
h265_parser_parse_sps (H265Parser * parser, H265NalUnit * nalu,
    H265SPS * sps, gboolean parse_vui_params)
{
  H265ParserResult res =
      h265_parse_sps (parser, nalu, sps, parse_vui_params);

  /* NOTE(review): an unreachable copy of the upstream SPS-caching code
   * (storing into parser->sps[sps->id] and parser->last_sps) sat after
   * an unconditional return here; it has been removed as dead code.  If
   * any caller relies on parser->last_sps being populated, the caching
   * must be restored instead — verify against users of #H265Parser. */
  return res;
}
/* Parse the payload of an SPS NAL into @sps (seq_parameter_set_rbsp(),
 * Rec. ITU-T H.265 section 7.3.2.2).  This stripped-down parser stops
 * before the scaling-list / VUI portion of the syntax. */
H265ParserResult
h265_parse_sps (H265Parser * parser, H265NalUnit * nalu,
    H265SPS * sps, gboolean parse_vui_params)
{
  NalReader nr;
  guint8 vps_id;
  guint i;
  /* chroma subsampling factors indexed by chroma_format_idc */
  guint subwc[] = { 1, 2, 2, 1, 1 };
  guint subhc[] = { 1, 2, 1, 1, 1 };

  INITIALIZE_DEBUG_CATEGORY;
  GST_DEBUG ("parsing SPS");

  /* payload starts right after the 2-byte NAL header */
  init_nal (&nr, nalu->data + nalu->offset + nalu->header_bytes,
      nalu->size - nalu->header_bytes);

  memset (sps, 0, sizeof (*sps));

  /* sps_video_parameter_set_id is read but not stored */
  READ_UINT8 (&nr, vps_id, 4);
  /* NOTE(review): 3-bit field, so a corrupt stream can deliver 7 here
   * while the ProfileTierLevel sub-layer arrays assume the spec maximum
   * of 6 — verify h265_parse_profile_tier_level rejects that case */
  READ_UINT8 (&nr, sps->max_sub_layers_minus1, 3);
  READ_UINT8 (&nr, sps->temporal_id_nesting_flag, 1);

  if (!h265_parse_profile_tier_level (&sps->profile_tier_level, &nr,
      sps->max_sub_layers_minus1))
    goto error;

  READ_UE_MAX (&nr, sps->id, H265_MAX_SPS_COUNT - 1);
  READ_UE_MAX (&nr, sps->chroma_format_idc, 3);
  if (sps->chroma_format_idc == 3)
    READ_UINT8 (&nr, sps->separate_colour_plane_flag, 1);

  READ_UE_ALLOWED (&nr, sps->pic_width_in_luma_samples, 1, 16888);
  READ_UE_ALLOWED (&nr, sps->pic_height_in_luma_samples, 1, 16888);

  READ_UINT8 (&nr, sps->conformance_window_flag, 1);
  if (sps->conformance_window_flag) {
    READ_UE (&nr, sps->conf_win_left_offset);
    READ_UE (&nr, sps->conf_win_right_offset);
    READ_UE (&nr, sps->conf_win_top_offset);
    READ_UE (&nr, sps->conf_win_bottom_offset);
  }

  READ_UE_MAX (&nr, sps->bit_depth_luma_minus8, 6);
  READ_UE_MAX (&nr, sps->bit_depth_chroma_minus8, 6);
  READ_UE_MAX (&nr, sps->log2_max_pic_order_cnt_lsb_minus4, 12);

  READ_UINT8 (&nr, sps->sub_layer_ordering_info_present_flag, 1);
  /* DPB info is present either per sub-layer or only for the highest */
  for (i =
      (sps->sub_layer_ordering_info_present_flag ? 0 :
          sps->max_sub_layers_minus1); i <= sps->max_sub_layers_minus1; i++) {
    READ_UE_MAX (&nr, sps->max_dec_pic_buffering_minus1[i], 16);
    READ_UE_MAX (&nr, sps->max_num_reorder_pics[i],
        sps->max_dec_pic_buffering_minus1[i]);
    READ_UE_MAX (&nr, sps->max_latency_increase_plus1[i], G_MAXUINT32 - 1);
  }

  /* setting default values if sps->sub_layer_ordering_info_present_flag is zero */
  if (!sps->sub_layer_ordering_info_present_flag && sps->max_sub_layers_minus1) {
    for (i = 0; i <= (guint)(sps->max_sub_layers_minus1 - 1); i++) {
      sps->max_dec_pic_buffering_minus1[i] =
          sps->max_dec_pic_buffering_minus1[sps->max_sub_layers_minus1];
      sps->max_num_reorder_pics[i] =
          sps->max_num_reorder_pics[sps->max_sub_layers_minus1];
      sps->max_latency_increase_plus1[i] =
          sps->max_latency_increase_plus1[sps->max_sub_layers_minus1];
    }
  }

  /* The limits are calculated based on the profile_tier_level constraint
   * in Annex-A: CtbLog2SizeY = 4 to 6 */
  READ_UE_MAX (&nr, sps->log2_min_luma_coding_block_size_minus3, 3);
  READ_UE_MAX (&nr, sps->log2_diff_max_min_luma_coding_block_size, 6);
  READ_UE_MAX (&nr, sps->log2_min_transform_block_size_minus2, 3);
  READ_UE_MAX (&nr, sps->log2_diff_max_min_transform_block_size, 3);
  READ_UE_MAX (&nr, sps->max_transform_hierarchy_depth_inter, 4);
  READ_UE_MAX (&nr, sps->max_transform_hierarchy_depth_intra, 4);

  /* Calculate width and height */
  sps->width = sps->pic_width_in_luma_samples;
  sps->height = sps->pic_height_in_luma_samples;
  if (sps->width < 0 || sps->height < 0) {
    GST_WARNING ("invalid width/height in SPS");
    goto error;
  }

  if (sps->conformance_window_flag) {
    const guint crop_unit_x = subwc[sps->chroma_format_idc];
    const guint crop_unit_y = subhc[sps->chroma_format_idc];

    sps->crop_rect_width = sps->width -
        (sps->conf_win_left_offset + sps->conf_win_right_offset) * crop_unit_x;
    sps->crop_rect_height = sps->height -
        (sps->conf_win_top_offset + sps->conf_win_bottom_offset) * crop_unit_y;
    sps->crop_rect_x = sps->conf_win_left_offset * crop_unit_x;
    sps->crop_rect_y = sps->conf_win_top_offset * crop_unit_y;

    GST_LOG ("crop_rectangle x=%u y=%u width=%u, height=%u", sps->crop_rect_x,
        sps->crop_rect_y, sps->crop_rect_width, sps->crop_rect_height);
  }

  /* framerate is not parsed in this stripped-down variant */
  sps->fps_num = 0;
  sps->fps_den = 1;

  sps->valid = TRUE;

  return H265_PARSER_OK;

error:
  GST_WARNING ("error parsing \"Sequence parameter set\"");
  sps->valid = FALSE;
  return H265_PARSER_ERROR;
}

View File

@@ -0,0 +1,462 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#ifndef __H26X_PARSER_H__
#define __H26X_PARSER_H__
#include <gst/gst.h>
G_BEGIN_DECLS
#define H264_MAX_SPS_COUNT 32
/* H.264 NAL unit types (Rec. ITU-T H.264, Table 7-1) */
typedef enum
{
  H264_NAL_UNKNOWN = 0,
  H264_NAL_SLICE = 1,
  H264_NAL_SLICE_DPA = 2,
  H264_NAL_SLICE_DPB = 3,
  H264_NAL_SLICE_DPC = 4,
  H264_NAL_SLICE_IDR = 5,
  H264_NAL_SEI = 6,
  H264_NAL_SPS = 7,
  H264_NAL_PPS = 8,
  H264_NAL_AU_DELIMITER = 9,
  H264_NAL_SEQ_END = 10,
  H264_NAL_STREAM_END = 11,
  H264_NAL_FILLER_DATA = 12,
  H264_NAL_SPS_EXT = 13,
  H264_NAL_PREFIX_UNIT = 14,
  H264_NAL_SUBSET_SPS = 15,
  H264_NAL_DEPTH_SPS = 16,
  H264_NAL_SLICE_AUX = 19,
  H264_NAL_SLICE_EXT = 20,
  H264_NAL_SLICE_DEPTH = 21
} H264NalUnitType;

/* Which (if any) NAL header extension is present (SVC / MVC) */
typedef enum
{
  H264_NAL_EXTENSION_NONE = 0,
  H264_NAL_EXTENSION_SVC,
  H264_NAL_EXTENSION_MVC,
} H264NalUnitExtensionType;

/* Result codes returned by the H.264 parsing entry points */
typedef enum
{
  H264_PARSER_OK,
  H264_PARSER_BROKEN_DATA,      /* stream data is corrupt */
  H264_PARSER_BROKEN_LINK,
  H264_PARSER_ERROR,
  H264_PARSER_NO_NAL,           /* no start code found in the buffer */
  H264_PARSER_NO_NAL_END        /* NAL start found but its end is not */
} H264ParserResult;

/* Frame packing arrangement types */
typedef enum
{
  H264_FRAME_PACKING_NONE = 6,
  H264_FRAME_PACKING_CHECKERBOARD_INTERLEAVING = 0,
  H264_FRAME_PACKING_COLUMN_INTERLEAVING = 1,
  H264_FRAME_PACKING_ROW_INTERLEAVING = 2,
  H264_FRAME_PACKING_SIDE_BY_SIDE = 3,
  H264_FRMAE_PACKING_TOP_BOTTOM = 4,    /* sic: typo kept for API compatibility */
  H264_FRAME_PACKING_TEMPORAL_INTERLEAVING = 5
} H264FramePackingType;

/* Slice types; the H264_S_* values are the "switching"/all-slices
 * variants of the base type */
typedef enum
{
  H264_P_SLICE = 0,
  H264_B_SLICE = 1,
  H264_I_SLICE = 2,
  H264_SP_SLICE = 3,
  H264_SI_SLICE = 4,
  H264_S_P_SLICE = 5,
  H264_S_B_SLICE = 6,
  H264_S_I_SLICE = 7,
  H264_S_SP_SLICE = 8,
  H264_S_SI_SLICE = 9
} H264SliceType;

/* Source scan type (ct_type) */
typedef enum
{
  H264_CT_TYPE_PROGRESSIVE = 0,
  H264_CT_TYPE_INTERLACED = 1,
  H264_CT_TYPE_UNKNOWN = 2,
} CtType;
typedef struct _H264NalParser H264NalParser;
typedef struct _H264NalUnit H264NalUnit;
typedef struct _H264SPS H264SPS;

/* One identified NAL unit.  @data borrows the caller's buffer; the
 * struct owns no memory. */
struct _H264NalUnit
{
  guint16 ref_idc;
  guint16 type;                 /* H264NalUnitType */
  /* calculated values */
  guint8 idr_pic_flag;
  guint size;                   /* bytes from @offset to the end of the NAL */
  guint offset;                 /* first byte after the start code */
  guint sc_offset;              /* first byte of the start code itself */
  gboolean valid;
  guint8 *data;                 /* borrowed pointer to the whole buffer */
  guint8 header_bytes;          /* length of the parsed NAL header */
  guint8 extension_type;        /* H264NalUnitExtensionType */
};

/* Parsed H.264 sequence parameter set; field names mirror the
 * seq_parameter_set_data() syntax of Rec. ITU-T H.264. */
struct _H264SPS
{
  gint id;                      /* seq_parameter_set_id */
  guint8 profile_idc;
  guint8 constraint_set0_flag;
  guint8 constraint_set1_flag;
  guint8 constraint_set2_flag;
  guint8 constraint_set3_flag;
  guint8 constraint_set4_flag;
  guint8 constraint_set5_flag;
  guint8 level_idc;
  guint8 chroma_format_idc;     /* defaults to 1 (4:2:0) when absent */
  guint8 separate_colour_plane_flag;
  guint8 bit_depth_luma_minus8;
  guint8 bit_depth_chroma_minus8;
  guint8 qpprime_y_zero_transform_bypass_flag;
  guint8 scaling_matrix_present_flag;
  guint8 scaling_lists_4x4[6][16];
  guint8 scaling_lists_8x8[6][64];
  guint8 log2_max_frame_num_minus4;
  guint8 pic_order_cnt_type;
  /* if pic_order_cnt_type == 0 */
  guint8 log2_max_pic_order_cnt_lsb_minus4;
  /* else if pic_order_cnt_type == 1 */
  guint8 delta_pic_order_always_zero_flag;
  gint32 offset_for_non_ref_pic;
  gint32 offset_for_top_to_bottom_field;
  guint8 num_ref_frames_in_pic_order_cnt_cycle;
  gint32 offset_for_ref_frame[255];
  guint32 num_ref_frames;
  guint8 gaps_in_frame_num_value_allowed_flag;
  guint32 pic_width_in_mbs_minus1;
  guint32 pic_height_in_map_units_minus1;
  guint8 frame_mbs_only_flag;
  guint8 mb_adaptive_frame_field_flag;
  guint8 direct_8x8_inference_flag;
  guint8 frame_cropping_flag;
  /* if frame_cropping_flag */
  guint32 frame_crop_left_offset;
  guint32 frame_crop_right_offset;
  guint32 frame_crop_top_offset;
  guint32 frame_crop_bottom_offset;
  guint8 vui_parameters_present_flag;
  /* calculated values */
  guint8 chroma_array_type;
  guint32 max_frame_num;        /* 1 << (log2_max_frame_num_minus4 + 4) */
  gint width, height;           /* uncropped dimensions in pixels */
  gint crop_rect_width, crop_rect_height;
  gint crop_rect_x, crop_rect_y;
  gint fps_num_removed, fps_den_removed;        /* FIXME: remove */
  gboolean valid;
  /* Subset SPS extensions */
  guint8 extension_type;
};

/* Parser context.  sps[] / last_sps are storage for parsed parameter
 * sets.  NOTE(review): the accompanying stripped-down parser does not
 * appear to populate them — verify before relying on last_sps. */
struct _H264NalParser
{
  /*< private >*/
  H264SPS sps[H264_MAX_SPS_COUNT];
  H264SPS *last_sps;
};
/* Allocate a new parser; release with h264_nal_parser_free(). */
H264NalParser *h264_nal_parser_new (void);

/* Find the next NAL unit and determine its exact size (requires the
 * following start code to be present in @data). */
H264ParserResult h264_parser_identify_nalu (H264NalParser *nalparser,
    const guint8 *data, guint offset,
    gsize size, H264NalUnit *nalu);

/* Find the next NAL unit without requiring its end to be in @data. */
H264ParserResult h264_parser_identify_nalu_unchecked (H264NalParser *nalparser,
    const guint8 *data, guint offset,
    gsize size, H264NalUnit *nalu);

/* Parse @nalu (must be an SPS NAL) into @sps. */
H264ParserResult h264_parser_parse_sps (H264NalParser *nalparser, H264NalUnit *nalu,
    H264SPS *sps, gboolean parse_vui_params);

/* Free a parser allocated with h264_nal_parser_new(). */
void h264_nal_parser_free (H264NalParser *nalparser);

/* Parse an SPS NAL without a parser context. */
H264ParserResult h264_parse_sps (H264NalUnit *nalu,
    H264SPS *sps, gboolean parse_vui_params);

/* Clear an #H264SPS (currently holds no dynamic memory). */
void h264_sps_clear (H264SPS *sps);
#define H265_MAX_SUB_LAYERS 8   /* size of the per-sub-layer arrays in H265SPS */
#define H265_MAX_SPS_COUNT 16

/* H.265 NAL unit types (Rec. ITU-T H.265, Table 7-1) */
typedef enum
{
  H265_NAL_SLICE_TRAIL_N = 0,
  H265_NAL_SLICE_TRAIL_R = 1,
  H265_NAL_SLICE_TSA_N = 2,
  H265_NAL_SLICE_TSA_R = 3,
  H265_NAL_SLICE_STSA_N = 4,
  H265_NAL_SLICE_STSA_R = 5,
  H265_NAL_SLICE_RADL_N = 6,
  H265_NAL_SLICE_RADL_R = 7,
  H265_NAL_SLICE_RASL_N = 8,
  H265_NAL_SLICE_RASL_R = 9,
  H265_NAL_SLICE_BLA_W_LP = 16,
  H265_NAL_SLICE_BLA_W_RADL = 17,
  H265_NAL_SLICE_BLA_N_LP = 18,
  H265_NAL_SLICE_IDR_W_RADL = 19,
  H265_NAL_SLICE_IDR_N_LP = 20,
  H265_NAL_SLICE_CRA_NUT = 21,
  H265_NAL_VPS = 32,
  H265_NAL_SPS = 33,
  H265_NAL_PPS = 34,
  H265_NAL_AUD = 35,
  H265_NAL_EOS = 36,
  H265_NAL_EOB = 37,
  H265_NAL_FD = 38,
  H265_NAL_PREFIX_SEI = 39,
  H265_NAL_SUFFIX_SEI = 40
} H265NalUnitType;

/* Result codes returned by the H.265 parsing entry points */
typedef enum
{
  H265_PARSER_OK,
  H265_PARSER_BROKEN_DATA,      /* stream data is corrupt */
  H265_PARSER_BROKEN_LINK,
  H265_PARSER_ERROR,
  H265_PARSER_NO_NAL,           /* no start code found in the buffer */
  H265_PARSER_NO_NAL_END        /* NAL start found but its end is not */
} H265ParserResult;
typedef struct _H265Parser H265Parser;
typedef struct _H265NalUnit H265NalUnit;
typedef struct _H265SPS H265SPS;
typedef struct _H265ProfileTierLevel H265ProfileTierLevel;

/* One identified H.265 NAL unit.  @data borrows the caller's buffer;
 * the struct owns no memory. */
struct _H265NalUnit
{
  guint8 type;                  /* H265NalUnitType */
  guint8 layer_id;
  guint8 temporal_id_plus1;
  /* calculated values */
  guint size;                   /* bytes from @offset to the end of the NAL */
  guint offset;                 /* first byte after the start code / length prefix */
  guint sc_offset;              /* first byte of the start code itself */
  gboolean valid;
  guint8 *data;                 /* borrowed pointer to the whole buffer */
  guint8 header_bytes;          /* length of the parsed NAL header (2) */
};

/* profile_tier_level() syntax (Rec. ITU-T H.265 section 7.3.3).
 * The sub_layer_* arrays hold 6 entries, the spec maximum for
 * sps_max_sub_layers_minus1 — callers must not index beyond that. */
struct _H265ProfileTierLevel {
  guint8 profile_space;
  guint8 tier_flag;
  guint8 profile_idc;
  guint8 profile_compatibility_flag[32];
  guint8 progressive_source_flag;
  guint8 interlaced_source_flag;
  guint8 non_packed_constraint_flag;
  guint8 frame_only_constraint_flag;
  guint8 max_12bit_constraint_flag;
  guint8 max_10bit_constraint_flag;
  guint8 max_8bit_constraint_flag;
  guint8 max_422chroma_constraint_flag;
  guint8 max_420chroma_constraint_flag;
  guint8 max_monochrome_constraint_flag;
  guint8 intra_constraint_flag;
  guint8 one_picture_only_constraint_flag;
  guint8 lower_bit_rate_constraint_flag;
  guint8 max_14bit_constraint_flag;
  guint8 level_idc;
  guint8 sub_layer_profile_present_flag[6];
  guint8 sub_layer_level_present_flag[6];
  guint8 sub_layer_profile_space[6];
  guint8 sub_layer_tier_flag[6];
  guint8 sub_layer_profile_idc[6];
  guint8 sub_layer_profile_compatibility_flag[6][32];
  guint8 sub_layer_progressive_source_flag[6];
  guint8 sub_layer_interlaced_source_flag[6];
  guint8 sub_layer_non_packed_constraint_flag[6];
  guint8 sub_layer_frame_only_constraint_flag[6];
  guint8 sub_layer_level_idc[6];
};
/* Parsed H.265 sequence parameter set; field names mirror the
 * seq_parameter_set_rbsp() syntax of Rec. ITU-T H.265.  Fields from
 * scaling_list_enabled_flag onward are declared but not filled by the
 * stripped-down parser that accompanies this header. */
struct _H265SPS
{
  guint8 id;                    /* sps_seq_parameter_set_id */
  guint8 max_sub_layers_minus1;
  guint8 temporal_id_nesting_flag;
  H265ProfileTierLevel profile_tier_level;
  guint8 chroma_format_idc;
  guint8 separate_colour_plane_flag;
  guint16 pic_width_in_luma_samples;
  guint16 pic_height_in_luma_samples;
  guint8 conformance_window_flag;
  /* if conformance_window_flag */
  guint32 conf_win_left_offset;
  guint32 conf_win_right_offset;
  guint32 conf_win_top_offset;
  guint32 conf_win_bottom_offset;
  guint8 bit_depth_luma_minus8;
  guint8 bit_depth_chroma_minus8;
  guint8 log2_max_pic_order_cnt_lsb_minus4;
  guint8 sub_layer_ordering_info_present_flag;
  guint8 max_dec_pic_buffering_minus1[H265_MAX_SUB_LAYERS];
  guint8 max_num_reorder_pics[H265_MAX_SUB_LAYERS];
  guint8 max_latency_increase_plus1[H265_MAX_SUB_LAYERS];
  guint8 log2_min_luma_coding_block_size_minus3;
  guint8 log2_diff_max_min_luma_coding_block_size;
  guint8 log2_min_transform_block_size_minus2;
  guint8 log2_diff_max_min_transform_block_size;
  guint8 max_transform_hierarchy_depth_inter;
  guint8 max_transform_hierarchy_depth_intra;
  guint8 scaling_list_enabled_flag;
  /* if scaling_list_enabled_flag */
  guint8 scaling_list_data_present_flag;
  guint8 amp_enabled_flag;
  guint8 sample_adaptive_offset_enabled_flag;
  guint8 pcm_enabled_flag;
  /* if pcm_enabled_flag */
  guint8 pcm_sample_bit_depth_luma_minus1;
  guint8 pcm_sample_bit_depth_chroma_minus1;
  guint8 log2_min_pcm_luma_coding_block_size_minus3;
  guint8 log2_diff_max_min_pcm_luma_coding_block_size;
  guint8 pcm_loop_filter_disabled_flag;
  guint8 num_short_term_ref_pic_sets;
  guint8 long_term_ref_pics_present_flag;
  /* if long_term_ref_pics_present_flag */
  guint8 num_long_term_ref_pics_sps;
  guint16 lt_ref_pic_poc_lsb_sps[32];
  guint8 used_by_curr_pic_lt_sps_flag[32];
  guint8 temporal_mvp_enabled_flag;
  guint8 strong_intra_smoothing_enabled_flag;
  guint8 vui_parameters_present_flag;
  /* if vui_parameters_present_flat */
  guint8 sps_extension_flag;
  /* calculated values */
  guint8 chroma_array_type;
  gint width, height;           /* uncropped dimensions in pixels */
  gint crop_rect_width, crop_rect_height;
  gint crop_rect_x, crop_rect_y;
  gint fps_num, fps_den;        /* always 0/1 in this parser */
  gboolean valid;
};

/* Parser context.  sps[] / last_sps are storage for parsed parameter
 * sets.  NOTE(review): the accompanying stripped-down parser does not
 * appear to populate them — verify before relying on last_sps. */
struct _H265Parser
{
  /*< private >*/
  H265SPS sps[H265_MAX_SPS_COUNT];
  H265SPS *last_sps;
};
/* Allocate a new parser; release with h265_parser_free(). */
H265Parser * h265_parser_new (void);

/* Find the next NAL unit and determine its exact size (requires the
 * following start code to be present in @data). */
H265ParserResult h265_parser_identify_nalu (H265Parser * parser,
    const guint8 * data,
    guint offset,
    gsize size,
    H265NalUnit * nalu);

/* Find the next NAL unit without requiring its end to be in @data. */
H265ParserResult h265_parser_identify_nalu_unchecked (H265Parser * parser,
    const guint8 * data,
    guint offset,
    gsize size,
    H265NalUnit * nalu);

/* Identify a NAL in length-prefixed ("hevc") stream format. */
H265ParserResult h265_parser_identify_nalu_hevc (H265Parser * parser,
    const guint8 * data,
    guint offset,
    gsize size,
    guint8 nal_length_size,
    H265NalUnit * nalu);

/* Parse @nalu (must be an SPS NAL) into @sps. */
H265ParserResult h265_parser_parse_sps (H265Parser * parser,
    H265NalUnit * nalu,
    H265SPS * sps,
    gboolean parse_vui_params);

/* Free a parser allocated with h265_parser_new(). */
void h265_parser_free (H265Parser * parser);

/* Parse an SPS NAL payload directly. */
H265ParserResult h265_parse_sps (H265Parser * parser,
    H265NalUnit * nalu,
    H265SPS * sps,
    gboolean parse_vui_params);
G_END_DECLS
#endif

View File

@@ -2,7 +2,7 @@
*
* Copyright (C) 2001-2002 Ronald Bultje <rbultje@ronald.bitfreak.net>
* 2006 Edgard Lima <edgard.lima@gmail.com>
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
*
* gstv4l2object.c: base class for V4L2 elements
*
@@ -172,6 +172,8 @@ static const GstV4L2FormatDesc gst_v4l2_formats[] = {
{V4L2_PIX_FMT_P012, TRUE, GST_V4L2_RAW},
{V4L2_PIX_FMT_P012M, TRUE, GST_V4L2_RAW},
{V4L2_PIX_FMT_NV24M, TRUE, GST_V4L2_RAW},
{V4L2_PIX_FMT_YUV444_10LE, TRUE, GST_V4L2_RAW},
{V4L2_PIX_FMT_YUV444_12LE, TRUE, GST_V4L2_RAW},
#endif
/* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */
{V4L2_PIX_FMT_SBGGR8, TRUE, GST_V4L2_CODEC},
@@ -1038,7 +1040,9 @@ gst_v4l2_object_get_format_from_fourcc (GstV4l2Object * v4l2object,
#ifdef USE_V4L2_TARGET_NV
if (fourcc == V4L2_PIX_FMT_P010M ||
fourcc == V4L2_PIX_FMT_P012M ||
fourcc == V4L2_PIX_FMT_NV24M) {
fourcc == V4L2_PIX_FMT_NV24M ||
fourcc == V4L2_PIX_FMT_YUV444_10LE ||
fourcc == V4L2_PIX_FMT_YUV444_12LE) {
fmt->pixelformat = fourcc;
return fmt;
}
@@ -1137,6 +1141,8 @@ gst_v4l2_object_format_get_rank (const struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_P012: /* Y/CbCr 4:2:0, 12 bits per channel */
case V4L2_PIX_FMT_P012M:
case V4L2_PIX_FMT_NV24M:
case V4L2_PIX_FMT_YUV444_10LE:
case V4L2_PIX_FMT_YUV444_12LE:
#endif
case V4L2_PIX_FMT_NV21: /* 12 Y/CrCb 4:2:0 */
case V4L2_PIX_FMT_NV21M: /* Same as NV21 */
@@ -1358,7 +1364,6 @@ static GstVideoFormat
gst_v4l2_object_v4l2fourcc_to_video_format (guint32 fourcc)
{
GstVideoFormat format;
switch (fourcc) {
case V4L2_PIX_FMT_GREY: /* 8 Greyscale */
format = GST_VIDEO_FORMAT_GRAY8;
@@ -1419,6 +1424,15 @@ gst_v4l2_object_v4l2fourcc_to_video_format (guint32 fourcc)
case V4L2_PIX_FMT_P012M:
format = GST_VIDEO_FORMAT_I420_12LE;
break;
case V4L2_PIX_FMT_YUV444:
format = GST_VIDEO_FORMAT_Y444;
break;
case V4L2_PIX_FMT_YUV444_10LE:
format = GST_VIDEO_FORMAT_Y444_10LE;
break;
case V4L2_PIX_FMT_YUV444_12LE:
format = GST_VIDEO_FORMAT_Y444_12LE;
break;
#endif
case V4L2_PIX_FMT_NV12MT:
format = GST_VIDEO_FORMAT_NV12_64Z32;
@@ -1613,6 +1627,9 @@ gst_v4l2_object_v4l2fourcc_to_bare_struct (guint32 fourcc)
case V4L2_PIX_FMT_NV61M:
#ifdef USE_V4L2_TARGET_NV
case V4L2_PIX_FMT_NV24M:
case V4L2_PIX_FMT_YUV444:
case V4L2_PIX_FMT_YUV444_10LE:
case V4L2_PIX_FMT_YUV444_12LE:
#endif
case V4L2_PIX_FMT_NV24: /* 24 Y/CrCb 4:4:4 */
case V4L2_PIX_FMT_YVU410:
@@ -1677,7 +1694,6 @@ gst_v4l2_object_v4l2fourcc_to_bare_struct (guint32 fourcc)
case V4L2_PIX_FMT_Y10:
case V4L2_PIX_FMT_Y12:
case V4L2_PIX_FMT_Y10BPACK:
case V4L2_PIX_FMT_YUV444:
case V4L2_PIX_FMT_YUV555:
case V4L2_PIX_FMT_YUV565:
case V4L2_PIX_FMT_Y41P:
@@ -1848,6 +1864,9 @@ gst_v4l2_object_get_caps_info (GstV4l2Object * v4l2object, GstCaps * caps,
if (g_str_equal (mimetype, "video/x-raw")) {
switch (GST_VIDEO_INFO_FORMAT (info)) {
case GST_VIDEO_FORMAT_Y444:
fourcc = V4L2_PIX_FMT_YUV444;
break;
case GST_VIDEO_FORMAT_I420:
fourcc = V4L2_PIX_FMT_YUV420;
fourcc_nc = V4L2_PIX_FMT_YUV420M;
@@ -1880,6 +1899,12 @@ gst_v4l2_object_get_caps_info (GstV4l2Object * v4l2object, GstCaps * caps,
fourcc = V4L2_PIX_FMT_P012;
fourcc_nc = V4L2_PIX_FMT_P012M;
break;
case GST_VIDEO_FORMAT_Y444_10LE:
fourcc_nc = V4L2_PIX_FMT_YUV444_10LE;
break;
case GST_VIDEO_FORMAT_Y444_12LE:
fourcc_nc = V4L2_PIX_FMT_YUV444_12LE;
break;
#endif
case GST_VIDEO_FORMAT_NV12_64Z32:
fourcc_nc = V4L2_PIX_FMT_NV12MT;
@@ -3861,6 +3886,48 @@ gst_v4l2_object_set_format_full (GstV4l2Object * v4l2object, GstCaps * caps,
ret = v4l2object->ioctl (fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
ctl.id = V4L2_CID_MPEG_VIDEOENC_CUDA_PRESET_ID;
ctl.value = videoenc->cudaenc_preset_id;
ctrls.count = 1;
ctrls.controls = &ctl ;
ret = v4l2object->ioctl (fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
v4l2_ctrl_video_constqp constqp;
constqp.constQpI = videoenc->constQpI;
constqp.constQpP = videoenc->constQpP;
constqp.constQpB = videoenc->constQpB;
ctrls.count = 1;
ctrls.controls = &ctl ;
ctl.id = V4L2_CID_MPEG_VIDEOENC_CUDA_CONSTQP;
ctl.string = (gchar *) &constqp;
ret = v4l2object->ioctl (fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
v4l2_ctrl_video_init_qp init_qp;
init_qp.IInitQP = videoenc->IInitQP;
init_qp.PInitQP = videoenc->PInitQP;
init_qp.BInitQP = videoenc->BInitQP;
ctrls.count = 1;
ctrls.controls = &ctl ;
ctl.id = V4L2_CID_MPEG_VIDEOENC_INIT_FRAME_QP;
ctl.string = (gchar *) &init_qp;
ret = v4l2object->ioctl (fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
ctl.id = V4L2_CID_MPEG_VIDEOENC_CUDA_TUNING_INFO;
ctl.value = videoenc->cudaenc_tuning_info_id;
ctrls.count = 1;
ctrls.controls = &ctl ;
ret = v4l2object->ioctl (fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
}
}
#endif
@@ -4299,6 +4366,19 @@ gst_v4l2_object_acquire_format (GstV4l2Object * v4l2object, GstVideoInfo * info)
gst_video_info_set_format (info, format, width, height);
#ifdef USE_V4L2_TARGET_NV
/* Currently gst plugins base doesn't have support for P012_12LE or NV12 12 bit format.
So we can only pass GST_VIDEO_FORMAT_I420_12LE to the gst_video_format_get_info() method,
which returns the number of planes as 3 and triggers an assertion in gst_v4l2_object_extrapolate_info().
Once support for the P012_12LE or NV12 12 bit format is added correctly in gst plugins base,
we will no longer need this check. */
if (format == GST_VIDEO_FORMAT_I420_12LE) {
memcpy (&video_info, info->finfo, sizeof(video_info));
video_info.n_planes = 2;
info->finfo = &video_info;
}
#endif
switch (fmt.fmt.pix.field) {
case V4L2_FIELD_ANY:
case V4L2_FIELD_NONE:

View File

@@ -2,7 +2,7 @@
*
* Copyright (C) 2001-2002 Ronald Bultje <rbultje@ronald.bitfreak.net>
* 2006 Edgard Lima <edgard.lima@gmail.com>
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
*
* gstv4l2object.h: base class for V4L2 elements
*
@@ -54,10 +54,11 @@ typedef struct _GstV4l2ObjectClassHelper GstV4l2ObjectClassHelper;
#define V4L2_DEVICE_BASENAME_NVDEC "nvdec"
#define V4L2_DEVICE_BASENAME_NVENC "msenc"
#define V4L2_DEVICE_PATH_NVDEC "/dev/nvhost-nvdec"
#define V4L2_DEVICE_PATH_NVDEC_ALT "/dev/dri/card0"
#define V4L2_DEVICE_PATH_NVDEC_ALT "/dev/v4l2-nvdec"
#define V4L2_DEVICE_PATH_NVDEC_MCCOY "/dev/nvidia0"
#define V4L2_DEVICE_PATH_NVENC "/dev/nvhost-msenc"
#define V4L2_DEVICE_PATH_NVENC_ALT "/dev/v4l2-nvenc"
#define V4L2_DEVICE_PATH_TEGRA_INFO "/sys/firmware/devicetree/base/compatible"
#endif
/* max frame width/height */
@@ -137,6 +138,11 @@ typedef gboolean (*GstV4l2UpdateFpsFunction) (GstV4l2Object * v4l2object);
return FALSE; \
}
#ifdef USE_V4L2_TARGET_NV
/* Structure to hold the video info inorder to modify the contents, incase of
* GST_VIDEO_FORMAT_I420_12LE format */
GstVideoFormatInfo video_info;
#endif
struct _GstV4l2Object {
GstElement * element;

View File

@@ -1,7 +1,7 @@
/*
* Copyright (C) 2014-2022 Collabora Ltd.
* Copyright (C) 2014-2023 Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -29,9 +29,11 @@
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include "gstv4l2h26xparser.h"
#include "gstv4l2object.h"
#include "gstv4l2videodec.h"
#include "gstnvdsseimeta.h"
#include "gst-nvcustomevent.h"
#include "stdlib.h"
@@ -66,7 +68,7 @@ gboolean default_sei_extract_data;
gint default_num_extra_surfaces;
static gboolean enable_latency_measurement = FALSE;
uint8_t *parse_sei_data (uint8_t *bs, guint size, uint32_t *payload_size);
extern uint8_t *parse_sei_data (uint8_t *bs, guint size, uint32_t *payload_size, char *sei_uuid_string);
#ifdef USE_V4L2_TARGET_NV
GstVideoCodecFrame *
@@ -135,7 +137,7 @@ gst_video_dec_capture_buffer_dynamic_allocation (void)
#define GST_TYPE_V4L2_VID_CUDADEC_MEM_TYPE (gst_video_cudadec_mem_type ())
#ifndef __aarch64__
#define DEFAULT_CUDADEC_MEM_TYPE V4L2_CUDA_MEM_TYPE_UNIFIED
#define DEFAULT_CUDADEC_MEM_TYPE V4L2_CUDA_MEM_TYPE_DEVICE
#else
#define DEFAULT_CUDADEC_MEM_TYPE V4L2_CUDA_MEM_TYPE_DEVICE
#endif
@@ -246,6 +248,7 @@ enum
PROP_CUDADEC_GPU_ID,
PROP_CUDADEC_LOW_LATENCY,
PROP_EXTRACT_SEI_TYPE5_DATA,
PROP_SEI_UUID_STRING,
PROP_CAP_BUF_DYNAMIC_ALLOCATION,
#endif
};
@@ -448,6 +451,10 @@ gst_v4l2_video_dec_set_property_cuvid (GObject * object,
self->extract_sei_type5_data = g_value_get_boolean (value);
break;
case PROP_SEI_UUID_STRING:
self->sei_uuid_string = (gchar *)g_value_dup_string (value);
break;
case PROP_CAP_BUF_DYNAMIC_ALLOCATION:
self->cap_buf_dynamic_allocation = g_value_get_enum (value);
break;
@@ -570,6 +577,10 @@ gst_v4l2_video_dec_get_property_cuvid (GObject * object,
g_value_set_boolean (value, self->extract_sei_type5_data);
break;
case PROP_SEI_UUID_STRING:
g_value_set_string (value, self->sei_uuid_string);
break;
case PROP_CAP_BUF_DYNAMIC_ALLOCATION:
g_value_set_enum (value, self->cap_buf_dynamic_allocation);
break;
@@ -719,117 +730,10 @@ gst_v4l2_video_dec_stop (GstVideoDecoder * decoder)
return TRUE;
}
static gboolean
gst_v4l2_video_dec_set_format (GstVideoDecoder * decoder,
GstVideoCodecState * state)
#ifdef USE_V4L2_TARGET_NV
gboolean set_v4l2_controls (GstV4l2VideoDec *self)
{
GstV4l2Error error = GST_V4L2_ERROR_INIT;
gboolean ret = TRUE;
GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
GST_DEBUG_OBJECT (self, "Setting format: %" GST_PTR_FORMAT, state->caps);
if (self->input_state) {
#ifndef USE_V4L2_TARGET_NV
if (gst_v4l2_object_caps_equal (self->v4l2output, state->caps)) {
GST_DEBUG_OBJECT (self, "Compatible caps");
goto done;
}
#endif
{
GstStructure *config;
GstCaps *oldcaps;
GstStructure *structure, *new_structure;
gint old_width = 0, new_width = 0;
gint old_height = 0, new_height = 0;
config = gst_buffer_pool_get_config (self->v4l2output->pool);
gst_buffer_pool_config_get_params (config, &oldcaps, NULL, NULL, NULL);
structure = gst_caps_get_structure (oldcaps, 0);
gst_structure_get_int (structure, "width", &old_width);
gst_structure_get_int (structure, "height", &old_height);
new_structure = gst_caps_get_structure (state->caps, 0);
gst_structure_get_int (new_structure, "width", &new_width);
gst_structure_get_int (new_structure, "height", &new_height);
if ((old_width && (old_width != new_width)) ||
(old_height && (old_height != new_height)))
self->is_drc = TRUE;
else
self->is_drc = FALSE;
gst_structure_free (config);
}
if (self->is_drc == FALSE)
return TRUE;
else
self->idr_received = FALSE;
gst_video_codec_state_unref (self->input_state);
self->input_state = NULL;
gst_v4l2_video_dec_finish (decoder);
gst_v4l2_object_stop (self->v4l2output);
/* The renegotiation flow doesn't blend with the base class flow. To
* properly stop the capture pool we need to reclaim our buffers, which
* will happen through the allocation query. The allocation query is
* triggered by gst_video_decoder_negotiate() which requires the output
* caps to be set, but we can't know this information as we rely on the
* decoder, which requires the capture queue to be stopped.
*
* To work around this issue, we simply run an allocation query with the
* old negotiated caps in order to drain/reclaim our buffers. That breaks
* the complexity and should not have much impact in performance since the
* following allocation query will happen on a drained pipeline and won't
* block. */
{
GstCaps *caps = gst_pad_get_current_caps (decoder->srcpad);
if (caps) {
GstQuery *query = gst_query_new_allocation (caps, FALSE);
gst_pad_peer_query (decoder->srcpad, query);
gst_query_unref (query);
gst_caps_unref (caps);
}
}
gst_v4l2_object_stop (self->v4l2capture);
self->output_flow = GST_FLOW_OK;
#ifdef USE_V4L2_TARGET_NV
if (self->is_drc == TRUE)
{
g_mutex_lock (&self->v4l2capture->cplane_stopped_lock);
while (self->v4l2capture->capture_plane_stopped != TRUE)
{
g_cond_wait (&self->v4l2capture->cplane_stopped_cond,
&self->v4l2capture->cplane_stopped_lock);
}
self->v4l2capture->capture_plane_stopped = FALSE;
g_mutex_unlock (&self->v4l2capture->cplane_stopped_lock);
gst_v4l2_object_close (self->v4l2output);
gst_v4l2_object_close (self->v4l2capture);
gst_v4l2_object_open (self->v4l2output);
if (!gst_v4l2_object_open_shared (self->v4l2capture, self->v4l2output)) {
g_print ("gstv4l2object open shared failed\n");
if (GST_V4L2_IS_OPEN (self->v4l2output))
gst_v4l2_object_close (self->v4l2output);
return FALSE;
}
}
#endif
}
ret = gst_v4l2_object_set_format (self->v4l2output, state->caps, &error);
if (ret)
self->input_state = gst_video_codec_state_ref (state);
else
gst_v4l2_error (self, &error);
#ifdef USE_V4L2_TARGET_NV
GST_DEBUG_OBJECT(self, "set_v4l2_controls");
{
if (!set_v4l2_video_mpeg_class (self->v4l2output,
V4L2_CID_MPEG_VIDEO_DISABLE_COMPLETE_FRAME_INPUT, 0)) {
@@ -906,19 +810,142 @@ gst_v4l2_video_dec_set_format (GstVideoDecoder * decoder,
}
}
if (!set_v4l2_video_mpeg_class (self->v4l2output,
V4L2_CID_MPEG_VIDEO_CUDA_MEM_TYPE,
self->cudadec_mem_type)) {
g_print ("S_EXT_CTRLS for CUDA_MEM_TYPE failed\n");
return FALSE;
if (is_cuvid == TRUE) {
if (!set_v4l2_video_mpeg_class (self->v4l2output,
V4L2_CID_MPEG_VIDEO_CUDA_MEM_TYPE,
self->cudadec_mem_type)) {
g_print ("S_EXT_CTRLS for CUDA_MEM_TYPE failed\n");
return FALSE;
}
if (!set_v4l2_video_mpeg_class (self->v4l2output,
V4L2_CID_MPEG_VIDEO_CUDA_GPU_ID,
self->cudadec_gpu_id)) {
g_print ("S_EXT_CTRLS for CUDA_GPU_ID failed\n");
return FALSE;
}
}
if (!set_v4l2_video_mpeg_class (self->v4l2output,
V4L2_CID_MPEG_VIDEO_CUDA_GPU_ID,
self->cudadec_gpu_id)) {
g_print ("S_EXT_CTRLS for CUDA_GPU_ID failed\n");
return FALSE;
return TRUE;
}
#endif
static gboolean
gst_v4l2_video_dec_set_format (GstVideoDecoder * decoder,
GstVideoCodecState * state)
{
GstV4l2Error error = GST_V4L2_ERROR_INIT;
gboolean ret = TRUE;
GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
GST_DEBUG_OBJECT (self, "Setting format: %" GST_PTR_FORMAT, state->caps);
if (self->input_state) {
#ifndef USE_V4L2_TARGET_NV
if (gst_v4l2_object_caps_equal (self->v4l2output, state->caps)) {
GST_DEBUG_OBJECT (self, "Compatible caps");
goto done;
}
#else
if (is_cuvid == TRUE) {
GstV4l2BufferPool *v4l2pool = GST_V4L2_BUFFER_POOL(self->v4l2output->pool);
GstV4l2Object *obj = v4l2pool->obj;
if ((GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H264) ||
(GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H265))
{
GST_INFO_OBJECT(self, "pix format is h264 or h265. skipping");
self->output_flow = GST_FLOW_OK;
self->set_format = true;
return TRUE;
}
} else if (is_cuvid == FALSE) {
{
GstStructure *config;
GstCaps *oldcaps;
GstStructure *structure;
gint width = 0;
config = gst_buffer_pool_get_config(self->v4l2output->pool);
gst_buffer_pool_config_get_params(config, &oldcaps, NULL, NULL, NULL);
structure = gst_caps_get_structure(oldcaps, 0);
if (gst_structure_get_int(structure, "width", &width))
self->is_drc = TRUE;
else
self->is_drc = FALSE;
gst_structure_free(config);
}
if (self->is_drc == FALSE)
return TRUE;
else
self->idr_received = FALSE;
}
#endif
gst_video_codec_state_unref (self->input_state);
self->input_state = NULL;
gst_v4l2_video_dec_finish (decoder);
gst_v4l2_object_stop (self->v4l2output);
/* The renegotiation flow doesn't blend with the base class flow. To
* properly stop the capture pool we need to reclaim our buffers, which
* will happen through the allocation query. The allocation query is
* triggered by gst_video_decoder_negotiate() which requires the output
* caps to be set, but we can't know this information as we rely on the
* decoder, which requires the capture queue to be stopped.
*
* To work around this issue, we simply run an allocation query with the
* old negotiated caps in order to drain/reclaim our buffers. That breaks
* the complexity and should not have much impact in performance since the
* following allocation query will happen on a drained pipeline and won't
* block. */
{
GstCaps *caps = gst_pad_get_current_caps (decoder->srcpad);
if (caps) {
GstQuery *query = gst_query_new_allocation (caps, FALSE);
gst_pad_peer_query (decoder->srcpad, query);
gst_query_unref (query);
gst_caps_unref (caps);
}
}
gst_v4l2_object_stop (self->v4l2capture);
self->output_flow = GST_FLOW_OK;
#ifdef USE_V4L2_TARGET_NV
if (is_cuvid == FALSE) {
if (self->is_drc == TRUE)
{
g_mutex_lock(&self->v4l2capture->cplane_stopped_lock);
while (self->v4l2capture->capture_plane_stopped != TRUE)
{
g_cond_wait(&self->v4l2capture->cplane_stopped_cond,
&self->v4l2capture->cplane_stopped_lock);
}
self->v4l2capture->capture_plane_stopped = FALSE;
g_mutex_unlock(&self->v4l2capture->cplane_stopped_lock);
gst_v4l2_object_close(self->v4l2output);
gst_v4l2_object_close(self->v4l2capture);
gst_v4l2_object_open(self->v4l2output);
if (!gst_v4l2_object_open_shared(self->v4l2capture, self->v4l2output))
{
g_print("gstv4l2object open shared failed\n");
if (GST_V4L2_IS_OPEN(self->v4l2output))
gst_v4l2_object_close(self->v4l2output);
return FALSE;
}
}
}
#endif
}
ret = gst_v4l2_object_set_format (self->v4l2output, state->caps, &error);
if (ret)
self->input_state = gst_video_codec_state_ref (state);
else
gst_v4l2_error (self, &error);
#ifdef USE_V4L2_TARGET_NV
ret = set_v4l2_controls(self);
#endif
#ifndef USE_V4L2_TARGET_NV
@@ -1386,41 +1413,418 @@ gst_v4l2_video_remove_padding (GstCapsFeatures * features,
return TRUE;
}
/* Maps NvBufSurfaceMemType enum values to the string names advertised in the
 * "nvbuf-memory-type" field of the decoder src caps (see the caps setup in
 * gst_v4l2_video_dec_handle_frame). Designated initializers keep each slot
 * aligned with its enum value regardless of declaration order. */
static const char * const NvBufSurfaceMemType_names[] =
{
[NVBUF_MEM_DEFAULT] = "nvbuf-mem-default",
[NVBUF_MEM_CUDA_PINNED] = "nvbuf-mem-cuda-pinned",
[NVBUF_MEM_CUDA_DEVICE] = "nvbuf-mem-cuda-device",
[NVBUF_MEM_CUDA_UNIFIED] = "nvbuf-mem-cuda-unified",
[NVBUF_MEM_SURFACE_ARRAY] = "nvbuf-mem-surface-array",
[NVBUF_MEM_HANDLE] = "nvbuf-mem-handle",
[NVBUF_MEM_SYSTEM] = "nvbuf-mem-system",
};
/* Process a single H.265 NAL unit. On an SPS, parse it and record the coded
 * dimensions in self->current_width / self->current_height so that a
 * resolution change (DRC) can be detected by the caller.
 *
 * Returns FALSE only when SPS parsing fails both with and without VUI;
 * undersized NALs and non-SPS NAL types are ignored and TRUE is returned. */
static gboolean
gst_h265_parse_process_nal (GstV4l2VideoDec *self, H265NalUnit * nalu)
{
  H265SPS sps = { 0, };
  guint nal_type;
  /* Zero-initialize: h265_parser_parse_sps() reads parser state, and passing
   * an indeterminate automatic struct is undefined behavior. */
  H265Parser nalparser = { 0, };
  H265ParserResult pres = H265_PARSER_ERROR;

  /* nothing to do for broken input */
  if (G_UNLIKELY (nalu->size < 2)) {
    return TRUE;
  }

  nal_type = nalu->type;
  switch (nal_type) {
    case H265_NAL_SPS:
      /* Try a full parse (including VUI) first; if that fails, retry while
       * skipping the VUI so a malformed VUI does not lose the dimensions. */
      pres = h265_parser_parse_sps (&nalparser, nalu, &sps, TRUE);
      if (pres != H265_PARSER_OK) {
        pres = h265_parser_parse_sps (&nalparser, nalu, &sps, FALSE);
        if (pres != H265_PARSER_OK) {
          return FALSE;
        }
      }
      self->current_width = sps.width;
      self->current_height = sps.height;
      break;
    default:
      /* Only SPS NALs carry the information we need here. */
      break;
  }
  return TRUE;
}
/* Process a single H.264 NAL unit. On an SPS, parse it and record the coded
 * dimensions in self->current_width / self->current_height so that a
 * resolution change (DRC) can be detected by the caller.
 *
 * Returns FALSE only when SPS parsing fails; undersized NALs and non-SPS
 * NAL types are ignored and TRUE is returned. */
static gboolean
gst_h264_parse_process_nal (GstV4l2VideoDec *self, H264NalUnit * nalu)
{
  guint nal_type;
  H264SPS sps = { 0, };
  /* Zero-initialize: h264_parser_parse_sps() reads parser state, and passing
   * an indeterminate automatic struct is undefined behavior. */
  H264NalParser nalparser = { 0, };
  H264ParserResult pres;

  /* nothing to do for broken input */
  if (G_UNLIKELY (nalu->size < 2)) {
    GST_DEBUG_OBJECT (self, "not processing nal size %u", nalu->size);
    return TRUE;
  }

  nal_type = nalu->type;
  GST_DEBUG_OBJECT (self, "processing nal of type %u , size %u",
      nal_type, nalu->size);
  if (nal_type == H264_NAL_SPS)
  {
    GST_DEBUG_OBJECT (self, "GOT SPS frame\n");
  }

  switch (nal_type) {
    case H264_NAL_SPS:
      pres = h264_parser_parse_sps (&nalparser, nalu, &sps, TRUE);
      if (pres != H264_PARSER_OK) {
        GST_WARNING_OBJECT (self, "failed to parse SPS:");
        return FALSE;
      }
      /* Dimensions derived from 16x16 macroblock counts.
       * NOTE(review): this assumes frame_mbs_only_flag == 1 and ignores
       * frame cropping, so interlaced or cropped streams may report a
       * larger-than-visible size — confirm against the DRC comparison in
       * the caller. */
      self->current_width = (sps.pic_width_in_mbs_minus1 + 1) << 4;
      self->current_height = (sps.pic_height_in_map_units_minus1 + 1) << 4;
      break;
    default:
      /* Only SPS NALs carry the information we need here. */
      break;
  }
  return TRUE;
}
/* Walk an H.265 Annex-B buffer NAL by NAL, feeding each unit to
 * gst_h265_parse_process_nal() so SPS dimensions end up in
 * self->current_width / self->current_height.
 *
 * Returns GST_FLOW_OK on success, GST_FLOW_ERROR if any NAL failed to be
 * identified. Note that an error result does NOT stop the scan: the loop
 * keeps consuming units and only the last recorded error is returned. */
static GstFlowReturn
gst_v4l2_h265_stream_parser (GstV4l2VideoDec *self, gpointer data, guint32 size)
{
H265NalUnit nalu;
H265ParserResult pres;
gint current_off = 0;
guint nalu_size = 0;
GstFlowReturn ret = GST_FLOW_OK;
while (TRUE)
{
/* NOTE(review): parser argument is NULL here — presumably this NV fork of
 * h265_parser_identify_nalu() tolerates a NULL parser; confirm against
 * gstv4l2h26xparser.c. */
pres = h265_parser_identify_nalu (NULL, data, current_off, size, &nalu);
/* The +4 accounts for a 4-byte (00 00 00 01) start code per NAL; 3-byte
 * start codes would make this over-count and end the scan early.
 * Termination: even when nalu.size is 0, nalu_size still grows by 4 each
 * iteration, so the loop always reaches the break below. */
nalu_size += nalu.size + 4;
if (nalu_size >= size)
break;
switch (pres) {
case H265_PARSER_OK:
GST_DEBUG_OBJECT (self, "complete nal (offset, size): (%u, %u) ",
nalu.offset, nalu.size);
break;
case H265_PARSER_NO_NAL_END:
GST_WARNING_OBJECT (self, "Start of the nal found, but not the end");
break;
case H265_PARSER_ERROR:
/* should not really occur either */
GST_ELEMENT_ERROR (self, STREAM, FORMAT,
("Error parsing H.265 stream"), ("Invalid H.265 stream"));
ret = GST_FLOW_ERROR;
break;
case H265_PARSER_NO_NAL:
GST_ELEMENT_ERROR (self, STREAM, FORMAT,
("Error parsing H.265 stream"), ("No H.265 NAL unit found"));
ret = GST_FLOW_ERROR;
break;
default:
ret = GST_FLOW_ERROR;
//g_assert_not_reached ();
break;
}
GST_DEBUG_OBJECT (self, "%p complete nal found. Off: %u, Size: %u",
data, nalu.offset, nalu.size);
/* A broken NAL is logged and skipped; it does not change ret. */
if (!gst_h265_parse_process_nal (self, &nalu)) {
GST_WARNING_OBJECT (self,
"broken/invalid nal Type: %d, Size: %u will be dropped",
nalu.type, nalu.size);
}
/* NOTE(review): offset advances by nalu.size only (not nalu.offset +
 * size), relying on identify_nalu to re-find the next start code —
 * confirm this matches the parser's offset semantics. */
current_off += nalu.size;
}
GST_DEBUG_OBJECT (self,
"sps width = %d height = %d \n", self->current_width, self->current_height);
return ret;
}
/* Walk an H.264 Annex-B buffer NAL by NAL, feeding each unit to
 * gst_h264_parse_process_nal() so SPS dimensions end up in
 * self->current_width / self->current_height.
 *
 * Returns GST_FLOW_OK on success, GST_FLOW_ERROR if any NAL failed to be
 * identified. Note that an error result does NOT stop the scan: the loop
 * keeps consuming units and only the last recorded error is returned. */
static GstFlowReturn
gst_v4l2_h264_stream_parser (GstV4l2VideoDec *self, gpointer data, guint32 size)
{
H264NalUnit nalu;
H264ParserResult pres;
gint current_off = 0;
guint nalu_size = 0;
GstFlowReturn ret = GST_FLOW_OK;
while (TRUE)
{
/* NOTE(review): parser argument is NULL here — presumably this NV fork of
 * h264_parser_identify_nalu() tolerates a NULL parser; confirm against
 * gstv4l2h26xparser.c. */
pres = h264_parser_identify_nalu (NULL, data, current_off, size, &nalu);
/* The +4 accounts for a 4-byte (00 00 00 01) start code per NAL; 3-byte
 * start codes would make this over-count and end the scan early.
 * Termination: even when nalu.size is 0, nalu_size still grows by 4 each
 * iteration, so the loop always reaches the break below. */
nalu_size += nalu.size + 4;
if (nalu_size >= size)
break;
switch (pres)
{
case H264_PARSER_OK:
GST_DEBUG_OBJECT(self, "complete nal (offset, size): (%u, %u) ",
nalu.offset, nalu.size);
break;
case H264_PARSER_NO_NAL_END:
GST_WARNING_OBJECT(self, "parser will assume that the end of the data is the end of the NAL unit");
break;
case H264_PARSER_ERROR:
/* should not really occur either */
GST_ELEMENT_ERROR(self, STREAM, FORMAT,
("Error parsing H.264 stream"), ("Invalid H.264 stream"));
ret = GST_FLOW_ERROR;
break;
case H264_PARSER_NO_NAL:
GST_ELEMENT_ERROR(self, STREAM, FORMAT,
("Error parsing H.264 stream"), ("No H.264 NAL unit found"));
ret = GST_FLOW_ERROR;
break;
default:
ret = GST_FLOW_ERROR;
// g_assert_not_reached ();
break;
}
GST_DEBUG_OBJECT (self, "%p complete nal found. Off: %u, Size: %u",
data, nalu.offset, nalu.size);
/* A broken NAL is logged and skipped; it does not change ret. */
if (!gst_h264_parse_process_nal (self, &nalu)) {
GST_WARNING_OBJECT (self,
"broken/invalid nal Type: %d, Size: %u will be dropped",
nalu.type, nalu.size);
}
/* NOTE(review): offset advances by nalu.size only (not nalu.offset +
 * size), relying on identify_nalu to re-find the next start code —
 * confirm this matches the parser's offset semantics. */
current_off += nalu.size;
}
GST_DEBUG_OBJECT (self,
"sps width = %d height = %d", self->current_width, self->current_height);
return ret;
}
/* Scan `data` for the codec-specific start byte pattern:
 *   VP8: 2 bytes  (VP8_START_BYTE_0, VP8_START_BYTE_1)
 *   VP9: 3 bytes  (VP9_START_BYTE_0..2)
 * The codec is selected by the negotiated pixelformat on `obj`.
 *
 * Returns TRUE as soon as the pattern is found anywhere in the buffer,
 * FALSE otherwise (including for non-VP8/VP9 formats and buffers shorter
 * than the pattern).
 *
 * Fix vs. previous revision: the old single loop used `i < size - 2` for
 * both codecs, which is one short for the 2-byte VP8 pattern — a start
 * code occupying the final two bytes was missed. The format check is also
 * hoisted out of the loop since it is loop-invariant. */
static gboolean
findvpxStartCode(GstV4l2Object *obj, const uint8_t* data, int size)
{
  if (GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_VP8)
  {
    /* 2-byte pattern: last valid index pair is (size-2, size-1). */
    for (int i = 0; i + 1 < size; i++)
    {
      if (data[i] == VP8_START_BYTE_0 &&
          data[i + 1] == VP8_START_BYTE_1)
      {
        return TRUE;
      }
    }
  }
  else if (GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_VP9)
  {
    /* 3-byte pattern: last valid index triple ends at size-1. */
    for (int i = 0; i + 2 < size; i++)
    {
      if (data[i] == VP9_START_BYTE_0 &&
          data[i + 1] == VP9_START_BYTE_1 &&
          data[i + 2] == VP9_START_BYTE_2)
      {
        return TRUE;
      }
    }
  }
  return FALSE; // Start code not found
}
static GstFlowReturn
gst_v4l2_video_dec_handle_frame (GstVideoDecoder * decoder,
GstVideoCodecFrame * frame)
GstVideoCodecFrame * frame)
{
GstV4l2Error error = GST_V4L2_ERROR_INIT;
GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
GstFlowReturn ret = GST_FLOW_OK;
gboolean processed = FALSE;
GstBuffer *tmp;
GstTaskState task_state;
GstV4l2Error error = GST_V4L2_ERROR_INIT;
GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
GstFlowReturn ret = GST_FLOW_OK;
gboolean vpx_ret = FALSE;
gboolean processed = FALSE;
GstBuffer *tmp;
GstTaskState task_state;
#ifdef USE_V4L2_TARGET_NV
GstV4l2BufferPool *v4l2pool = GST_V4L2_BUFFER_POOL (self->v4l2output->pool);
GstV4l2Object *obj = v4l2pool->obj;
gboolean trigger_drc = false;
GstV4l2BufferPool *v4l2pool = GST_V4L2_BUFFER_POOL (self->v4l2output->pool);
GstV4l2Object *obj = v4l2pool->obj;
#endif
GST_DEBUG_OBJECT (self, "Handling frame %d", frame->system_frame_number);
GST_DEBUG_OBJECT (self, "Handling frame %d", frame->system_frame_number);
#ifdef USE_V4L2_TARGET_NV
/* CUVID and TEGRA decoders return format when SPS/PPS is received along with
* a frame. In case of RTSP inputs we drop the DELTA units which are not
* decodable independently until we receive I / IDR frame.
*/
if ((GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H264) ||
(GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H265))
{
if ((GST_BUFFER_FLAG_IS_SET (GST_BUFFER_CAST(frame->input_buffer),
GST_BUFFER_FLAG_DELTA_UNIT)) &&
(self->idr_received == FALSE))
{
GST_DEBUG_OBJECT (decoder, "Delta Unit Received, Dropping...");
gst_video_decoder_drop_frame (decoder, frame);
return GST_FLOW_OK;
}
self->idr_received = TRUE;
}
if (is_cuvid == TRUE) {
if (((GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_VP8) ||
(GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_VP9)) && self->valid_vpx == FALSE)
{
GstMapInfo map;
if (!gst_buffer_map(frame->input_buffer, &map, GST_MAP_READ))
{
GST_ERROR_OBJECT(self, "couldnt map frame input_buffer\n");
}
vpx_ret = findvpxStartCode(obj, map.data, map.size);
if (vpx_ret == FALSE)
{
gst_buffer_unmap (frame->input_buffer, &map);
goto drop;
}
else if (vpx_ret == TRUE)
{
self->valid_vpx = TRUE;
}
gst_buffer_unmap (frame->input_buffer, &map);
}
if (((GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H264) ||
(GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H265)) &&
!(GST_BUFFER_FLAG_IS_SET(GST_BUFFER_CAST(frame->input_buffer),
GST_BUFFER_FLAG_DELTA_UNIT)))
{
GstMapInfo map;
if (!gst_buffer_map(frame->input_buffer, &map, GST_MAP_READ))
{
GST_ERROR_OBJECT(self, "couldnt map frame input_buffer\n");
}
if (GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H264)
{
if (gst_v4l2_h264_stream_parser(self, map.data, map.size) != GST_FLOW_OK)
GST_ERROR_OBJECT(self, "h264 stream parsing failed");
}
else if ((GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H265))
{
if (gst_v4l2_h265_stream_parser(self, map.data, map.size) != GST_FLOW_OK)
GST_ERROR_OBJECT(self, "h265 stream parsing failed");
}
gst_buffer_unmap (frame->input_buffer, &map);
if (self->old_width || self->old_height)
{
if ((self->old_width != self->current_width) ||
(self->old_height != self->current_height))
{
GST_INFO_OBJECT(self, "Decoder found new resolution. triggering DRC seq.");
GST_INFO_OBJECT(self, "Old Resolution: [%d x %d], New Resolution: [%d x %d]",
self->old_width, self->old_height, self->current_width, self->current_height);
trigger_drc = true;
}
}
self->old_width = self->current_width;
self->old_height = self->current_height;
if (trigger_drc == true)
{
GstStructure *structure = NULL;
GstCaps *dec_sink_caps = gst_pad_get_current_caps(decoder->sinkpad);
if (dec_sink_caps != NULL)
{
dec_sink_caps = gst_caps_make_writable(dec_sink_caps);
structure = gst_caps_get_structure(dec_sink_caps, 0);
gst_structure_set(structure, "width", G_TYPE_INT, self->current_width,
"height", G_TYPE_INT, self->current_height,
NULL);
/* Replace coded size with visible size, we want to negotiate visible size
* with downstream, not coded size. */
gst_caps_map_in_place(dec_sink_caps, gst_v4l2_video_remove_padding, self);
GST_DEBUG_OBJECT(self, "dec_sink_caps: %s", gst_caps_to_string(dec_sink_caps));
}
self->idr_received = FALSE;
gst_v4l2_video_dec_finish(decoder);
gst_v4l2_object_stop(self->v4l2output);
{
GstCaps *caps = gst_pad_get_current_caps(decoder->srcpad);
if (caps)
{
GstQuery *query = gst_query_new_allocation(caps, FALSE);
gst_pad_peer_query(decoder->srcpad, query);
gst_query_unref(query);
gst_caps_unref(caps);
}
}
gst_v4l2_object_stop(self->v4l2capture);
self->output_flow = GST_FLOW_OK;
g_mutex_lock(&self->v4l2capture->cplane_stopped_lock);
while (self->v4l2capture->capture_plane_stopped != TRUE)
{
g_cond_wait(&self->v4l2capture->cplane_stopped_cond,
&self->v4l2capture->cplane_stopped_lock);
}
self->v4l2capture->capture_plane_stopped = FALSE;
g_mutex_unlock(&self->v4l2capture->cplane_stopped_lock);
gst_v4l2_object_close(self->v4l2output);
gst_v4l2_object_close(self->v4l2capture);
if (!gst_v4l2_object_open(self->v4l2output))
GST_ERROR_OBJECT(self, "gst_v4l2_object_open (self->v4l2output) failed\n");
if (!gst_v4l2_object_open_shared(self->v4l2capture, self->v4l2output))
{
GST_ERROR_OBJECT(self, "gstv4l2object open shared failed\n");
if (GST_V4L2_IS_OPEN(self->v4l2output))
gst_v4l2_object_close(self->v4l2output);
return GST_FLOW_ERROR;
}
if (dec_sink_caps != NULL)
{
ret = gst_v4l2_object_set_format(self->v4l2output, dec_sink_caps, &error);
gst_caps_unref(dec_sink_caps);
} else
{
GST_ERROR_OBJECT(self, "Decoder sink caps == NULL");
}
if (ret)
self->input_state = gst_video_codec_state_ref(self->input_state);
else
gst_v4l2_error(self, &error);
set_v4l2_controls(self);
GST_INFO_OBJECT(self, "Reset Done");
}
else if (self->set_format == true)
{
set_v4l2_controls(self);
}
}
}
/* CUVID and TEGRA decoders return format when SPS/PPS is received along with
* a frame. In case of RTSP inputs we drop the DELTA units which are not
* decodable independently until we receive I / IDR frame.
*/
if ((GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H264) ||
(GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H265))
{
if ((GST_BUFFER_FLAG_IS_SET (GST_BUFFER_CAST(frame->input_buffer),
GST_BUFFER_FLAG_DELTA_UNIT)) &&
(self->idr_received == FALSE))
{
GST_DEBUG_OBJECT (decoder, "Delta Unit Received, Dropping...");
gst_video_decoder_drop_frame (decoder, frame);
return GST_FLOW_OK;
}
self->idr_received = TRUE;
}
#endif
if (is_cuvid == TRUE) {
@@ -1653,6 +2057,29 @@ gst_v4l2_video_dec_handle_frame (GstVideoDecoder * decoder,
output_state->caps = gst_video_info_to_caps (&output_state->info);
GstCapsFeatures *features = gst_caps_features_new ("memory:NVMM", NULL);
gst_caps_set_features (output_state->caps, 0, features);
{
// convert "cudadec_mem_type" to NvBufSurfaceMemType
int buf_surface_mem_type = 0;
switch(self->cudadec_mem_type)
{
case 0:
{
if(is_cuvid == TRUE)
buf_surface_mem_type = NVBUF_MEM_CUDA_DEVICE;
else
buf_surface_mem_type = NVBUF_MEM_SURFACE_ARRAY;
break;
}
case 1:
buf_surface_mem_type = NVBUF_MEM_CUDA_PINNED;
break;
case 2:
buf_surface_mem_type = NVBUF_MEM_CUDA_UNIFIED;
break;
}
gst_caps_set_simple(output_state->caps, "nvbuf-memory-type", G_TYPE_STRING , NvBufSurfaceMemType_names[buf_surface_mem_type], NULL);
gst_caps_set_simple(output_state->caps, "gpu-id", G_TYPE_INT , self->cudadec_gpu_id, NULL);
}
#endif
/* Copy the rest of the information, there might be more in the future */
output_state->info.interlace_mode = info.interlace_mode;
@@ -1688,6 +2115,37 @@ gst_v4l2_video_dec_handle_frame (GstVideoDecoder * decoder,
}
#endif
}
#ifdef USE_V4L2_TARGET_NV
if (is_cuvid == TRUE)
{
if ((trigger_drc == false) && (self->set_format == true))
{
GstStructure *sink_pad_st = NULL, *src_pad_st = NULL;
const GValue *framerate = NULL;
GstCaps *dec_sink_caps = gst_pad_get_current_caps(decoder->sinkpad);
GstCaps *dec_src_caps = gst_pad_get_current_caps(decoder->srcpad);
if (G_UNLIKELY (dec_sink_caps != NULL) && G_UNLIKELY (dec_src_caps != NULL))
{
GST_DEBUG_OBJECT(self, "dec_sink_caps: %s", gst_caps_to_string(dec_sink_caps));
dec_src_caps = gst_caps_make_writable(dec_src_caps);
sink_pad_st = gst_caps_get_structure(dec_sink_caps, 0);
src_pad_st = gst_caps_get_structure(dec_src_caps, 0);
framerate = gst_structure_get_value(sink_pad_st, "framerate");
if (framerate)
gst_structure_set_value(src_pad_st, "framerate", framerate);
GST_DEBUG_OBJECT(self, "dec_src_caps: %s", gst_caps_to_string(dec_src_caps));
gst_pad_set_caps(decoder->srcpad, dec_src_caps);
gst_caps_unref(dec_sink_caps);
gst_caps_unref(dec_src_caps);
}
}
trigger_drc = false;
self->set_format = false;
}
#endif
task_state = gst_pad_get_task_state (GST_VIDEO_DECODER_SRC_PAD (self));
if (task_state == GST_TASK_STOPPED || task_state == GST_TASK_PAUSED) {
@@ -1760,7 +2218,7 @@ gst_v4l2_video_dec_handle_frame (GstVideoDecoder * decoder,
uint32_t payload_size = 0;
uint8_t *stream_data = (uint8_t *)map.data;
sei_type5_payload = parse_sei_data (stream_data, map.size,
&payload_size);
&payload_size, self->sei_uuid_string);
if (sei_type5_payload != NULL)
{
GST_DEBUG_OBJECT (self, "sei_type5_payload found\n");
@@ -1943,6 +2401,33 @@ gst_v4l2_video_dec_sink_event (GstVideoDecoder * decoder, GstEvent * event)
break;
}
#ifdef USE_V4L2_TARGET_NV
if ((GstNvCustomEventType)GST_EVENT_TYPE (event) == GST_NVEVENT_DEC_DROP_FRAME_INTERVAL_UPDATE) {
gchar* stream_id = NULL;
gst_nvevent_parse_dec_drop_frame_interval_update (event, &stream_id, &self->drop_frame_interval);
}
if ((GstNvCustomEventType)GST_EVENT_TYPE (event) == GST_NVEVENT_DEC_SKIP_FRAME_UPDATE) {
gchar* stream_id = NULL;
gst_nvevent_parse_dec_skip_frame_update (event, &stream_id, &self->skip_frames);
/* Handle skip-frame event */
if (self->skip_frames != V4L2_SKIP_FRAMES_TYPE_NONE) {
if (!set_v4l2_video_mpeg_class (self->v4l2output,
V4L2_CID_MPEG_VIDEO_SKIP_FRAMES,
self->skip_frames)) {
g_print ("S_EXT_CTRLS for SKIP_FRAMES failed\n");
return FALSE;
}
}
}
if ((GstNvCustomEventType)GST_EVENT_TYPE (event) == GST_NVEVENT_DEC_ENABLE_LOW_LATENCY_MODE) {
gchar* stream_id = NULL;
gst_nvevent_parse_dec_enable_low_latency_mode (event, &stream_id, &self->cudadec_low_latency);
/* TODO: Handle enable of low_latency mode */
}
#endif
return ret;
}
@@ -2059,6 +2544,7 @@ gst_v4l2_video_dec_init (GstV4l2VideoDec * self)
self->drop_frame_interval = 0;
self->decoded_picture_cnt = 0;
self->num_extra_surfaces = default_num_extra_surfaces;
self->valid_vpx = FALSE;
self->disable_dpb = DEFAULT_DISABLE_DPB;
self->enable_full_frame = DEFAULT_FULL_FRAME;
@@ -2095,6 +2581,9 @@ gst_v4l2_video_dec_subinstance_init (GTypeInstance * instance, gpointer g_class)
gst_v4l2_get_output, gst_v4l2_set_output, NULL);
self->v4l2output->no_initial_format = TRUE;
self->v4l2output->keep_aspect = FALSE;
#ifdef USE_V4L2_TARGET_NV
self->v4l2output->open_mjpeg_block = TRUE;
#endif
self->v4l2capture = gst_v4l2_object_new (GST_ELEMENT (self),
GST_OBJECT (GST_VIDEO_DECODER_SRC_PAD (self)),
@@ -2245,6 +2734,12 @@ gst_v4l2_video_dec_class_init (GstV4l2VideoDecClass * klass)
default_sei_extract_data,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_SEI_UUID_STRING,
g_param_spec_string ("sei-uuid", "SEI UUID String",
"Set 16 bytes UUID string for SEI Parsing, extract-sei-type5-data should be TRUE",
NULL,
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
}
#endif

View File

@@ -50,6 +50,13 @@ G_BEGIN_DECLS
#define GstV4l2VideoDecClass GstNvV4l2VideoDecClass
#define LOOP_COUNT_TO_WAIT_FOR_DQEVENT 6
#define WAIT_TIME_PER_LOOP_FOR_DQEVENT 100*1000
#define VP8_START_BYTE_0 0x9D
#define VP8_START_BYTE_1 0x01
#define VP9_START_BYTE_0 0x49
#define VP9_START_BYTE_1 0x83
#define VP9_START_BYTE_2 0x42
#endif
typedef struct _GstV4l2VideoDec GstV4l2VideoDec;
@@ -69,6 +76,7 @@ struct _GstV4l2VideoDec
/* State */
GstVideoCodecState *input_state;
gboolean active;
GstFlowReturn output_flow;
guint64 frame_num;
@@ -86,13 +94,20 @@ struct _GstV4l2VideoDec
gboolean enable_frame_type_reporting;
gboolean enable_error_check;
gboolean enable_max_performance;
gboolean set_format;
guint32 cudadec_mem_type;
guint32 cudadec_gpu_id;
guint32 cudadec_num_surfaces;
gboolean cudadec_low_latency;
gboolean extract_sei_type5_data;
gchar *sei_uuid_string;
gdouble rate;
guint32 cap_buf_dynamic_allocation;
guint32 current_width;
guint32 current_height;
guint32 old_width;
guint32 old_height;
gboolean valid_vpx;
#endif
};
@@ -106,6 +121,9 @@ struct _GstV4l2VideoDecClass
GType gst_v4l2_video_dec_get_type (void);
gboolean gst_v4l2_is_video_dec (GstCaps * sink_caps, GstCaps * src_caps);
#ifdef USE_V4L2_TARGET_NV
gboolean set_v4l2_controls (GstV4l2VideoDec *self);
#endif
void gst_v4l2_video_dec_register (GstPlugin * plugin,
const gchar * basename,
const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps);

View File

@@ -3,7 +3,7 @@
* Authors Ayaka <ayaka@soulik.info>
* Copyright (C) 2017 Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -38,19 +38,21 @@
#include "gstv4l2object.h"
#include "gstv4l2videoenc.h"
#include "gstnvdsseimeta.h"
#include "gst-nvcustomevent.h"
#include <string.h>
#include <gst/gst-i18n-plugin.h>
GST_DEBUG_CATEGORY_STATIC (gst_v4l2_video_enc_debug);
#define GST_CAT_DEFAULT gst_v4l2_video_enc_debug
static gboolean enable_latency_measurement = FALSE;
#ifdef USE_V4L2_TARGET_NV
#define OUTPUT_CAPS \
"video/x-raw(memory:NVMM), " \
"width = (gint) [ 1, MAX ], " \
"height = (gint) [ 1, MAX ], " \
"format = (string) { I420, NV12, P010_10LE, NV24}, " \
"format = (string) { I420, NV12, P010_10LE, Y444, Y444_10LE, NV24}, " \
"framerate = (fraction) [ 0, MAX ];"
static GstStaticCaps sink_template_caps =
@@ -70,13 +72,10 @@ typedef struct
GstCaps *src_caps;
} GstV4l2VideoEncCData;
#ifdef USE_V4L2_TARGET_NV
#define DEFAULT_CUDAENC_GPU_ID 0
#ifdef USE_V4L2_TARGET_NV
GstVideoCodecFrame *
gst_v4l2_video_enc_find_nearest_frame (GstV4l2VideoEnc *self,
GstBuffer * buf, GList * frames);
#endif
gboolean set_v4l2_video_encoder_properties (GstVideoEncoder * encoder);
gboolean setQpRange (GstV4l2Object * v4l2object, guint label, guint MinQpI,
guint MaxQpI, guint MinQpP, guint MaxQpP, guint MinQpB, guint MaxQpB);
@@ -86,10 +85,15 @@ gint gst_v4l2_trace_file_open (FILE ** file);
void gst_v4l2_trace_file_close (FILE * file);
void gst_v4l2_trace_printf (FILE * file, const gchar *fmt, ...);
static gboolean
gst_v4l2_video_enc_parse_constqp (GstV4l2VideoEnc * self, const gchar * arr);
static gboolean
gst_v4l2_video_enc_parse_initqp (GstV4l2VideoEnc * self, const gchar * arr);
static gboolean
gst_v4l2_video_enc_parse_quantization_range (GstV4l2VideoEnc * self,
const gchar * arr);
static GType gst_v4l2_videnc_hw_preset_level_get_type (void);
static GType gst_v4l2_videnc_tuning_info_get_type (void);
static void gst_v4l2_video_encoder_forceIDR (GstV4l2VideoEnc * self);
static GType gst_v4l2_videnc_ratecontrol_get_type (void);
@@ -115,6 +119,10 @@ enum
PROP_INTRA_FRAME_INTERVAL,
/* Properties exposed on dGPU only */
PROP_CUDAENC_GPU_ID,
PROP_CUDAENC_PRESET_ID,
PROP_CUDAENC_CONSTQP,
PROP_CUDAENC_INITQP,
PROP_CUDAENC_TUNING_INFO_ID,
/* Properties exposed on Tegra only */
PROP_PEAK_BITRATE,
PROP_QUANT_I_FRAMES,
@@ -128,6 +136,7 @@ enum
PROP_MAX_PERF,
PROP_IDR_FRAME_INTERVAL,
PROP_FORCE_INTRA,
PROP_COPY_METADATA,
PROP_FORCE_IDR
#endif
};
@@ -138,13 +147,18 @@ enum
#define GST_V4L2_VIDEO_ENC_PEAK_BITRATE_DEFAULT (0)
#define DEFAULT_RATE_CONTROL V4L2_MPEG_VIDEO_BITRATE_MODE_CBR
#define DEFAULT_INTRA_FRAME_INTERVAL 30
#define DEFAULT_CUDAENC_GPU_ID 0
#define DEFAULT_CUDAENC_PRESET_ID 1
#define DEFAULT_CUDAENC_TUNING_INFO_ID 3
#define DEFAULT_IDR_FRAME_INTERVAL 256
#define GST_V4L2_VIDEO_ENC_QUANT_I_FRAMES_DEFAULT (0xffffffff)
#define GST_V4L2_VIDEO_ENC_QUANT_P_FRAMES_DEFAULT (0xffffffff)
#define GST_V4L2_VIDEO_ENC_QUANT_B_FRAMES_DEFAULT (0xffffffff)
#define DEFAULT_HW_PRESET_LEVEL V4L2_ENC_HW_PRESET_ULTRAFAST
#define DEFAULT_TUNING_INFO_PRESET V4L2_ENC_TUNING_INFO_LOW_LATENCY
#define GST_TYPE_V4L2_VID_ENC_HW_PRESET_LEVEL (gst_v4l2_videnc_hw_preset_level_get_type ())
#define GST_TYPE_V4L2_VID_ENC_TUNING_INFO_PRESET (gst_v4l2_videnc_tuning_info_get_type ())
#define GST_TYPE_V4L2_VID_ENC_RATECONTROL (gst_v4l2_videnc_ratecontrol_get_type())
#define DEFAULT_VBV_SIZE 4000000
#endif
@@ -153,6 +167,16 @@ enum
G_DEFINE_ABSTRACT_TYPE (GstV4l2VideoEnc, gst_v4l2_video_enc,
GST_TYPE_VIDEO_ENCODER);
static gdouble get_current_system_timestamp(void)
{
struct timeval t1;
double elapsedTime = 0;
gettimeofday(&t1, NULL);
elapsedTime = (t1.tv_sec) * 1000.0;
elapsedTime += (t1.tv_usec) / 1000.0;
return elapsedTime;
}
#ifdef USE_V4L2_TARGET_NV
GType
gst_v4l2_enc_output_io_mode_get_type (void)
@@ -315,10 +339,38 @@ gst_v4l2_video_enc_set_property_cuvid (GObject * object,
self->iframeinterval = g_value_get_uint (value);
break;
case PROP_QUANT_RANGE:
gst_v4l2_video_enc_parse_quantization_range (self,
g_value_get_string (value));
self->set_qpRange = TRUE;
break;
case PROP_CUDAENC_GPU_ID:
self->cudaenc_gpu_id = g_value_get_uint (value);
break;
case PROP_CUDAENC_PRESET_ID:
self->cudaenc_preset_id = g_value_get_uint (value);
break;
case PROP_CUDAENC_CONSTQP:
gst_v4l2_video_enc_parse_constqp (self,
g_value_get_string (value));
break;
case PROP_CUDAENC_INITQP:
gst_v4l2_video_enc_parse_initqp (self,
g_value_get_string (value));
break;
case PROP_CUDAENC_TUNING_INFO_ID:
self->cudaenc_tuning_info_id = g_value_get_enum (value);
break;
case PROP_IDR_FRAME_INTERVAL:
self->idrinterval = g_value_get_uint (value);
break;
case PROP_FORCE_IDR:
self->force_idr = g_value_get_boolean (value);
break;
@@ -326,6 +378,10 @@ gst_v4l2_video_enc_set_property_cuvid (GObject * object,
case PROP_FORCE_INTRA:
self->force_intra = g_value_get_boolean (value);
break;
case PROP_COPY_METADATA:
self->copy_meta = g_value_get_boolean (value);
break;
#endif
/* By default, only set on output */
@@ -451,6 +507,28 @@ gst_v4l2_video_enc_get_property_cuvid (GObject * object,
g_value_set_uint(value, self->cudaenc_gpu_id);
break;
case PROP_CUDAENC_PRESET_ID:
g_value_set_uint(value, self->cudaenc_preset_id);
break;
case PROP_QUANT_RANGE:
// gst_v4l2_video_enc_get_quantization_range (self, value);
break;
case PROP_CUDAENC_CONSTQP:
break;
case PROP_CUDAENC_INITQP:
break;
case PROP_CUDAENC_TUNING_INFO_ID:
g_value_set_enum(value, self->cudaenc_tuning_info_id);
break;
case PROP_IDR_FRAME_INTERVAL:
g_value_set_uint (value, self->idrinterval);
break;
case PROP_FORCE_IDR:
g_value_set_boolean (value, self->force_idr);
break;
@@ -458,6 +536,10 @@ gst_v4l2_video_enc_get_property_cuvid (GObject * object,
case PROP_FORCE_INTRA:
g_value_set_boolean (value, self->force_intra);
break;
case PROP_COPY_METADATA:
g_value_set_boolean (value, self->copy_meta);
break;
#endif
/* By default read from output */
@@ -596,6 +678,8 @@ gst_v4l2_video_enc_start (GstVideoEncoder * encoder)
g_atomic_int_set (&self->active, TRUE);
self->output_flow = GST_FLOW_OK;
self->hash_pts_systemtime = g_hash_table_new(NULL, NULL);
return TRUE;
}
@@ -623,6 +707,8 @@ gst_v4l2_video_enc_stop (GstVideoEncoder * encoder)
gst_v4l2_object_stop (self->v4l2output);
gst_v4l2_object_stop (self->v4l2capture);
g_hash_table_destroy (self->hash_pts_systemtime);
if (self->input_state) {
gst_video_codec_state_unref (self->input_state);
self->input_state = NULL;
@@ -714,6 +800,91 @@ done:
return ret;
}
#ifdef USE_V4L2_TARGET_NV
/* Checks whether the newly received input caps indicate a Dynamic Resolution
 * Change (DRC), i.e. a width/height different from the caps currently set on
 * the encoder sink pad.
 *
 * encoder:    encoder whose current sink-pad caps are compared
 * input_caps: the newly proposed input caps
 *
 * Returns TRUE when the resolution differs, FALSE otherwise.
 */
gboolean is_drc (GstVideoEncoder *encoder, GstCaps *input_caps)
{
  int curr_width = 0, curr_height = 0, new_width = 0, new_height = 0;
  gboolean drc;
  GstStructure *sink_caps_st, *input_caps_st;
  /* NOTE(review): assumes the sink pad already has caps set by this point —
   * gst_pad_get_current_caps() would return NULL otherwise; confirm callers
   * only invoke this after initial caps negotiation. */
  GstCaps *sink_caps = gst_caps_make_writable(gst_pad_get_current_caps(encoder->sinkpad));

  sink_caps_st = gst_caps_get_structure(sink_caps, 0);
  input_caps_st = gst_caps_get_structure(input_caps, 0);

  gst_structure_get_int(sink_caps_st, "width", &curr_width);
  gst_structure_get_int(sink_caps_st, "height", &curr_height);
  gst_structure_get_int(input_caps_st, "width", &new_width);
  gst_structure_get_int(input_caps_st, "height", &new_height);

  GST_INFO_OBJECT(encoder, "curr resolution: [%dx%d], new resolution: [%dx%d]", curr_width, curr_height, new_width, new_height);

  drc = (curr_width != new_width) || (curr_height != new_height);

  /* BUG FIX: sink_caps was previously leaked on the TRUE (resolution
   * changed) return path — the unref sat below that early return. */
  gst_caps_unref(sink_caps);
  return drc;
}
/* Propagates the framerate from the newly received input caps onto the
 * encoder src-pad caps and re-sets them downstream.
 *
 * encoder:    encoder whose src-pad caps are updated
 * input_caps: input caps carrying the (possibly new) framerate
 */
void set_encoder_src_caps (GstVideoEncoder *encoder, GstCaps *input_caps)
{
  GstStructure *src_caps_st, *input_caps_st;
  const GValue *framerate = NULL;
  GstCaps *src_caps = gst_caps_make_writable(gst_pad_get_current_caps(encoder->srcpad));

  src_caps_st = gst_caps_get_structure(src_caps, 0);
  input_caps_st = gst_caps_get_structure(input_caps, 0);

  framerate = gst_structure_get_value(input_caps_st, "framerate");
  if (framerate)
    gst_structure_set_value(src_caps_st, "framerate", framerate);

  /* BUG FIX: the string returned by gst_caps_to_string() was leaked here;
   * GST_PTR_FORMAT serializes the caps without an allocation the caller
   * must free. */
  GST_DEBUG_OBJECT(encoder, "enc_src_caps: %" GST_PTR_FORMAT, src_caps);

  gst_pad_set_caps(encoder->srcpad, src_caps);
  gst_caps_unref(src_caps);
}
/* Reconfigures the encoder framerate at runtime when the framerate in the
 * newly received caps differs from the one currently set on the sink pad.
 *
 * encoder:    the v4l2 video encoder
 * input_caps: newly received input caps carrying the candidate framerate
 * label:      V4L2 control id used for the S_EXT_CTRLS call
 *             (e.g. V4L2_CID_MPEG_VIDEOENC_RECONFIG_FPS)
 *
 * Returns TRUE on success or when no change is needed, FALSE when the
 * ioctl fails.
 */
gboolean
reconfigure_fps (GstVideoEncoder *encoder, GstCaps *input_caps, guint label)
{
  GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);
  GstV4l2Object *v4l2object = self->v4l2output;
  struct v4l2_ext_control control;
  struct v4l2_ext_controls ctrls;
  v4l2_ctrl_video_framerate enc_config;
  gint curr_fps_n = 0, curr_fps_d = 0;
  gint new_fps_n = 0, new_fps_d = 0;
  gint ret = 0;

  /* Check if current fps is same as in newly received caps */
  GstStructure *sink_pad_st, *input_caps_st;
  GstCaps *sink_caps = gst_pad_get_current_caps(encoder->sinkpad);
  sink_pad_st = gst_caps_get_structure(sink_caps, 0);
  input_caps_st = gst_caps_get_structure(input_caps, 0);
  gst_structure_get_fraction (sink_pad_st, "framerate", &curr_fps_n, &curr_fps_d);
  gst_structure_get_fraction (input_caps_st, "framerate", &new_fps_n, &new_fps_d);
  /* BUG FIX: the reference returned by gst_pad_get_current_caps() was
   * previously leaked on every call (both return paths). The fractions are
   * already copied out, so the caps can be released here. */
  gst_caps_unref(sink_caps);

  GST_INFO_OBJECT(encoder, "old framerate:[%d/%d], new framerate:[%d/%d]", curr_fps_n, curr_fps_d, new_fps_n, new_fps_d);

  if ((curr_fps_n == new_fps_n) && (curr_fps_d == new_fps_d)) {
    GST_DEBUG_OBJECT(encoder, "No change in framerate");
    return TRUE;
  }

  /* BUG FIX: zero the whole control payload — only fps_n/fps_d were set
   * before, passing uninitialized stack bytes to the driver. */
  memset (&enc_config, 0, sizeof (enc_config));
  enc_config.fps_n = new_fps_n;
  enc_config.fps_d = new_fps_d;

  memset (&control, 0, sizeof (control));
  memset (&ctrls, 0, sizeof (ctrls));
  ctrls.count = 1;
  ctrls.controls = &control;
  ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
  control.id = label;
  control.string = (gchar *) &enc_config;

  ret = v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
  if (ret < 0) {
    GST_WARNING_OBJECT (encoder, "Error in reconfiguring fps\n");
    return FALSE;
  }
  return TRUE;
}
#endif
static gboolean
gst_v4l2_video_enc_set_format (GstVideoEncoder * encoder,
GstVideoCodecState * state)
@@ -731,10 +902,26 @@ gst_v4l2_video_enc_set_format (GstVideoEncoder * encoder,
GST_DEBUG_OBJECT (self, "Setting format: %" GST_PTR_FORMAT, state->caps);
if (self->input_state) {
if (gst_v4l2_object_caps_equal (self->v4l2output, state->caps)) {
GST_DEBUG_OBJECT (self, "Compatible caps");
return TRUE;
if (is_cuvid == FALSE) {
if (gst_v4l2_object_caps_equal(self->v4l2output, state->caps)) {
GST_DEBUG_OBJECT(self, "Compatible caps");
return TRUE;
}
}
#ifdef USE_V4L2_TARGET_NV
if (is_cuvid == TRUE) {
if (is_drc (encoder, state->caps)) {
/*TODO: Reset encoder to allocate new buffer size at encoder output plane*/
} else {
GST_DEBUG_OBJECT (self, "Not DRC. Reconfigure encoder with new fps if required");
if (!reconfigure_fps(encoder, state->caps, V4L2_CID_MPEG_VIDEOENC_RECONFIG_FPS))
GST_WARNING_OBJECT(self, "S_EXT_CTRLS for RECONFIG_FPS failed\n");
/* set encoder src caps */
set_encoder_src_caps(encoder, state->caps);
return TRUE;
}
}
#endif
if (gst_v4l2_video_enc_finish (encoder) != GST_FLOW_OK)
return FALSE;
@@ -1213,7 +1400,47 @@ gst_v4l2_video_enc_loop (GstVideoEncoder * encoder)
frame->output_buffer = buffer;
buffer = NULL;
if(enable_latency_measurement) /* TODO with better option */
{
gpointer in_time = g_hash_table_lookup (self->hash_pts_systemtime,
&frame->pts);
gdouble input_time = *((gdouble*)in_time);
gdouble output_time = get_current_system_timestamp ();
if (output_time < input_time)
{
gdouble time = G_MAXDOUBLE - input_time;
g_print ("Encode Latency = %f \n", output_time + time);
}
else
{
g_print ("Encode Latency = %f \n", (output_time - input_time));
}
GstCaps *reference = gst_caps_new_simple ("video/x-raw",
"component_name", G_TYPE_STRING, GST_ELEMENT_NAME(self),
/*"frame_num", G_TYPE_INT, self->frame_num++,*/
"in_timestamp", G_TYPE_DOUBLE, input_time,
"out_timestamp", G_TYPE_DOUBLE, output_time,
NULL);
GstReferenceTimestampMeta * enc_meta =
gst_buffer_add_reference_timestamp_meta (frame->output_buffer, reference,
0, 0);
if(enc_meta == NULL)
{
GST_DEBUG_OBJECT (encoder, "enc_meta: %p", enc_meta);
}
gst_caps_unref(reference);
}
#ifdef USE_V4L2_TARGET_NV
if (self->copy_meta == TRUE)
{
if (!gst_buffer_copy_into (frame->output_buffer, frame->input_buffer,
(GstBufferCopyFlags)GST_BUFFER_COPY_METADATA, 0, -1)) {
GST_DEBUG_OBJECT (encoder, "Buffer metadata copy failed \n");
}
}
if (self->tracing_file_enc) {
gettimeofday (&ts, NULL);
done_time = ((gint64) ts.tv_sec * 1000000 + ts.tv_usec) / 1000;
@@ -1285,6 +1512,12 @@ gst_v4l2_video_enc_handle_frame (GstVideoEncoder * encoder,
}
#endif
if (enable_latency_measurement)
{
self->buffer_in_time = get_current_system_timestamp ();
g_hash_table_insert (self->hash_pts_systemtime, &frame->pts, &self->buffer_in_time);
}
if (G_UNLIKELY (!g_atomic_int_get (&self->active)))
goto flushing;
@@ -1345,6 +1578,8 @@ gst_v4l2_video_enc_handle_frame (GstVideoEncoder * encoder,
{
uint32_t total_metadata_size = meta->sei_metadata_size;
GST_DEBUG_OBJECT (self, "total metadata size = %d\n", total_metadata_size);
self->v4l2output->sei_payload_size = 0;
self->v4l2output->sei_payload = NULL;
if (meta->sei_metadata_type == (guint)GST_USER_SEI_META)
{
self->v4l2output->sei_payload_size = meta->sei_metadata_size;
@@ -1602,6 +1837,39 @@ gst_v4l2_video_enc_sink_event (GstVideoEncoder * encoder, GstEvent * event)
break;
}
#ifdef USE_V4L2_TARGET_NV
if ((GstNvCustomEventType)GST_EVENT_TYPE (event) == GST_NVEVENT_ENC_BITRATE_UPDATE) {
gchar* stream_id = NULL;
gst_nvevent_parse_enc_bitrate_update (event, &stream_id, &self->bitrate);
if (GST_V4L2_IS_OPEN (self->v4l2output)) {
if (!set_v4l2_video_mpeg_class (self->v4l2output,
V4L2_CID_MPEG_VIDEO_BITRATE, self->bitrate)) {
g_print ("S_EXT_CTRLS for BITRATE failed\n");
}
}
}
if ((GstNvCustomEventType)GST_EVENT_TYPE (event) == GST_NVEVENT_ENC_FORCE_IDR) {
gchar* stream_id = NULL;
gst_nvevent_parse_enc_force_idr (event, &stream_id, &self->force_idr);
}
if ((GstNvCustomEventType)GST_EVENT_TYPE (event) == GST_NVEVENT_ENC_FORCE_INTRA) {
gchar* stream_id = NULL;
gst_nvevent_parse_enc_force_intra (event, &stream_id, &self->force_intra);
}
if ((GstNvCustomEventType)GST_EVENT_TYPE (event) == GST_NVEVENT_ENC_IFRAME_INTERVAL_UPDATE) {
gchar* stream_id = NULL;
gst_nvevent_parse_enc_iframeinterval_update (event, &stream_id, &self->iframeinterval);
if (!set_v4l2_video_mpeg_class (self->v4l2output,
V4L2_CID_MPEG_VIDEO_GOP_SIZE, self->iframeinterval)) {
g_print ("S_EXT_CTRLS for GOP_SIZE failed\n");
return FALSE;
}
}
#endif
return ret;
}
@@ -1669,6 +1937,8 @@ gst_v4l2_video_enc_init (GstV4l2VideoEnc * self)
self->MinQpB = (guint) - 1;
self->MaxQpB = (guint) - 1;
self->set_qpRange = FALSE;
self->force_idr = FALSE;
self->force_intra = FALSE;
self->hw_preset_level = DEFAULT_HW_PRESET_LEVEL;
self->virtual_buffer_size = DEFAULT_VBV_SIZE;
self->ratecontrol_enable = TRUE;
@@ -1678,7 +1948,17 @@ gst_v4l2_video_enc_init (GstV4l2VideoEnc * self)
self->best_prev = NULL;
self->buf_pts_prev = GST_CLOCK_STIME_NONE;
if (is_cuvid == TRUE)
{
self->cudaenc_gpu_id = DEFAULT_CUDAENC_GPU_ID;
self->cudaenc_preset_id = DEFAULT_CUDAENC_PRESET_ID;
self->cudaenc_tuning_info_id = DEFAULT_TUNING_INFO_PRESET;
}
const gchar * latency = g_getenv("NVDS_ENABLE_LATENCY_MEASUREMENT");
if(latency)
{
enable_latency_measurement = TRUE;
}
#endif
}
@@ -1755,6 +2035,21 @@ gst_v4l2_video_enc_class_init (GstV4l2VideoEncClass * klass)
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_QUANT_RANGE,
g_param_spec_string ("qp-range", "qpp-range",
"Qunatization range for P, I and B frame,\n"
"\t\t\t Use string with unsigned integer values of Qunatization Range \n"
"\t\t\t in MinQpP,MaxQpP:MinQpI,MaxQpI:MinQpB,MaxQpB order, to set the property.",
"-1,-1:-1,-1:-1,-1",
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property (gobject_class, PROP_IDR_FRAME_INTERVAL,
g_param_spec_uint ("idrinterval", "IDR Frame interval",
"Encoding IDR Frame occurance frequency",
0, G_MAXUINT, DEFAULT_IDR_FRAME_INTERVAL,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
if (is_cuvid == TRUE) {
g_object_class_install_property (gobject_class, PROP_CUDAENC_GPU_ID,
g_param_spec_uint ("gpu-id",
@@ -1764,6 +2059,31 @@ gst_v4l2_video_enc_class_init (GstV4l2VideoEncClass * klass)
G_MAXUINT, DEFAULT_CUDAENC_GPU_ID,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_CUDAENC_PRESET_ID,
g_param_spec_uint ("preset-id",
"CUVID Encoder Preset ID",
"For each tuning info, seven presets from P1 (highest performance) to P7 (lowest performance) \n"
"\t\t\thave been provided to control performance/quality trade off. Using these presets will\n"
"\t\t\tautomatically set all relevant encoding parameters for the selected tuning info ",
1,
7, DEFAULT_CUDAENC_PRESET_ID,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_CUDAENC_CONSTQP,
g_param_spec_string ("constqp", "CUVID Encoder constqp values",
"Set unsigned integer values for constqp, control-rate should be set to GST_V4L2_VIDENC_CONSTANT_QP,\n"
"\t\t\tUse string with values of constQP in constQpI:constQpP:constQpB order, to set the property.",
"0:0:0",
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property (gobject_class, PROP_CUDAENC_INITQP,
g_param_spec_string ("initqp", "CUVID Encoder initqp values",
"Set unsigned integer values for initqp,\n"
"\t\t\tUse string with values of initQP in IInitQP:PInitQP:BInitQP order, to set the property.\n"
"\t\t\tThis provides rough hint to encoder to influence the qp difference between I, P and B",
"0:0:0",
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property (gobject_class, PROP_FORCE_IDR,
g_param_spec_boolean ("force-idr",
"Force an IDR frame",
@@ -1771,6 +2091,14 @@ gst_v4l2_video_enc_class_init (GstV4l2VideoEncClass * klass)
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_CUDAENC_TUNING_INFO_ID,
g_param_spec_enum ("tuning-info-id", "TuningInfoforHWEncoder",
"Tuning Info Preset for encoder",
GST_TYPE_V4L2_VID_ENC_TUNING_INFO_PRESET,
DEFAULT_TUNING_INFO_PRESET,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_FORCE_INTRA,
g_param_spec_boolean ("force-intra",
"Force an INTRA frame",
@@ -1778,14 +2106,14 @@ gst_v4l2_video_enc_class_init (GstV4l2VideoEncClass * klass)
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
} else if (is_cuvid == FALSE) {
g_object_class_install_property (gobject_class, PROP_IDR_FRAME_INTERVAL,
g_param_spec_uint ("idrinterval", "IDR Frame interval",
"Encoding IDR Frame occurance frequency",
0, G_MAXUINT, DEFAULT_IDR_FRAME_INTERVAL,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
g_object_class_install_property (gobject_class, PROP_COPY_METADATA,
g_param_spec_boolean ("copy-meta",
"Copies input metadata on output buffer",
"Copies input metadata on output buffer",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
} else if (is_cuvid == FALSE) {
g_object_class_install_property (gobject_class, PROP_PEAK_BITRATE,
g_param_spec_uint ("peak-bitrate", "Peak Bitrate",
"Peak bitrate in variable control-rate\n"
@@ -1795,14 +2123,6 @@ gst_v4l2_video_enc_class_init (GstV4l2VideoEncClass * klass)
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_PLAYING));
g_object_class_install_property (gobject_class, PROP_QUANT_RANGE,
g_param_spec_string ("qp-range", "qpp-range",
"Qunatization range for P, I and B frame,\n"
"\t\t\t Use string with values of Qunatization Range \n"
"\t\t\t in MinQpP-MaxQpP:MinQpI-MaxQpI:MinQpB-MaxQpB order, to set the property.",
"-1,-1:-1,-1:-1,-1",
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property (gobject_class, PROP_QUANT_I_FRAMES,
g_param_spec_uint ("quant-i-frames", "I-Frame Quantization",
"Quantization parameter for I-frames (0xffffffff=component default),\n"
@@ -2085,6 +2405,31 @@ gst_v4l2_trace_printf (FILE * TracingFile, const gchar *fmt, ...)
}
}
/* Registers (once) and returns the GEnum type backing the encoder's
 * "tuning-info-id" property.
 * NOTE(review): the registered type name "GstV4L2VideoEncTuingInfoPreset"
 * contains a typo ("Tuing"); it is kept as-is because GType names are
 * externally visible (gst-inspect, serialized pipelines) and renaming
 * would be a behavioral change — confirm before fixing. */
static GType
gst_v4l2_videnc_tuning_info_get_type (void)
{
  static GType qtype = 0;

  if (qtype == 0) {
    static const GEnumValue values[] = {
      /*{V4L2_ENC_TUNING_INFO_UNDEFINED,
          "No Tuning Info", "UndefinedTuningInfo"},*/
      {V4L2_ENC_TUNING_INFO_HIGH_QUALITY,
          "Tuning Preset for High Quality", "HighQualityPreset"},
      {V4L2_ENC_TUNING_INFO_LOW_LATENCY,
          "Tuning Preset for Low Latency", "LowLatencyPreset"},
      {V4L2_ENC_TUNING_INFO_ULTRA_LOW_LATENCY,
          /* BUG FIX: description previously duplicated the Low Latency text */
          "Tuning Preset for Ultra Low Latency", "UltraLowLatencyPreset"},
      {V4L2_ENC_TUNING_INFO_LOSSLESS,
          "Tuning Preset for Lossless", "LosslessPreset"},
      {0, NULL, NULL}
    };
    qtype = g_enum_register_static ("GstV4L2VideoEncTuingInfoPreset", values);
  }
  return qtype;
}
static GType
gst_v4l2_videnc_hw_preset_level_get_type (void)
{
@@ -2111,22 +2456,68 @@ static GType
gst_v4l2_videnc_ratecontrol_get_type (void)
{
static volatile gsize ratecontrol = 0;
static const GEnumValue rc_type[] = {
{V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, "GST_V4L2_VIDENC_VARIABLE_BITRATE",
"variable_bitrate"},
{V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, "GST_V4L2_VIDENC_CONSTANT_BITRATE",
"constant_bitrate"},
{0, NULL, NULL}
};
if (is_cuvid == false) {
static const GEnumValue rc_type[] = {
{V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, "GST_V4L2_VIDENC_VARIABLE_BITRATE",
"variable_bitrate"},
{V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, "GST_V4L2_VIDENC_CONSTANT_BITRATE",
"constant_bitrate"},
{0, NULL, NULL}
};
if (g_once_init_enter (&ratecontrol)) {
GType tmp =
g_enum_register_static ("GstV4l2VideoEncRateControlType", rc_type);
g_once_init_leave (&ratecontrol, tmp);
if (g_once_init_enter (&ratecontrol)) {
GType tmp =
g_enum_register_static ("GstV4l2VideoEncRateControlType", rc_type);
g_once_init_leave (&ratecontrol, tmp);
}
} else {
static const GEnumValue rc_type_cuvid[] = {
{V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, "GST_V4L2_VIDENC_VARIABLE_BITRATE",
"variable_bitrate"},
{V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, "GST_V4L2_VIDENC_CONSTANT_BITRATE",
"constant_bitrate"},
{V4L2_MPEG_VIDEO_BITRATE_MODE_CONSTQP, "GST_V4L2_VIDENC_CONSTANT_QP",
"constantQP"},
{0, NULL, NULL}
};
if (g_once_init_enter (&ratecontrol)) {
GType tmp =
g_enum_register_static ("GstV4l2VideoEncRateControlType", rc_type_cuvid);
g_once_init_leave (&ratecontrol, tmp);
}
}
return (GType) ratecontrol;
}
/* Parses the "constqp" property string "constQpI:constQpP:constQpB" into
 * self->constQpI/constQpP/constQpB.
 * Returns FALSE (leaving later fields untouched) when the string is NULL
 * or lacks the ':' separators. */
static gboolean
gst_v4l2_video_enc_parse_constqp (GstV4l2VideoEnc * self,
    const gchar * arr)
{
  gchar *str;

  if (arr == NULL)
    return FALSE;

  self->constQpI = atoi (arr);
  /* BUG FIX: g_strstr_len() returns NULL when ':' is absent; the previous
   * unconditional "+ 1" turned that into a near-NULL pointer dereference
   * inside atoi() for malformed property strings. */
  str = g_strstr_len (arr, -1, ":");
  if (str == NULL)
    return FALSE;
  self->constQpP = atoi (str + 1);
  str = g_strstr_len (str + 1, -1, ":");
  if (str == NULL)
    return FALSE;
  self->constQpB = atoi (str + 1);
  return TRUE;
}
/* Parses the "initqp" property string "IInitQP:PInitQP:BInitQP" into
 * self->IInitQP/PInitQP/BInitQP.
 * Returns FALSE (leaving later fields untouched) when the string is NULL
 * or lacks the ':' separators. */
static gboolean
gst_v4l2_video_enc_parse_initqp (GstV4l2VideoEnc * self,
    const gchar * arr)
{
  gchar *str;

  if (arr == NULL)
    return FALSE;

  self->IInitQP = atoi (arr);
  /* BUG FIX: g_strstr_len() returns NULL when ':' is absent; the previous
   * unconditional "+ 1" turned that into a near-NULL pointer dereference
   * inside atoi() for malformed property strings. */
  str = g_strstr_len (arr, -1, ":");
  if (str == NULL)
    return FALSE;
  self->PInitQP = atoi (str + 1);
  str = g_strstr_len (str + 1, -1, ":");
  if (str == NULL)
    return FALSE;
  self->BInitQP = atoi (str + 1);
  return TRUE;
}
static gboolean
gst_v4l2_video_enc_parse_quantization_range (GstV4l2VideoEnc * self,
const gchar * arr)
@@ -2277,7 +2668,6 @@ set_v4l2_video_encoder_properties (GstVideoEncoder * encoder)
return FALSE;
}
#ifndef USE_V4L2_TARGET_NV_CODECSDK
if (video_enc->idrinterval) {
if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
V4L2_CID_MPEG_VIDEO_IDR_INTERVAL, video_enc->idrinterval)) {
@@ -2285,7 +2675,6 @@ set_v4l2_video_encoder_properties (GstVideoEncoder * encoder)
return FALSE;
}
}
#endif
if (video_enc->iframeinterval) {
if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,

View File

@@ -66,6 +66,12 @@ struct _GstV4l2VideoEnc
guint32 MaxQpP;
guint32 MinQpB;
guint32 MaxQpB;
guint32 constQpI;
guint32 constQpP;
guint32 constQpB;
guint32 IInitQP;
guint32 PInitQP;
guint32 BInitQP;
gboolean set_qpRange;
guint32 hw_preset_level;
guint virtual_buffer_size;
@@ -77,9 +83,14 @@ struct _GstV4l2VideoEnc
FILE *tracing_file_enc;
GQueue *got_frame_pt;
guint32 cudaenc_gpu_id;
guint32 cudaenc_preset_id;
guint32 cudaenc_tuning_info_id;
gboolean slice_output;
GstVideoCodecFrame *best_prev;
GstClockTime buf_pts_prev;
gdouble buffer_in_time;
GHashTable* hash_pts_systemtime;
gboolean copy_meta;
#endif
/* < private > */
@@ -132,5 +143,11 @@ void gst_v4l2_video_enc_register (GstPlugin * plugin, GType type,
const char *codec, const gchar * basename, const gchar * device_path,
GstCaps * sink_caps, GstCaps * codec_caps, GstCaps * src_caps);
#ifdef USE_V4L2_TARGET_NV
void set_encoder_src_caps (GstVideoEncoder *encoder, GstCaps *input_caps);
gboolean is_drc (GstVideoEncoder *encoder, GstCaps *input_caps);
gboolean reconfigure_fps (GstVideoEncoder *encoder, GstCaps *input_caps, guint label);
#endif
G_END_DECLS
#endif /* __GST_V4L2_VIDEO_ENC_H__ */

296
gst-v4l2/nalutils.c Normal file
View File

@@ -0,0 +1,296 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "nalutils.h"
/* Compute Ceil(Log2(v)).
 * Loop form of the branchless integer-log2 hack from
 * <http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog>:
 * the result is the bit position of the highest set bit of (v - 1), plus 1.
 * Edge cases match the original: v == 1 yields 1 (not the mathematical 0)
 * and v == 0 wraps to yield 32. */
guint
ceil_log2 (guint32 v)
{
  guint32 x = v - 1;
  guint highest_bit = 0;

  while (x > 1) {
    x >>= 1;
    highest_bit++;
  }
  return highest_bit + 1;
}
/****** Nal parser ******/
void
init_nal (NalReader * nr, const guint8 * data, guint size)
{
nr->data = data;
nr->size = size;
nr->n_epb = 0;
nr->byte = 0;
nr->bits_in_cache = 0;
/* fill with something other than 0 to detect emulation prevention bytes */
nr->first_byte = 0xff;
nr->cache = 0xff;
}
/* Ensure at least @nbits bits are available in the reader's cache, pulling
 * bytes from the data as needed and transparently dropping H.26x
 * emulation-prevention bytes (a 0x03 following two 0x00 bytes).
 * Returns FALSE when fewer than @nbits bits remain in the data. */
gboolean
_read (NalReader * nr, guint nbits)
{
  /* total bits needed beyond what is cached must not exceed the data size */
  if (G_UNLIKELY (nr->byte * 8 + (nbits - nr->bits_in_cache) > nr->size * 8)) {
    GST_DEBUG ("Can not read %u bits, bits in cache %u, Byte * 8 %u, size in "
        "bits %u", nbits, nr->bits_in_cache, nr->byte * 8, nr->size * 8);
    return FALSE;
  }
  while (nr->bits_in_cache < nbits) {
    guint8 byte;
    gboolean check_three_byte;
    check_three_byte = TRUE;
  next_byte:
    if (G_UNLIKELY (nr->byte >= nr->size))
      return FALSE;
    byte = nr->data[nr->byte++];
    /* check if the byte is a emulation_prevention_three_byte: 0x03 preceded
     * by two zero bytes (first_byte holds the previous byte, the low byte of
     * cache the one before that) */
    if (check_three_byte && byte == 0x03 && nr->first_byte == 0x00 &&
        ((nr->cache & 0xff) == 0)) {
      /* next byte goes unconditionally to the cache, even if it's 0x03 */
      check_three_byte = FALSE;
      nr->n_epb++;
      goto next_byte;
    }
    /* shift the previous byte into the cache and hold the new one aside */
    nr->cache = (nr->cache << 8) | nr->first_byte;
    nr->first_byte = byte;
    nr->bits_in_cache += 8;
  }
  return TRUE;
}
/* Discard @nbits bits from the reader. Only valid for amounts that fit in
 * the cache (see _skip_long() for arbitrary counts). */
gboolean
_skip (NalReader * nr, guint nbits)
{
  g_assert (nbits <= 8 * sizeof (nr->cache));

  if (_read (nr, nbits)) {
    nr->bits_in_cache -= nbits;
    return TRUE;
  }
  return FALSE;
}
/* Generic version to skip any number of bits */
gboolean
_skip_long (NalReader * nr, guint nbits)
{
  /* Leave out enough bits in the cache once we are finished */
  const guint skip_size = 4 * sizeof (nr->cache);
  guint remaining = nbits;
  /* First chunk is nbits modulo skip_size, so the remainder divides evenly;
   * every later iteration skips a full skip_size chunk. (If nbits is an
   * exact multiple, the first iteration skips 0 bits — harmless.) */
  nbits %= skip_size;
  while (remaining > 0) {
    if (!_skip (nr, nbits))
      return FALSE;
    remaining -= nbits;
    nbits = skip_size;
  }
  return TRUE;
}
/* Current read position, in bits from the start of the NAL data. */
guint
_get_pos (const NalReader * nr)
{
  guint consumed_bits = nr->byte * 8;

  return consumed_bits - nr->bits_in_cache;
}
/* Number of unread bits left in the NAL data. */
guint
_get_remaining (const NalReader * nr)
{
  guint unread_bytes = nr->size - nr->byte;

  return unread_bytes * 8 + nr->bits_in_cache;
}
/* Number of emulation-prevention bytes (0x03) skipped by _read() so far. */
guint
_get_epb_count (const NalReader * nr)
{
  return nr->n_epb;
}
/* _READ_BITS(bits): expands to _get_bits_uint{8,16,32}(), extracting @nbits
 * bits from the reader into *val.
 * After _read() guarantees the bits are cached, the requested value spans
 * first_byte (newest byte) and cache (older bytes): first_byte is shifted
 * down, the cache OR-ed in above it, then the result masked to @nbits.
 * NOTE(review): when nbits == bits the mask is skipped via the
 * `nbits < bits` guard, relying on truncation by the target-width
 * assignment. */
#define _READ_BITS(bits) \
gboolean \
_get_bits_uint##bits (NalReader *nr, guint##bits *val, guint nbits) \
{ \
guint shift; \
\
if (!_read (nr, nbits)) \
return FALSE; \
\
/* bring the required bits down and truncate */ \
shift = nr->bits_in_cache - nbits; \
*val = nr->first_byte >> shift; \
\
*val |= nr->cache << (8 - shift); \
/* mask out required bits */ \
if (nbits < bits) \
*val &= ((guint##bits)1 << nbits) - 1; \
\
nr->bits_in_cache = shift; \
\
return TRUE; \
} \
_READ_BITS (8);
_READ_BITS (16);
_READ_BITS (32);
/* _PEEK_BITS(bits): expands to _peek_bits_uint{bits}(), which reads like
 * _get_bits_uint{bits}() but operates on a local copy of the reader, so the
 * caller's position is left untouched. */
#define _PEEK_BITS(bits) \
gboolean \
_peek_bits_uint##bits (const NalReader *nr, guint##bits *val, guint nbits) \
{ \
NalReader tmp; \
\
tmp = *nr; \
return _get_bits_uint##bits (&tmp, val, nbits); \
}
_PEEK_BITS (8);
/* Read an unsigned Exp-Golomb (ue(v)) value: count leading zero bits up to
 * the first 1, then read that many suffix bits.
 * Result: *val = 2^leading_zeros - 1 + suffix.
 * Returns FALSE on bitstream exhaustion or when more than 31 leading zeros
 * would overflow the guint32 result. */
gboolean
_get_ue (NalReader * nr, guint32 * val)
{
  guint i = 0;
  guint8 bit;
  guint32 value;

  if (G_UNLIKELY (!_get_bits_uint8 (nr, &bit, 1)))
    return FALSE;
  while (bit == 0) {
    i++;
    if (G_UNLIKELY (!_get_bits_uint8 (nr, &bit, 1)))
      return FALSE;
  }
  if (G_UNLIKELY (i > 31))
    return FALSE;
  if (G_UNLIKELY (!_get_bits_uint32 (nr, &value, i)))
    return FALSE;
  /* BUG FIX: shift an unsigned 1 — `1 << 31` on a signed int is undefined
   * behavior (signed overflow), and i can legitimately reach 31 here. */
  *val = ((guint32) 1 << i) - 1 + value;
  return TRUE;
}
/* Read a signed Exp-Golomb (se(v)) value. The underlying ue(v) code is
 * mapped as 1 -> 1, 2 -> -1, 3 -> 2, 4 -> -2, ... */
gboolean
_get_se (NalReader * nr, gint32 * val)
{
  guint32 ue;

  if (G_UNLIKELY (!_get_ue (nr, &ue)))
    return FALSE;

  *val = (ue % 2) ? (gint32) (ue / 2) + 1 : -(gint32) (ue / 2);
  return TRUE;
}
/* TRUE when the reader sits exactly on a byte boundary (no cached bits). */
gboolean
_is_byte_aligned (NalReader * nr)
{
  return nr->bits_in_cache == 0;
}
/* Implements more_rbsp_data(): TRUE when meaningful RBSP bits remain before
 * the rbsp_stop_one_bit and its trailing zero padding. Works on a local copy
 * of the reader, so the caller's position is unchanged. */
gboolean
_has_more_data (NalReader * nr)
{
  NalReader nr_tmp;
  guint remaining, nbits;
  guint8 rbsp_stop_one_bit, zero_bits;
  remaining = _get_remaining (nr);
  if (remaining == 0)
    return FALSE;
  /* scan on a copy so @nr is not advanced */
  nr_tmp = *nr;
  nr = &nr_tmp;
  /* The spec defines that more_rbsp_data() searches for the last bit
     equal to 1, and that it is the rbsp_stop_one_bit. Subsequent bits
     until byte boundary is reached shall be zero.
     This means that more_rbsp_data() is FALSE if the next bit is 1
     and the remaining bits until byte boundary are zero. One way to
     be sure that this bit was the very last one, is that every other
     bit after we reached byte boundary are also set to zero.
     Otherwise, if the next bit is 0 or if there are non-zero bits
     afterwards, then then we have more_rbsp_data() */
  if (!_get_bits_uint8 (nr, &rbsp_stop_one_bit, 1))
    return FALSE;
  if (!rbsp_stop_one_bit)
    return TRUE;
  /* verify every remaining bit is zero; first chunk runs to the byte
     boundary, later chunks are whole bytes */
  nbits = --remaining % 8;
  while (remaining > 0) {
    if (!_get_bits_uint8 (nr, &zero_bits, nbits))
      return FALSE;
    if (zero_bits != 0)
      return TRUE;
    remaining -= nbits;
    nbits = 8;
  }
  return FALSE;
}
/*********** end of nal parser ***************/
/* Scan @data for an Annex-B start code prefix (0x00 0x00 0x01).
 * Returns the byte offset of the first match, or -1 when none is found. */
gint
scan_for_start_codes (const guint8 * data, guint size)
{
  GstByteReader reader;

  gst_byte_reader_init (&reader, data, size);

  /* NALU not empty, so we can at least expect 1 (even 2) bytes following sc */
  return gst_byte_reader_masked_scan_uint32 (&reader, 0xffffff00, 0x00000100,
      0, size);
}

170
gst-v4l2/nalutils.h Normal file
View File

@@ -0,0 +1,170 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <gst/base/gstbytereader.h>
#include <gst/base/gstbitreader.h>
#include <string.h>
guint ceil_log2 (guint32 v);
/* Cursor state for bit-level reading of a NAL unit's RBSP payload.
 * The n_epb counter suggests emulation-prevention bytes (0x03 after two
 * zero bytes) are accounted for during reads — confirm against the
 * _read implementation in the corresponding .c file. */
typedef struct
{
const guint8 *data;
guint size;
guint n_epb; /* Number of emulation prevention bytes */
guint byte; /* Byte position */
guint bits_in_cache; /* bitpos in the cache of next bit */
guint8 first_byte; /* NOTE(review): purpose not visible here — presumably the most recently cached byte; verify in _read */
guint64 cache; /* cached bytes */
} NalReader;
G_GNUC_INTERNAL
void init_nal (NalReader * nr, const guint8 * data, guint size);
G_GNUC_INTERNAL
gboolean _read (NalReader * nr, guint nbits);
G_GNUC_INTERNAL
gboolean _skip (NalReader * nr, guint nbits);
G_GNUC_INTERNAL
gboolean _skip_long (NalReader * nr, guint nbits);
G_GNUC_INTERNAL
guint _get_pos (const NalReader * nr);
G_GNUC_INTERNAL
guint _get_remaining (const NalReader * nr);
G_GNUC_INTERNAL
guint _get_epb_count (const NalReader * nr);
G_GNUC_INTERNAL
gboolean _is_byte_aligned (NalReader * nr);
G_GNUC_INTERNAL
gboolean _has_more_data (NalReader * nr);
#define _READ_BITS_H(bits) \
G_GNUC_INTERNAL \
gboolean _get_bits_uint##bits (NalReader *nr, guint##bits *val, guint nbits)
_READ_BITS_H (8);
_READ_BITS_H (16);
_READ_BITS_H (32);
#define _PEEK_BITS_H(bits) \
G_GNUC_INTERNAL \
gboolean _peek_bits_uint##bits (const NalReader *nr, guint##bits *val, guint nbits)
_PEEK_BITS_H (8);
G_GNUC_INTERNAL
gboolean _get_ue (NalReader * nr, guint32 * val);
G_GNUC_INTERNAL
gboolean _get_se (NalReader * nr, gint32 * val);
/* Validates that @val does not exceed @max; logs a warning and jumps to
 * the caller's `error` label on violation. Caller must define `error:`. */
#define CHECK_ALLOWED_MAX(val, max) { \
if (val > max) { \
GST_WARNING ("value greater than max. value: %d, max %d", \
val, max); \
goto error; \
} \
}
/* Validates that @val lies within [@min, @max]; logs a warning and jumps
 * to the caller's `error` label on violation. Caller must define `error:`. */
#define CHECK_ALLOWED(val, min, max) { \
if (val < min || val > max) { \
GST_WARNING ("value not in allowed range. value: %d, range %d-%d", \
val, min, max); \
goto error; \
} \
}
#define READ_UINT8(nr, val, nbits) { \
if (!_get_bits_uint8 (nr, &val, nbits)) { \
GST_WARNING ("failed to read uint8, nbits: %d", nbits); \
goto error; \
} \
}
#define READ_UINT16(nr, val, nbits) { \
if (!_get_bits_uint16 (nr, &val, nbits)) { \
GST_WARNING ("failed to read uint16, nbits: %d", nbits); \
goto error; \
} \
}
#define READ_UINT32(nr, val, nbits) { \
if (!_get_bits_uint32 (nr, &val, nbits)) { \
GST_WARNING ("failed to read uint32, nbits: %d", nbits); \
goto error; \
} \
}
/* Reads @nbits bits from @nr into the guint64 @val; on failure logs a
 * warning and jumps to the caller's `error` label.
 * Fixed: the warning previously said "uint32" (copy-paste from
 * READ_UINT32), which made failure logs misleading. */
#define READ_UINT64(nr, val, nbits) { \
if (!_get_bits_uint64 (nr, &val, nbits)) { \
GST_WARNING ("failed to read uint64, nbits: %d", nbits); \
goto error; \
} \
}
#define READ_UE(nr, val) { \
if (!_get_ue (nr, &val)) { \
GST_WARNING ("failed to read UE"); \
goto error; \
} \
}
#define READ_UE_ALLOWED(nr, val, min, max) { \
guint32 tmp; \
READ_UE (nr, tmp); \
CHECK_ALLOWED (tmp, min, max); \
val = tmp; \
}
#define READ_UE_MAX(nr, val, max) { \
guint32 tmp; \
READ_UE (nr, tmp); \
CHECK_ALLOWED_MAX (tmp, max); \
val = tmp; \
}
#define READ_SE(nr, val) { \
if (!_get_se (nr, &val)) { \
GST_WARNING ("failed to read SE"); \
goto error; \
} \
}
#define READ_SE_ALLOWED(nr, val, min, max) { \
gint32 tmp; \
READ_SE (nr, tmp); \
CHECK_ALLOWED (tmp, min, max); \
val = tmp; \
}
G_GNUC_INTERNAL
gint scan_for_start_codes (const guint8 * data, guint size);

View File

@@ -30,17 +30,17 @@
#define UUID_SIZE 16
#define USER_DATA_UNREGISTERED_TYPE 5
gboolean check_uuid(uint8_t *stream);
uint8_t* parse_sei_unit(uint8_t * bs_ptr, guint *size);
uint8_t *parse_sei_data (uint8_t *bs, uint32_t size, uint32_t *payload_size);
gboolean check_uuid(uint8_t *stream, char *sei_uuid_string);
uint8_t* parse_sei_unit(uint8_t * bs_ptr, guint *size, char *sei_uuid_string);
uint8_t *parse_sei_data (uint8_t *bs, uint32_t size, uint32_t *payload_size, char *sei_uuid_string);
gboolean check_uuid(uint8_t *stream)
gboolean check_uuid(uint8_t *stream, char *sei_uuid_string)
{
char uuid_string[UUID_SIZE] = {0};
uint32_t size = snprintf (uuid_string, UUID_SIZE, "%s", stream);
if (size == (UUID_SIZE-1))
{
if (!strncmp (uuid_string, "NVDS_CUSTOMMETA", (UUID_SIZE-1)))
if (!strncmp (uuid_string, sei_uuid_string, (UUID_SIZE-1)))
return TRUE;
else
return FALSE;
@@ -49,26 +49,27 @@ gboolean check_uuid(uint8_t *stream)
return FALSE;
}
uint8_t* parse_sei_unit(uint8_t * bs_ptr, guint *size)
uint8_t* parse_sei_unit(uint8_t * bs_ptr, guint *size, char *sei_uuid_string)
{
int payload_type = 0;
int payload_size = 0;
uint8_t* payload = NULL;
int i, emu_count;
int i;
/* printf("found a SEI NAL unit!\n"); */
payload_type += *bs_ptr++;
payload_type = *bs_ptr++;
while (payload_size % 0xFF == 0)
{
payload_size += *bs_ptr++;
}
/* printf("payload_type = %i payload_size = %i\n", payload_type, payload_size); */
//printf("payload_type = %i payload_size = %i\n", payload_type, payload_size);
if (!check_uuid (bs_ptr))
if (!check_uuid (bs_ptr, sei_uuid_string))
{
/* printf ("Expected UUID not found\n"); */
//printf ("Expected UUID not found\n");
bs_ptr += (payload_size - UUID_SIZE);
return NULL;
}
else
@@ -82,19 +83,17 @@ uint8_t* parse_sei_unit(uint8_t * bs_ptr, guint *size)
{
payload = (uint8_t*)malloc((payload_size - UUID_SIZE)*sizeof(uint8_t));
for (i = 0, emu_count = 0; i < (payload_size - UUID_SIZE);
i++, emu_count++)
for (i = 0; i < (payload_size - UUID_SIZE); i++)
{
payload[i] = *bs_ptr++;
payload[i] = *bs_ptr;
// drop emulation prevention bytes
if ((emu_count >= 2)
&& (payload[i] == 0x03)
&& (payload[i-1] == 0x00)
&& (payload[i-2] == 0x00))
if ((*(bs_ptr) == 0x03)
&& (*(bs_ptr - 1) == 0x00)
&& (*(bs_ptr - 2) == 0x00))
{
i--;
emu_count = 0;
}
bs_ptr++;
}
return payload;
}
@@ -104,13 +103,16 @@ uint8_t* parse_sei_unit(uint8_t * bs_ptr, guint *size)
}
}
uint8_t *parse_sei_data (uint8_t *bs, uint32_t size, uint32_t *payload_size)
uint8_t *parse_sei_data (uint8_t *bs, uint32_t size, uint32_t *payload_size, char *sei_uuid_string)
{
if (sei_uuid_string == NULL)
return NULL;
int checklen = 0;
unsigned int sei_payload_size = 0;
uint8_t *bs_ptr = bs;
uint8_t *bs_ptr_end = bs + size;
uint8_t *payload = NULL;
while (size > 0)
while (bs_ptr_end > bs_ptr)
{
if (checklen < 2 && *bs_ptr++ == 0x00)
checklen++;
@@ -120,7 +122,7 @@ uint8_t *parse_sei_data (uint8_t *bs, uint32_t size, uint32_t *payload_size)
checklen++;
else if (checklen == 4 && *bs_ptr++ == 0x06)
{
payload = parse_sei_unit(bs_ptr, &sei_payload_size);
payload = parse_sei_unit(bs_ptr, &sei_payload_size, sei_uuid_string);
checklen = 0;
if (payload != NULL)
{
@@ -128,12 +130,12 @@ uint8_t *parse_sei_data (uint8_t *bs, uint32_t size, uint32_t *payload_size)
return payload;
}
else
return NULL;
{
continue;
}
}
else
checklen = 0;
size--;
}
return NULL;
}

View File

@@ -1,897 +0,0 @@
/*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA Multimedia Utilities: Buffering and Transform/Composition/Blending</b>
*
*/
/**
 * @defgroup ee_nvbuffering_group NvBufUtils API (Deprecated)
 * @ingroup ds_nvbuf_api
 * NVIDIA buffering utility library for use by applications.
 * The utility also transforms, composites, and blends.
 * @{
 */
#ifndef _NVBUF_UTILS_H_
#define _NVBUF_UTILS_H_
#ifdef __cplusplus
extern "C"
{
#endif
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <errno.h>
#include <stdbool.h>
/**
* Defines the maximum number of planes for a video frame.
*/
#define MAX_NUM_PLANES 4
/**
* Defines the maximum number of input video frames that can be used for composition.
*/
#define MAX_COMPOSITE_FRAME 16
/**
* Defines the default values for chroma subsampling.
* The default value matches JPEG/MPEG use cases.
*/
#define NVBUF_CHROMA_SUBSAMPLING_HORIZ_DEFAULT 0
#define NVBUF_CHROMA_SUBSAMPLING_VERT_DEFAULT 1
/**
* Defines the maximum number of sync object parameters.
*/
#define NVBUF_MAX_SYNCOBJ_PARAMS 5
/**
* Use this value to represent an infinite wait interval.
* A value of zero should not be interpreted as infinite,
* it should be interpreted as "time out immediately" and
* simply check whether the event has already happened.
*/
#define NVBUFFER_SYNCPOINT_WAIT_INFINITE 0xFFFFFFFF
/**
* Defines Payload types for NvBuffer.
*/
typedef enum
{
/** buffer payload with hardware memory handle for set of planes. */
NvBufferPayload_SurfArray,
/** buffer payload with hardware memory handle for specific memory size. */
NvBufferPayload_MemHandle,
} NvBufferPayloadType;
/**
* Defines display scan formats for NvBuffer video planes.
*/
typedef enum
{
/** Progressive scan formats. */
NvBufferDisplayScanFormat_Progressive = 0,
/** Interlaced scan formats. */
NvBufferDisplayScanFormat_Interlaced,
} NvBufferDisplayScanFormat;
/**
* Defines Layout formats for NvBuffer video planes.
*/
typedef enum
{
/** Pitch Layout. */
NvBufferLayout_Pitch,
/** BlockLinear Layout. */
NvBufferLayout_BlockLinear,
} NvBufferLayout;
/**
* Defines memory access flags for NvBuffer.
*/
typedef enum
{
/** Memory read. */
NvBufferMem_Read,
/** Memory write. */
NvBufferMem_Write,
/** Memory read & write. */
NvBufferMem_Read_Write,
} NvBufferMemFlags;
/**
* Defines tags that identify the components requesting a memory allocation.
* The tags can be used later to identify the total memory allocated to
* particular types of components.
*/
typedef enum
{
/** tag None. */
NvBufferTag_NONE = 0x0,
/** tag for Camera. */
NvBufferTag_CAMERA = 0x200,
/** tag for Jpeg Encoder/Decoder. */
NvBufferTag_JPEG = 0x1500,
/** tag for VPR Buffers. */
NvBufferTag_PROTECTED = 0x1504,
/** tag for H264/H265 Video Encoder. */
NvBufferTag_VIDEO_ENC = 0x1200,
/** tag for H264/H265/VP9 Video Decoder. */
NvBufferTag_VIDEO_DEC = 0x1400,
/** tag for Video Transform/Composite. */
NvBufferTag_VIDEO_CONVERT = 0xf01,
} NvBufferTag;
/**
* Defines color formats for NvBuffer.
*/
typedef enum
{
/** BT.601 colorspace - YUV420 multi-planar. */
NvBufferColorFormat_YUV420,
/** BT.601 colorspace - YUV420 multi-planar. */
NvBufferColorFormat_YVU420,
/** BT.601 colorspace - YUV422 multi-planar. */
NvBufferColorFormat_YUV422,
/** BT.601 colorspace - YUV420 ER multi-planar. */
NvBufferColorFormat_YUV420_ER,
/** BT.601 colorspace - YVU420 ER multi-planar. */
NvBufferColorFormat_YVU420_ER,
/** BT.601 colorspace - Y/CbCr 4:2:0 multi-planar. */
NvBufferColorFormat_NV12,
/** BT.601 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NvBufferColorFormat_NV12_ER,
/** BT.601 colorspace - Y/CbCr 4:2:0 multi-planar. */
NvBufferColorFormat_NV21,
/** BT.601 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NvBufferColorFormat_NV21_ER,
/** BT.601 colorspace - YUV 4:2:2 planar. */
NvBufferColorFormat_UYVY,
/** BT.601 colorspace - YUV ER 4:2:2 planar. */
NvBufferColorFormat_UYVY_ER,
/** BT.601 colorspace - YUV 4:2:2 planar. */
NvBufferColorFormat_VYUY,
/** BT.601 colorspace - YUV ER 4:2:2 planar. */
NvBufferColorFormat_VYUY_ER,
/** BT.601 colorspace - YUV 4:2:2 planar. */
NvBufferColorFormat_YUYV,
/** BT.601 colorspace - YUV ER 4:2:2 planar. */
NvBufferColorFormat_YUYV_ER,
/** BT.601 colorspace - YUV 4:2:2 planar. */
NvBufferColorFormat_YVYU,
/** BT.601 colorspace - YUV ER 4:2:2 planar. */
NvBufferColorFormat_YVYU_ER,
/** LegacyRGBA colorspace - BGRA-8-8-8-8 planar. */
NvBufferColorFormat_ABGR32,
/** LegacyRGBA colorspace - XRGB-8-8-8-8 planar. */
NvBufferColorFormat_XRGB32,
/** LegacyRGBA colorspace - ARGB-8-8-8-8 planar. */
NvBufferColorFormat_ARGB32,
/** BT.601 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV12_10LE,
/** BT.709 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV12_10LE_709,
/** BT.709_ER colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV12_10LE_709_ER,
/** BT.2020 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV12_10LE_2020,
/** BT.601 colorspace - Y/CrCb 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV21_10LE,
/** BT.601 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
NvBufferColorFormat_NV12_12LE,
/** BT.2020 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
NvBufferColorFormat_NV12_12LE_2020,
/** BT.601 colorspace - Y/CrCb 4:2:0 12-bit multi-planar. */
NvBufferColorFormat_NV21_12LE,
/** BT.709 colorspace - YUV420 multi-planar. */
NvBufferColorFormat_YUV420_709,
/** BT.709 colorspace - YUV420 ER multi-planar. */
NvBufferColorFormat_YUV420_709_ER,
/** BT.709 colorspace - Y/CbCr 4:2:0 multi-planar. */
NvBufferColorFormat_NV12_709,
/** BT.709 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NvBufferColorFormat_NV12_709_ER,
/** BT.2020 colorspace - YUV420 multi-planar. */
NvBufferColorFormat_YUV420_2020,
/** BT.2020 colorspace - Y/CbCr 4:2:0 multi-planar. */
NvBufferColorFormat_NV12_2020,
/** BT.601 colorspace - YUV444 multi-planar. */
NvBufferColorFormat_YUV444,
/** Optical flow */
NvBufferColorFormat_SignedR16G16,
/** Optical flow SAD calculation Buffer format */
NvBufferColorFormat_A32,
/** 8-bit grayscale. */
NvBufferColorFormat_GRAY8,
/** BT.601 colorspace - Y/CbCr 4:2:2 multi-planar. */
NvBufferColorFormat_NV16,
/** BT.601 colorspace - Y/CbCr 4:2:2 10-bit semi-planar. */
NvBufferColorFormat_NV16_10LE,
/** BT.601 colorspace - Y/CbCr 4:4:4 multi-planar. */
NvBufferColorFormat_NV24,
/** BT.601 colorspace - Y/CrCb 4:4:4 10-bit multi-planar. */
NvBufferColorFormat_NV24_10LE,
/** BT.601_ER colorspace - Y/CbCr 4:2:2 multi-planar. */
NvBufferColorFormat_NV16_ER,
/** BT.601_ER colorspace - Y/CbCr 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_ER,
/** BT.709 colorspace - Y/CbCr 4:2:2 multi-planar. */
NvBufferColorFormat_NV16_709,
/** BT.709 colorspace - Y/CbCr 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_709,
/** BT.709_ER colorspace - Y/CbCr 4:2:2 multi-planar. */
NvBufferColorFormat_NV16_709_ER,
/** BT.709_ER colorspace - Y/CbCr 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_709_ER,
/** BT.709 colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_10LE_709,
/** BT.709 ER colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_10LE_709_ER,
/** BT.2020 colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_10LE_2020,
/** BT.2020 colorspace - Y/CbCr 12 bit 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_12LE_2020,
/** Non-linear RGB BT.709 colorspace - RGBA-10-10-10-2 planar. */
NvBufferColorFormat_RGBA_10_10_10_2_709,
/** Non-linear RGB BT.2020 colorspace - RGBA-10-10-10-2 planar. */
NvBufferColorFormat_RGBA_10_10_10_2_2020,
/** Non-linear RGB BT.709 colorspace - BGRA-10-10-10-2 planar. */
NvBufferColorFormat_BGRA_10_10_10_2_709,
/** Non-linear RGB BT.2020 colorspace - BGRA-10-10-10-2 planar. */
NvBufferColorFormat_BGRA_10_10_10_2_2020,
/** Invalid color format. */
NvBufferColorFormat_Invalid,
} NvBufferColorFormat;
/**
* Defines video flip methods.
*/
typedef enum
{
/** Video flip none. */
NvBufferTransform_None,
/** Video flip rotate 90 degree counter-clockwise. */
NvBufferTransform_Rotate90,
/** Video flip rotate 180 degree. */
NvBufferTransform_Rotate180,
/** Video flip rotate 270 degree counter-clockwise. */
NvBufferTransform_Rotate270,
/** Video flip with respect to X-axis. */
NvBufferTransform_FlipX,
/** Video flip with respect to Y-axis. */
NvBufferTransform_FlipY,
/** Video flip transpose. */
NvBufferTransform_Transpose,
/** Video flip inverse transpose. */
NvBufferTransform_InvTranspose,
} NvBufferTransform_Flip;
/**
* Defines transform video filter types.
*/
typedef enum
{
/** transform filter nearest. */
NvBufferTransform_Filter_Nearest,
/** transform filter bilinear. */
NvBufferTransform_Filter_Bilinear,
/** transform filter 5 tap. */
NvBufferTransform_Filter_5_Tap,
/** transform filter 10 tap. */
NvBufferTransform_Filter_10_Tap,
/** transform filter smart. */
NvBufferTransform_Filter_Smart,
/** transform filter nicest. */
NvBufferTransform_Filter_Nicest,
} NvBufferTransform_Filter;
/**
* Defines flags to indicate for valid transform.
*/
typedef enum {
/** transform flag to crop source rectangle. */
NVBUFFER_TRANSFORM_CROP_SRC = 1,
/** transform flag to crop destination rectangle. */
NVBUFFER_TRANSFORM_CROP_DST = 1 << 1,
/** transform flag to set filter type. */
NVBUFFER_TRANSFORM_FILTER = 1 << 2,
/** transform flag to set flip method. */
NVBUFFER_TRANSFORM_FLIP = 1 << 3,
} NvBufferTransform_Flag;
/**
* Defines flags that specify valid composition/blending operations.
*/
typedef enum {
/** flag to set for composition. */
NVBUFFER_COMPOSITE = 1,
/** flag to set for blending. */
NVBUFFER_BLEND = 1 << 1,
/** composition flag to set filter type. */
NVBUFFER_COMPOSITE_FILTER = 1 << 2,
} NvBufferComposite_Flag;
/**
* Holds parameters for buffer sync point object.
* sync object params is simply a data structure containing [sync point ID,value] pair.
* This can be used by clients to describe an event that might want to wait for.
*/
typedef struct _NvBufferSyncObjParams
{
uint32_t syncpointID;
uint32_t value;
}NvBufferSyncObjParams;
/**
* buffer sync point object.
*/
typedef struct _NvBufferSyncObjRec
{
NvBufferSyncObjParams insyncobj[NVBUF_MAX_SYNCOBJ_PARAMS];
uint32_t num_insyncobj;
NvBufferSyncObjParams outsyncobj;
uint32_t use_outsyncobj;
}NvBufferSyncObj;
/**
* Holds composition background r,g,b colors.
*/
typedef struct
{
/** background color value for r. */
float r;
/** background color value for g. */
float g;
/** background color value for b. */
float b;
}NvBufferCompositeBackground;
/**
* Holds coordinates for a rectangle.
*/
typedef struct
{
/** rectangle top. */
uint32_t top;
/** rectangle left. */
uint32_t left;
/** rectangle width. */
uint32_t width;
/** rectangle height. */
uint32_t height;
}NvBufferRect;
/**
 * Holds an opaque NvBuffer session type required for parallel buffer
 * transformations and compositions. Operations using a single session are
 * scheduled sequentially, after the previous operation finishes. Operations for
 * multiple sessions are scheduled in parallel.
 */
typedef struct _NvBufferSession * NvBufferSession;
/**
* Holds Chroma Subsampling parameters.
*/
typedef struct _NvBufferChromaSubSamplingParams
{
/** location settings */
uint8_t chromaLocHoriz;
uint8_t chromaLocVert;
}NvBufferChromaSubsamplingParams;
#define NVBUF_CHROMA_SUBSAMPLING_PARAMS_DEFAULT \
{ \
NVBUF_CHROMA_SUBSAMPLING_HORIZ_DEFAULT, \
NVBUF_CHROMA_SUBSAMPLING_VERT_DEFAULT \
}
/**
* Holds the input parameters for hardware buffer creation.
*/
typedef struct _NvBufferCreateParams
{
/** width of the buffer. */
int32_t width;
/** height of the buffer. */
int32_t height;
/** payload type of the buffer. */
NvBufferPayloadType payloadType;
/** size of the memory. (Applicable for NvBufferPayload_MemHandle) */
int32_t memsize;
/** layout of the buffer. */
NvBufferLayout layout;
/** colorformat of the buffer. */
NvBufferColorFormat colorFormat;
/** tag to associate with the buffer. */
NvBufferTag nvbuf_tag;
}NvBufferCreateParams;
/**
* Holds parameters for a hardware buffer.
*/
typedef struct _NvBufferParams
{
/** Holds the DMABUF FD of the hardware buffer. */
uint32_t dmabuf_fd;
/** pointer to hardware buffer memory. */
void *nv_buffer;
/** payload type of the buffer. */
NvBufferPayloadType payloadType;
/** size of the memory. (Applicable for NvBufferPayload_MemHandle) */
int32_t memsize;
/** size of hardware buffer. */
uint32_t nv_buffer_size;
/** video format type of hardware buffer. */
NvBufferColorFormat pixel_format;
/** number of planes of hardware buffer. */
uint32_t num_planes;
/** width of each plane of hardware buffer. */
uint32_t width[MAX_NUM_PLANES];
/** height of each plane of hardware buffer. */
uint32_t height[MAX_NUM_PLANES];
/** pitch of each plane of hardware buffer. */
uint32_t pitch[MAX_NUM_PLANES];
/** memory offset values of each video plane of hardware buffer. */
uint32_t offset[MAX_NUM_PLANES];
/** size of each video plane of hardware buffer. */
uint32_t psize[MAX_NUM_PLANES];
/** layout type of each plane of hardware buffer. */
uint32_t layout[MAX_NUM_PLANES];
}NvBufferParams;
/**
* Holds extended parameters for a hardware buffer.
*/
typedef struct _NvBufferParamsEx
{
/** nvbuffer basic parameters. */
NvBufferParams params;
/** offset in bytes from the start of the buffer to the first valid byte.
(Applicable for NvBufferPayload_MemHandle) */
int32_t startofvaliddata;
/** size of the valid data from the first to the last valid byte.
(Applicable for NvBufferPayload_MemHandle) */
int32_t sizeofvaliddatainbytes;
/** display scan format - progressive/interlaced. */
NvBufferDisplayScanFormat scanformat[MAX_NUM_PLANES];
/** offset of the second field for interlaced buffer. */
uint32_t secondfieldoffset[MAX_NUM_PLANES];
/** block height of the planes for blockLinear layout hardware buffer. */
uint32_t blockheightlog2[MAX_NUM_PLANES];
/** physical address of allocated planes. */
uint32_t physicaladdress[MAX_NUM_PLANES];
/** flags associated with planes */
uint64_t flags[MAX_NUM_PLANES];
/** metadata associated with the hardware buffer. */
void *payloadmetaInfo;
/** chroma subsampling parameters */
NvBufferChromaSubsamplingParams chromaSubsampling;
/** get buffer vpr information. */
bool is_protected;
/** buffer sync point object parameters */
NvBufferSyncObj syncobj;
/** reserved field. */
void *reserved;
}NvBufferParamsEx;
/**
* Holds parameters related to compositing/blending.
*/
typedef struct _NvBufferCompositeParams
{
/** flag to indicate which of the composition/blending parameters are valid. */
uint32_t composite_flag;
/** number of the input buffers to be composited. */
uint32_t input_buf_count;
/** filters to use for composition. */
NvBufferTransform_Filter composite_filter[MAX_COMPOSITE_FRAME];
/** alpha values of input buffers for the blending. */
float dst_comp_rect_alpha[MAX_COMPOSITE_FRAME];
/** source rectangle coordinates of input buffers for composition. */
NvBufferRect src_comp_rect[MAX_COMPOSITE_FRAME];
/** destination rectangle coordinates of input buffers for composition. */
NvBufferRect dst_comp_rect[MAX_COMPOSITE_FRAME];
/** background color values for composition. */
NvBufferCompositeBackground composite_bgcolor;
/** NvBufferSession to be used for composition. If NULL, the default session
* is used. */
NvBufferSession session;
}NvBufferCompositeParams;
/**
* Holds parameters for buffer transform functions.
*/
typedef struct _NvBufferTransformParams
{
/** flag to indicate which of the transform parameters are valid. */
uint32_t transform_flag;
/** flip method. */
NvBufferTransform_Flip transform_flip;
/** transform filter. */
NvBufferTransform_Filter transform_filter;
/** source rectangle coordinates for crop operation. */
NvBufferRect src_rect;
/** destination rectangle coordinates for crop operation. */
NvBufferRect dst_rect;
/** NvBufferSession to be used for transform. If NULL, the default session
 * is used. */
NvBufferSession session;
}NvBufferTransformParams;
/**
* This method can be used to wait on sync point ID.
*
* @param[in] syncobj_params sync point object parameters.
* @param[in] timeout sync point wait timeout value.
*
* @returns 0 for success, -1 for failure
*/
int NvBufferSyncObjWait (NvBufferSyncObjParams *syncobj_params, unsigned int timeout);
/**
* This method can be used to get hardware Buffer struct size.
*
* @returns hardware Buffer struct size.
*/
int NvBufferGetSize (void);
/**
* Creates an instance of EGLImage from a DMABUF FD.
*
* @param[in] display An EGLDisplay object used during the creation
* of the EGLImage. If NULL, nvbuf_utils() uses
* its own instance of EGLDisplay.
* @param[in] dmabuf_fd DMABUF FD of the buffer from which the EGLImage
* is to be created.
*
* @returns `EGLImageKHR` for success, `NULL` for failure
*/
EGLImageKHR NvEGLImageFromFd (EGLDisplay display, int dmabuf_fd);
/**
* Destroys an EGLImage object.
*
* @param[in] display An EGLDisplay object used to destroy the EGLImage.
* If NULL, nvbuf_utils() uses its own instance of
* EGLDisplay.
* @param[in] eglImage The EGLImageKHR object to be destroyed.
*
* @returns 0 for success, -1 for failure
*/
int NvDestroyEGLImage (EGLDisplay display, EGLImageKHR eglImage);
/**
* Allocates a hardware buffer (deprecated).
*
* @deprecated Use NvBufferCreateEx() instead.
* @param[out] dmabuf_fd Returns the DMABUF FD of the hardware buffer.
* @param[in] width Buffer width, in bytes.
* @param[in] height Buffer height, in bytes.
* @param[in] layout Layout of the buffer.
* @param[in] colorFormat Color format of the buffer.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufferCreate (int *dmabuf_fd, int width, int height,
NvBufferLayout layout, NvBufferColorFormat colorFormat);
/**
* Allocates a hardware buffer.
*
* @param[out] dmabuf_fd Returns the DMABUF FD of the hardware buffer.
* @param[in] input_params Input parameters for hardware buffer creation.
*
* @returns 0 for success, -1 for failure
*/
int NvBufferCreateEx (int *dmabuf_fd, NvBufferCreateParams *input_params);
/**
* Allocates a hardware buffer for interlace scan format.
*
* @param[out] dmabuf_fd Returns the DMABUF FD of the hardware buffer.
* @param[in] input_params Input parameters for hardware buffer creation.
*
* @returns 0 for success, -1 for failure
*/
int NvBufferCreateInterlace (int *dmabuf_fd, NvBufferCreateParams *input_params);
/**
* Allocates a hardware buffer with a given chroma subsampling location.
*
* @param[in] dmabuf_fd DMABUF FD of the buffer.
* @param[in] input_params Input parameters for hardware buffer creation.
* @param[in] chromaSubsampling Chroma location parameters.
*
* @returns 0 for success, -1 for failure
*/
int NvBufferCreateWithChromaLoc (int *dmabuf_fd, NvBufferCreateParams *input_params, NvBufferChromaSubsamplingParams *chromaSubsampling);
/**
* Gets buffer parameters.
* @param[in] dmabuf_fd `DMABUF FD` of buffer.
* @param[out] params A pointer to the structure to fill with parameters.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferGetParams (int dmabuf_fd, NvBufferParams *params);
/**
* Gets buffer extended parameters.
* @param[in] dmabuf_fd `DMABUF FD` of buffer.
* @param[out] exparams A pointer to the structure to fill with extended parameters.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferGetParamsEx (int dmabuf_fd, NvBufferParamsEx *exparams);
/**
* Destroys a hardware buffer.
* @param[in] dmabuf_fd Specifies the `dmabuf_fd` `hw_buffer` to destroy.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferDestroy (int dmabuf_fd);
/**
* Extracts the `dmabuf_fd` from the hardware buffer.
* @param[in] nvbuf Specifies the `hw_buffer`.
* @param[out] dmabuf_fd Returns DMABUF FD of `hw_buffer`.
*
* @returns 0 for success, -1 for failure.
*/
int ExtractFdFromNvBuffer (void *nvbuf, int *dmabuf_fd);
/**
* Releases the `dmabuf_fd` buffer.
* @see ExtractfdFromNvBuffer()
* @param[in] dmabuf_fd Specifies the `dmabuf_fd` to release.
*
* @returns 0 for success, -1 for failure.
*/
int NvReleaseFd (int dmabuf_fd);
/**
* Syncs the hardware memory cache for the CPU.
*
* \sa NvBufferMemMap for the purpose of the function
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] plane video frame plane.
* @param[in] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemSyncForCpu (int dmabuf_fd, unsigned int plane, void **pVirtAddr);
/**
* Syncs the hardware memory cache for the CPU, API to be used for another process.
*
* \sa NvBufferMemMapEx for the purpose of the function
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] exparams extended parameters for a hardware buffer.
* @param[in] plane video frame plane.
* @param[in] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemSyncForCpuEx (int dmabuf_fd, NvBufferParamsEx *exparams, unsigned int plane, void **pVirtAddr);
/**
* Syncs the hardware memory cache for the device.
*
* \sa NvBufferMemMap for the purpose of the function
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] plane video frame plane.
* @param[in] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemSyncForDevice (int dmabuf_fd, unsigned int plane, void **pVirtAddr);
/**
* Syncs the hardware memory cache for the device, API to be used for another process.
*
* \sa NvBufferMemMapEx for the purpose of the function
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] exparams extended parameters for a hardware buffer.
* @param[in] plane video frame plane.
* @param[in] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemSyncForDeviceEx (int dmabuf_fd, NvBufferParamsEx *exparams, unsigned int plane, void **pVirtAddr);
/**
* Gets the memory-mapped virtual address of the plane.
*
* The client must call NvBufferMemSyncForCpu() with the virtual address returned
* by this function before accessing the mapped memory in CPU.
*
* After memory mapping is complete, mapped memory modification
* must be coordinated between the CPU and hardware device as
* follows:
* - CPU: If the CPU modifies any mapped memory, the client must call
* NvBufferMemSyncForDevice() before any hardware device accesses the memory.
* - Hardware device: If the mapped memory is modified by any hardware device,
* the client must call NvBufferMemSyncForCpu() before CPU accesses the memory.
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] plane video frame plane.(Applies to @ref NvBufferPayload_SurfArray.)
* @param[in] memflag NvBuffer memory flag.
* @param[out] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemMap (int dmabuf_fd, unsigned int plane, NvBufferMemFlags memflag, void **pVirtAddr);
/**
* Gets the memory-mapped virtual address of the plane, API to be used for another process.
*
* The client must call NvBufferMemSyncForCpuEx() with the virtual address returned
* by this function before accessing the mapped memory in CPU in another process.
*
* After memory mapping is complete, mapped memory modification
* must be coordinated between the CPU and hardware device as
* follows:
* - CPU: If the CPU modifies any mapped memory, the client must call
* NvBufferMemSyncForDeviceEx() before any hardware device accesses the memory.
* - Hardware device: If the mapped memory is modified by any hardware device,
* the client must call NvBufferMemSyncForCpuEx() before CPU accesses the memory.
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] exparams extended parameters for a hardware buffer.
* @param[in] plane video frame plane.(Applies to @ref NvBufferPayload_SurfArray.)
* @param[in] memflag NvBuffer memory flag.
* @param[out] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemMapEx (int dmabuf_fd, NvBufferParamsEx *exparams, unsigned int plane, NvBufferMemFlags memflag, void **pVirtAddr);
/**
* Unmaps the mapped virtual address of the plane.
*
* If the following conditions are both true, the client must call
* NvBufferMemSyncForDevice() before unmapping the memory:
* - Mapped memory was modified by the CPU.
* - Mapped memory will be accessed by a hardware device.
*
* @param[in] dmabuf_fd DMABUF FD of the buffer.
* @param[in] plane Video frame plane. Applies to
* @ref NvBufferPayload_SurfArray.
* @param[in] pVirtAddr Virtual address pointer to the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemUnMap (int dmabuf_fd, unsigned int plane, void **pVirtAddr);
/**
* Unmaps the mapped virtual address of the plane, API to be used for another process.
*
* If the following conditions are both true, the client must call
* NvBufferMemSyncForDeviceEx() before unmapping the memory in another process:
* - Mapped memory was modified by the CPU.
* - Mapped memory will be accessed by a hardware device.
*
* @param[in] dmabuf_fd DMABUF FD of the buffer.
* @param[in] exparams extended parameters for a hardware buffer.
* @param[in] plane Video frame plane. Applies to
* @ref NvBufferPayload_SurfArray.
* @param[in] pVirtAddr Virtual address pointer to the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemUnMapEx (int dmabuf_fd, NvBufferParamsEx *exparams, unsigned int plane, void **pVirtAddr);
/**
* Copies the NvBuffer plane contents to a raw buffer plane.
* @param[in] dmabuf_fd DMABUF FD of NvBuffer.
* @param[in] plane video frame plane.
* @param[in] out_width aligned width of the raw data plane.
* @param[in] out_height aligned height of the raw data plane.
* @param[in] ptr pointer to the output raw plane data.
*
* @returns 0 for success, -1 for failure.
*/
int NvBuffer2Raw (int dmabuf_fd, unsigned int plane, unsigned int out_width, unsigned int out_height, unsigned char *ptr);
/**
* Copies raw buffer plane contents to an NvBuffer plane.
* @param[in] ptr pointer to the input raw plane data.
* @param[in] plane video frame plane.
* @param[in] in_width aligned width of the raw data plane.
* @param[in] in_height aligned height of the raw data plane.
* @param[in] dmabuf_fd DMABUF FD of NvBuffer.
*
* @returns 0 for success, -1 for failure.
*/
int Raw2NvBuffer (unsigned char *ptr, unsigned int plane, unsigned int in_width, unsigned int in_height, int dmabuf_fd);
/**
* Creates a new NvBufferSession for parallel scheduling of
* buffer transformations and compositions.
*
* @returns A session pointer, NULL for failure.
*/
NvBufferSession NvBufferSessionCreate(void);
/**
* Destroys an existing \ref NvBufferSession.
* @param[in] session An existing NvBufferSession.
*/
void NvBufferSessionDestroy(NvBufferSession session);
/**
* Transforms one DMA buffer to another DMA buffer.
 * This function can support transforms for copying, scaling, flipping, rotating, and cropping.
* @param[in] src_dmabuf_fd DMABUF FD of source buffer
* @param[in] dst_dmabuf_fd DMABUF FD of destination buffer
* @param[in] transform_params transform parameters
*
 * @return 0 for success, -1 for failure.
*/
int NvBufferTransform (int src_dmabuf_fd, int dst_dmabuf_fd, NvBufferTransformParams *transform_params);
/**
* Transforms one DMA buffer to another DMA buffer, API to be used for another process.
 * This function can support transforms for copying, scaling, flipping, rotating, and cropping.
* @param[in] src_dmabuf_fd DMABUF FD of source buffer
* @param[in] input_params extended input parameters for a hardware buffer.
* @param[in] dst_dmabuf_fd DMABUF FD of destination buffer
* @param[in] output_params extended output parameters for a hardware buffer.
* @param[in] transform_params transform parameters
*
 * @return 0 for success, -1 for failure.
*/
int NvBufferTransformEx (int src_dmabuf_fd, NvBufferParamsEx *input_params, int dst_dmabuf_fd, NvBufferParamsEx *output_params, NvBufferTransformParams *transform_params);
/**
 * Transforms one DMA buffer to another DMA buffer asynchronously (non-blocking).
 * This function can support transforms for copying, scaling, flipping, rotating, and cropping.
* @param[in] src_dmabuf_fd DMABUF FD of source buffer
* @param[in] dst_dmabuf_fd DMABUF FD of destination buffer
* @param[in] transform_params transform parameters
* @param[in] syncobj nvbuffer sync point object
*
 * @return 0 for success, -1 for failure.
*/
int NvBufferTransformAsync (int src_dmabuf_fd, int dst_dmabuf_fd, NvBufferTransformParams *transform_params, NvBufferSyncObj *syncobj);
/**
* \brief Composites multiple input DMA buffers to one output DMA buffer.
*
* This function can composite multiple input frames to one output.
*
* @param[in] src_dmabuf_fds An array of DMABUF FDs of source buffers.
* These buffers are composited together. Output
* is copied to the output buffer referenced by
* @a dst_dmabuf_fd.
* @param[in] dst_dmabuf_fd DMABUF FD of the compositing destination buffer.
 * @param[in] composite_params Compositing parameters.
 *
 * @return 0 for success, -1 for failure.
 */
int NvBufferComposite (int *src_dmabuf_fds, int dst_dmabuf_fd, NvBufferCompositeParams *composite_params);
#ifdef __cplusplus
}
#endif
/** @} */
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
@@ -244,7 +244,40 @@ typedef enum
NVBUF_COLOR_FORMAT_UYVP,
/** Specifies BT.601 colorspace - 10 bit YUV ER 4:2:2 interleaved. */
NVBUF_COLOR_FORMAT_UYVP_ER,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_709_ER,
/** Specifies BT.2020 colorspace - Y/CbCr 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_2020,
/** Specifies BT.601 colorspace - Y/CbCr 4:4:4 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_10LE,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:4:4 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_10LE_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:4:4 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_10LE_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:4:4 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_10LE_709_ER,
/** Specifies BT.2020 colorspace - Y/CbCr 4:4:4 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_10LE_2020,
/** Specifies BT.601 colorspace - Y/CbCr 4:4:4 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_12LE,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:4:4 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_12LE_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:4:4 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_12LE_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:4:4 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_12LE_709_ER,
/** Specifies BT.2020 colorspace - Y/CbCr 4:4:4 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_12LE_2020,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_12LE_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_12LE_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:2:0 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_12LE_709_ER,
NVBUF_COLOR_FORMAT_LAST
} NvBufSurfaceColorFormat;
@@ -384,8 +417,16 @@ typedef struct NvBufSurfaceAllocateParams {
NvBufSurfaceChromaSubsamplingParams chromaSubsampling;
/** components tag to be used for memory allocation */
NvBufSurfaceTag memtag;
/** disable pitch padding allocation only applicable for cuda and system memory allocation
pitch would be width times bytes per pixel for the plane, for odd width it would be
multiple of 2, also note for some non standard video resolution cuda kernels may fail
due to unaligned pitch
*/
bool disablePitchPadding;
/** Used void* from custom param for 64 bit machine, using other uint32_t param */
uint32_t _reservedParam;
void * _reserved[STRUCTURE_PADDING];
void * _reserved[STRUCTURE_PADDING-1];
} NvBufSurfaceAllocateParams;
/**
@@ -476,6 +517,59 @@ typedef struct NvBufSurface {
void * _reserved[STRUCTURE_PADDING];
} NvBufSurface;
/**
* Holds plane parameters to map the buffer received from another process.
*/
typedef struct NvBufSurfaceMapPlaneParams
{
/** Holds the widths of planes */
uint32_t width;
/** Holds the heights of planes */
uint32_t height;
/** Holds the pitches of planes in bytes */
uint32_t pitch;
/** Holds the offsets of planes in bytes */
uint32_t offset;
/** Holds the sizes of planes in bytes */
uint32_t psize;
/** Holds offset of the second field for interlaced buffer */
uint32_t secondfieldoffset;
/** Holds block height of the planes for blockLinear layout buffer */
uint32_t blockheightlog2;
/** Holds flags associated with the planes */
uint64_t flags;
/** Reserved */
uint8_t reserved[64];
} NvBufSurfaceMapPlaneParams;
/**
* Holds buffer parameters to map the buffer received from another process.
*/
typedef struct NvBufSurfaceMapParams {
/** Holds the number of planes. */
uint32_t num_planes;
/** Holds a GPU ID */
uint32_t gpuId;
/** Holds a DMABUF FD */
uint64_t fd;
/** Holds the total size of allocated memory */
uint32_t totalSize;
/** Holds type of memory */
NvBufSurfaceMemType memType;
/** Holds BL or PL layout */
NvBufSurfaceLayout layout;
/** Holds display scan format */
NvBufSurfaceDisplayScanFormat scanformat;
/** Holds the color format */
NvBufSurfaceColorFormat colorFormat;
/** Holds chroma subsampling parameters */
NvBufSurfaceChromaSubsamplingParams chromaSubsampling;
/** Holds plane parameters */
NvBufSurfaceMapPlaneParams planes[NVBUF_MAX_PLANES];
/** Reserved */
uint8_t reserved[64];
} NvBufSurfaceMapParams;
/**
* \brief Allocates a batch of buffers.
*
@@ -605,7 +699,7 @@ int NvBufSurfaceCopy (NvBufSurface *srcSurf, NvBufSurface *dstSurf);
* This function can be used to copy plane memory content from source raw buffer pointer
* to specific destination batch buffer of supported memory type.
*
* @param[in] Surf pointer to NvBufSurface structure.
* @param[in] surf pointer to NvBufSurface structure.
* @param[in] index index of buffer in the batch.
* @param[in] plane index of plane in buffer.
* @param[in] out_width aligned width of the raw data plane.
@@ -614,7 +708,7 @@ int NvBufSurfaceCopy (NvBufSurface *srcSurf, NvBufSurface *dstSurf);
*
* @return 0 for success, -1 for failure.
*/
int NvBufSurface2Raw (NvBufSurface *Surf, unsigned int index, unsigned int plane, unsigned int out_width, unsigned int out_height, unsigned char *ptr);
int NvBufSurface2Raw (NvBufSurface *Surf, unsigned int index, unsigned int plane, unsigned int outwidth, unsigned int outheight, unsigned char *ptr);
/**
* \brief Copies the raw buffer plane memory content to the NvBufSurface plane memory of a specific
@@ -628,11 +722,11 @@ int NvBufSurface2Raw (NvBufSurface *Surf, unsigned int index, unsigned int plane
* @param[in] plane index of plane in buffer.
* @param[in] in_width aligned width of the raw data plane.
* @param[in] in_height aligned height of the raw data plane.
* @param[in] Surf pointer to NvBufSurface structure.
* @param[in] surf pointer to NvBufSurface structure.
*
* @return 0 for success, -1 for failure.
*/
int Raw2NvBufSurface (unsigned char *ptr, unsigned int index, unsigned int plane, unsigned int in_width, unsigned int in_height, NvBufSurface *Surf);
int Raw2NvBufSurface (unsigned char *ptr, unsigned int index, unsigned int plane, unsigned int inwidth, unsigned int inheight, NvBufSurface *Surf);
/**
* Syncs the HW memory cache for the CPU.
@@ -732,6 +826,30 @@ int NvBufSurfaceMapEglImage (NvBufSurface *surf, int index);
*/
int NvBufSurfaceUnMapEglImage (NvBufSurface *surf, int index);
/**
* \brief Import parameters received from another process and create hardware buffer.
*
* Calling process must need to call NvBufferDestroy() to remove reference count for
* hardware buffer handle of the imported DMA buffer.
*
* @param[out] out_nvbuf_surf Pointer to hardware buffer.
* @param[in] in_params Parameters to create hardware buffer.
*
* @return 0 for success, -1 for failure.
*/
int NvBufSurfaceImport (NvBufSurface **out_nvbuf_surf, const NvBufSurfaceMapParams *in_params);
/**
* \brief Get buffer information to map the buffer in another process.
*
* @param[in] surf Pointer to NvBufSurface structure.
* @param[in] index Index of a buffer in the batch.
* @param[out] params Pointer to NvBufSurfaceMapParams information of the buffer.
*
* @return 0 for success, -1 for failure.
*/
int NvBufSurfaceGetMapParams (const NvBufSurface *surf, int index, NvBufSurfaceMapParams *params);
/** @} */
#ifdef __cplusplus

View File

@@ -1 +1 @@
jetson_35.1
jetson_35.6

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -100,6 +100,15 @@
*/
#define V4L2_PIX_FMT_NV24_10LE v4l2_fourcc('N', 'V', '1', '0') /* Y/CbCr 4:4:4, 10 bits per channel */
/**
* Defines the V4L2 pixel format for representing planar 10-bit Y/CbCr 4:4:4 decoder data.
*/
#define V4L2_PIX_FMT_YUV444_10LE v4l2_fourcc('P', '4', '1', '0') /* Y/Cb/Cr 4:4:4, 10 bits per channel */
/**
* Defines the V4L2 pixel format for representing planar 12-bit Y/CbCr 4:4:4 decoder data.
*/
#define V4L2_PIX_FMT_YUV444_12LE v4l2_fourcc('P', '4', '1', '2') /* Y/Cb/Cr 4:4:4, 12 bits per channel */
/** @cond UNUSED */
/* >> The declarations from here to the next endcond statement are not
@@ -153,6 +162,8 @@ enum v4l2_mpeg_video_h265_profile {
V4L2_MPEG_VIDEO_H265_PROFILE_MAIN10 = 1,
/** H.265 MainStillPicture profile. */
V4L2_MPEG_VIDEO_H265_PROFILE_MAINSTILLPICTURE = 2,
/** H.265 FREXT profile. */
V4L2_MPEG_VIDEO_H265_PROFILE_FREXT = 3,
};
/**
@@ -453,7 +464,6 @@ struct v4l2_ctrl_vp8_frame_hdr {
* Read only. Valid after #V4L2_EVENT_RESOLUTION_CHANGE)
* - #V4L2_CID_MPEG_VIDEODEC_INPUT_METADATA
* - #V4L2_CID_MPEG_VIDEODEC_METADATA
* - #V4L2_CID_MPEG_VIDEO_BUF_API_TYPE
* - #V4L2_CID_MPEG_VIDEO_CUDA_MEM_TYPE
* - #V4L2_CID_MPEG_VIDEO_CUDA_GPU_ID
* - #V4L2_CID_MPEG_VIDEODEC_DROP_FRAME_INTERVAL
@@ -599,7 +609,7 @@ struct v4l2_ctrl_vp8_frame_hdr {
* @attention This control must be set after receiving V4L2_EVENT_RESOLUTION_CHANGE.
*
*/
#define V4L2_CID_MPEG_VIDEODEC_SAR_WIDTH (V4L2_CID_MPEG_BASE+569)
#define V4L2_CID_MPEG_VIDEODEC_SAR_WIDTH (V4L2_CID_MPEG_BASE+580)
/**
* Defines the Control ID to get Sample Aspect Ratio height for decoding.
@@ -609,7 +619,7 @@ struct v4l2_ctrl_vp8_frame_hdr {
* @attention This control must be set after receiving V4L2_EVENT_RESOLUTION_CHANGE.
*
*/
#define V4L2_CID_MPEG_VIDEODEC_SAR_HEIGHT (V4L2_CID_MPEG_BASE+570)
#define V4L2_CID_MPEG_VIDEODEC_SAR_HEIGHT (V4L2_CID_MPEG_BASE+581)
/**
* Defines the Control ID to embed the SEI data coming from upstream plugins.
@@ -1173,12 +1183,7 @@ struct v4l2_ctrl_vp8_frame_hdr {
/**
* Defines the Control ID to set buf api to be used by decoder/encoder.
*
* A boolean value should be supplied with this control, default is 0
* This has to be called before any other ioctls are used and cannot be changed.
*
* @attention This control must be set after setting formats on both the planes
* and before requesting buffers on either plane.
* This is internal ioctl due to be removed later.
* Note: This Control ID is no longer supported.
*/
#define V4L2_CID_MPEG_VIDEO_BUF_API_TYPE (V4L2_CID_MPEG_BASE+556)
@@ -1409,6 +1414,61 @@ struct v4l2_ctrl_vp8_frame_hdr {
*/
#define V4L2_CID_MPEG_VIDEOENC_PPE_INIT_PARAMS (V4L2_CID_MPEG_BASE+577)
/**
* Defines Control ID to configure PRESET id for CUVID Encoder
*
* An integer value between 1 to 7 should be supplied with this control.
*
* Check PRESET Guide for more details at
* https://docs.nvidia.com/video-technologies/video-codec-sdk/nvenc-preset-migration-guide/index.html
*
* @attention This control must be set after setting formats on both the planes
* and before requesting buffers on either plane.
*/
#define V4L2_CID_MPEG_VIDEOENC_CUDA_PRESET_ID (V4L2_CID_MPEG_BASE+578)
/**
* Defines Control ID to configure TUNING INFO id for CUVID Encoder
*
* An integer value between 1 to 4 should be supplied with this control.
*
* Check PRESET Guide for more details at
* https://docs.nvidia.com/video-technologies/video-codec-sdk/nvenc-preset-migration-guide/index.html
*
* @attention This control must be set after setting formats on both the planes
* and before requesting buffers on either plane.
*/
#define V4L2_CID_MPEG_VIDEOENC_CUDA_TUNING_INFO (V4L2_CID_MPEG_BASE+579)
/** Defines Control ID to configure CONSTQP VALUE for CUVID Encoder
*
* An integer value between 0 to 51 should be supplied with this control.
*
* @attention This control must be set after setting formats on both the planes
* and before requesting buffers on either plane.
*/
#define V4L2_CID_MPEG_VIDEOENC_CUDA_CONSTQP (V4L2_CID_MPEG_BASE+580)
/** Defines Control ID to configure FPS VALUE for CUVID Encoder
*
* A positive integer value should be supplied with this control.
*
* @attention This control is runtime configurable and can be called anytime after setting
* formats on both the planes.
*/
#define V4L2_CID_MPEG_VIDEOENC_RECONFIG_FPS (V4L2_CID_MPEG_BASE+581)
/**
* Defines the Control ID to disable SAO filter for HEVC.
*
* A boolean value should be supplied with this control.
* If value is true, SAO filter for H265 encoding is disabled. Enabled by default
*
* @attention This control should be set after setting formats on both the planes
* and before requesting buffers on either plane.
**/
#define V4L2_CID_MPEG_VIDEOENC_H265_DISABLE_SAO (V4L2_CID_MPEG_BASE + 582)
/** @} */
/**
@@ -1842,6 +1902,22 @@ enum v4l2_enc_hw_preset_type {
V4L2_ENC_HW_PRESET_SLOW,
};
/**
* Specifies the encoder HW Preset type.
*/
enum v4l2_enc_hw_tuning_info_type {
/** Encoder Tuning Info Undefined */
V4L2_ENC_TUNING_INFO_UNDEFINED = 0,
/** Encoder Tuning Info High Quality */
V4L2_ENC_TUNING_INFO_HIGH_QUALITY = 1,
/** Encoder Tuning Info Low Latency */
V4L2_ENC_TUNING_INFO_LOW_LATENCY,
/** Encoder Tuning Info Ultra Low Latency */
V4L2_ENC_TUNING_INFO_ULTRA_LOW_LATENCY,
/** Encoder Tuning Info Lossless */
V4L2_ENC_TUNING_INFO_LOSSLESS,
};
/**
* Holds encoder HW Preset type parameters
* to be used with #V4L2_CID_MPEG_VIDEOENC_HW_PRESET_TYPE_PARAM IOCTL.
@@ -1913,6 +1989,8 @@ enum v4l2_mpeg_video_h265_level {
V4L2_MPEG_VIDEO_H265_LEVEL_6_2_HIGH_TIER,
};
#define V4L2_MPEG_VIDEO_BITRATE_MODE_CONSTQP 0x2
/**
* Holds encoder slice length parameters, to be used with
* \c V4L2_CID_MPEG_VIDEOENC_SLICE_LENGTH_PARAM IOCTL.
@@ -2013,14 +2091,10 @@ typedef struct v4l2_enc_ppe_init_params_
__u8 enable_profiler;
/** The max number of milliseconds that Nvmedia should wait for each frame processing */
__s32 wait_time_ms;
/** Width of the frame */
__u32 width;
/** Height of the frame */
__u32 height;
/** Boolean value indicating if VIC should be used for frame downsampling */
__u8 taq_vic_downsampling;
/** Maximum strength of QP delta map for TAQ */
__u8 taq_max_qp_delta;
/** Boolean value indicating if TAQ should be applied for B-frames */
__u8 taq_b_frame_mode;
}v4l2_enc_ppe_init_params;
/**
@@ -2318,6 +2392,19 @@ typedef struct _v4l2_ctrl_video_qp_range
__u32 MaxQpB;
} v4l2_ctrl_video_qp_range;
typedef struct _v4l2_ctrl_video_constqp
{
__u32 constQpI;
__u32 constQpP;
__u32 constQpB;
} v4l2_ctrl_video_constqp;
typedef struct _v4l2_ctrl_video_framerate
{
__u32 fps_n;
__u32 fps_d;
} v4l2_ctrl_video_framerate;
/**
* Holds the encoder init QP parameters.
*