Compare commits

..

1 Commits

Author SHA1 Message Date
svcmobrel-release
6111720f79 Updating prebuilts and/or headers
77c130ed3990efc3a31750d9380ded85e2b660d1 - v4l2_nv_extensions.h
c2b683c77f90fbca8b8c2e0b59efee45db468369 - nvbufsurface.h
83e21353d1fe20cba4bd630c3b41c1615b8268ed - nvbuf_utils.h
80b3faf4a2e03de49089b320f0cf005d9a0a54ad - gst-v4l2/gstv4l2bufferpool.c
5948d70c07e87f9b1dc403789dcbed6acfa47ad9 - gst-v4l2/gstv4l2av1enc.c
a6f39a3f80f770833a35db7bf41e2ae5de9b6ace - gst-v4l2/sei_parse.c
d89a680415f6ff5acec2571cde0fce9054d8e81f - gst-v4l2/gstv4l2vp9enc.h
4311b3fbc6e5675353491a6fab52577ed36f499d - gst-v4l2/gstv4l2.c
b724de78f364b0855abfbbaf6fda9ae51ecbfd00 - gst-v4l2/gstv4l2videoenc.c
39fcb2f599e6906ab0fd7ab9a46fef3ea58a8cab - gst-v4l2/gstv4l2vp8enc.h
71be284b547ee68fb0e2cd14b0aeb14734a915a1 - gst-v4l2/gstv4l2bufferpool.h
c773f1e03097c888a3fda59ace02ea622e101d13 - gst-v4l2/Makefile
0eabbf0521068ee26f5c620698aac456a2b1d265 - gst-v4l2/gstv4l2object.c
e8e973c103725b65232d32817e0305d12d6ff309 - gst-v4l2/gstv4l2h264enc.c
b827fd6cb1e3b8ecebd6a07f8556e846e26cba17 - gst-v4l2/gstv4l2allocator.h
02d142337f4b96fcb0c9f2405a3cbe90c5917cca - gst-v4l2/gstv4l2vp9enc.c
85ff961e6bdfb02907033709ee001bc250af8e03 - gst-v4l2/gstv4l2object.h
fbdc964b443c64094f5b3f6e2bcd29697bc27694 - gst-v4l2/gstv4l2videodec.h
c81eacb7d88c4fb839506dd70055e30d7a9feeec - gst-v4l2/v4l2-utils.h
cbc84dccd2506afa4c8f03849c95bb28c83ef4a3 - gst-v4l2/gstv4l2av1enc.h
d29e3a719400c3cb27314366d48ec792a3c12363 - gst-v4l2/gstv4l2h265enc.h
bb104683f5e4f7402e3f765a891e149edc794e02 - gst-v4l2/gstv4l2h264enc.h
b1cd923335aa60985ff9866fba91a2068e8671c7 - gst-v4l2/LICENSE.gst-nvvideo4linux2
c08d733da85d44332a0b7b6a9183308d307d160c - gst-v4l2/gstv4l2videodec.c
a002edef13a3bbbdc41e42a7fca40e574ad1bb3e - gst-v4l2/v4l2-utils.c
605f3b6fd4cc1f0e790f5ab50c9e2d87dfea9523 - gst-v4l2/gstv4l2videoenc.h
73b03969d7ae0a8adb374c93999c43af88ea93b2 - gst-v4l2/v4l2_calls.c
807bc9859585a540b0f85e98f147756aab24e1bd - gst-v4l2/gstv4l2vp8enc.c
ed77613908dddf791481ea198dfd75f988684226 - gst-v4l2/gstv4l2allocator.c
4a047575250eb3ccb6db1947ed36e9562fe000af - gst-v4l2/gstv4l2h265enc.c
499a9feb17ceabf1f1443923dffa1e0180bf5972 - gst-v4l2/gst/glib-compat-private.h
20c4f7c0cb89c83256650bc3353ed82154cf3a9d - gst-v4l2/gst/gst-i18n-plugin.h
e864ee6647f3572b144403d799f68152e9900da1 - gst-v4l2/gst/gettext.h
522ab8fc8531a2c758b9278d29642f5b763fd3e7 - gst-v4l2/ext/videodev2.h
a745675b051a2b8434a430c80fde3f245864ca89 - gst-v4l2/ext/v4l2-common.h
1636366b5a062e4bc1791b7bc3012ccf5635b363 - gst-v4l2/ext/v4l2-controls.h
72a34a694337f8f6da3bb94c9faced6730cbd2fc - gst-v4l2/ext/types-compat.h

Change-Id: I317d1220d8119b764a2a5dbe1d5796b1c8f726ff
2022-08-15 08:53:41 -07:00
34 changed files with 1965 additions and 7727 deletions

View File

@@ -1,45 +1,39 @@
Updating prebuilts and/or headers
44b0e909f18f7e2f457ba501fc47d80ecedd150b - nvbufsurface.h
2c5c20979e5fca5ed70b425187c3d09b39c03171 - v4l2_nv_extensions.h
d27a433ddeaefb9f42d0312c23472514b0cd6a45 - gst-nvcustomevent.h
e9519308cbf7b36481da7665e3b74d36569cc3d1 - gst-v4l2/gstv4l2.c
ba87c2bc0bea986ef461e1bc2ab3ded89700a986 - gst-v4l2/gstv4l2h264enc.c
93eaaa0797c1f1dc21c20fbad1885dc109ccffd3 - gst-v4l2/gstv4l2bufferpool.c
9ff38f38c224577c4aaadc4ac4d808429f37ca69 - gst-v4l2/gstv4l2allocator.c
3d06f0b9ae8e465e8aecd7ef101e652ff62268c4 - gst-v4l2/Makefile
02d142337f4b96fcb0c9f2405a3cbe90c5917cca - gst-v4l2/gstv4l2vp9enc.c
34adbcb7d5cf5a360d28432429b735710bfe49c5 - gst-v4l2/wsl_utils.h
afc982d855f80b1e21ce1831930a9f327c41832b - gst-v4l2/gstv4l2h265enc.c
55a2c81ab3ffd72e07fc680369683d9635a3665c - gst-v4l2/gstv4l2h265enc.h
c81eacb7d88c4fb839506dd70055e30d7a9feeec - gst-v4l2/v4l2-utils.h
b1cd923335aa60985ff9866fba91a2068e8671c7 - gst-v4l2/LICENSE.gst-nvvideo4linux2
aa816d369be13e7cb2f6f5283c74bb00f7f1c76e - gst-v4l2/v4l2_calls.c
77c130ed3990efc3a31750d9380ded85e2b660d1 - v4l2_nv_extensions.h
c2b683c77f90fbca8b8c2e0b59efee45db468369 - nvbufsurface.h
83e21353d1fe20cba4bd630c3b41c1615b8268ed - nvbuf_utils.h
80b3faf4a2e03de49089b320f0cf005d9a0a54ad - gst-v4l2/gstv4l2bufferpool.c
5948d70c07e87f9b1dc403789dcbed6acfa47ad9 - gst-v4l2/gstv4l2av1enc.c
a6f39a3f80f770833a35db7bf41e2ae5de9b6ace - gst-v4l2/sei_parse.c
d89a680415f6ff5acec2571cde0fce9054d8e81f - gst-v4l2/gstv4l2vp9enc.h
da6c40e84b3b99e443b76c72cbb433541bdc9bcf - gst-v4l2/gstv4l2videodec.c
0d69b17838c57184dace9bfa1d30bbe8f2f83848 - gst-v4l2/gstv4l2object.h
c3ac3836a2d29d813c3c274cde82d2a59dd45a5a - gst-v4l2/gstv4l2videodec.h
4b70823ac5f9a70cce0c909e284c73aed4bccbd6 - gst-v4l2/gstv4l2h26xparser.c
4311b3fbc6e5675353491a6fab52577ed36f499d - gst-v4l2/gstv4l2.c
b724de78f364b0855abfbbaf6fda9ae51ecbfd00 - gst-v4l2/gstv4l2videoenc.c
39fcb2f599e6906ab0fd7ab9a46fef3ea58a8cab - gst-v4l2/gstv4l2vp8enc.h
08d68910b07d04e1429763ad1e6dbbeb41c5277d - gst-v4l2/gstv4l2av1enc.h
a002edef13a3bbbdc41e42a7fca40e574ad1bb3e - gst-v4l2/v4l2-utils.c
870a72e5038dba9f4df37f900d53a059beee9bbc - gst-v4l2/gstv4l2h26xparser.h
fac36b61500cf8d1b5f2513d6d2319ef73aa870e - gst-v4l2/sei_parse.c
71be284b547ee68fb0e2cd14b0aeb14734a915a1 - gst-v4l2/gstv4l2bufferpool.h
c773f1e03097c888a3fda59ace02ea622e101d13 - gst-v4l2/Makefile
0eabbf0521068ee26f5c620698aac456a2b1d265 - gst-v4l2/gstv4l2object.c
e8e973c103725b65232d32817e0305d12d6ff309 - gst-v4l2/gstv4l2h264enc.c
b827fd6cb1e3b8ecebd6a07f8556e846e26cba17 - gst-v4l2/gstv4l2allocator.h
e18e54d84e643676bfc88fd559d834f26f5b4d4d - gst-v4l2/wsl_utils.c
d0af17fd51ec44b79ef54c1279b631a46cf31f49 - gst-v4l2/gstv4l2videoenc.h
4e79cf75c4fa29791e1f5141318dc8aec13a7835 - gst-v4l2/nalutils.h
add535643bbb5c58b7eb98b45496204e4d63ebb1 - gst-v4l2/gstv4l2bufferpool.h
5ecd059e5ef9be4014eface37e5e2f7598960f4e - gst-v4l2/nalutils.c
719c8569e894b0146a6e027550187df5aaf5adc1 - gst-v4l2/gstv4l2av1enc.c
02d142337f4b96fcb0c9f2405a3cbe90c5917cca - gst-v4l2/gstv4l2vp9enc.c
85ff961e6bdfb02907033709ee001bc250af8e03 - gst-v4l2/gstv4l2object.h
fbdc964b443c64094f5b3f6e2bcd29697bc27694 - gst-v4l2/gstv4l2videodec.h
c81eacb7d88c4fb839506dd70055e30d7a9feeec - gst-v4l2/v4l2-utils.h
cbc84dccd2506afa4c8f03849c95bb28c83ef4a3 - gst-v4l2/gstv4l2av1enc.h
d29e3a719400c3cb27314366d48ec792a3c12363 - gst-v4l2/gstv4l2h265enc.h
bb104683f5e4f7402e3f765a891e149edc794e02 - gst-v4l2/gstv4l2h264enc.h
eb5134c907dd4b25097491e4273591db6ac386fc - gst-v4l2/gstv4l2videoenc.c
b1cd923335aa60985ff9866fba91a2068e8671c7 - gst-v4l2/LICENSE.gst-nvvideo4linux2
c08d733da85d44332a0b7b6a9183308d307d160c - gst-v4l2/gstv4l2videodec.c
a002edef13a3bbbdc41e42a7fca40e574ad1bb3e - gst-v4l2/v4l2-utils.c
605f3b6fd4cc1f0e790f5ab50c9e2d87dfea9523 - gst-v4l2/gstv4l2videoenc.h
73b03969d7ae0a8adb374c93999c43af88ea93b2 - gst-v4l2/v4l2_calls.c
807bc9859585a540b0f85e98f147756aab24e1bd - gst-v4l2/gstv4l2vp8enc.c
9c3d135576125a6620cc8fa0b249ac73c070110b - gst-v4l2/gstv4l2object.c
ed77613908dddf791481ea198dfd75f988684226 - gst-v4l2/gstv4l2allocator.c
4a047575250eb3ccb6db1947ed36e9562fe000af - gst-v4l2/gstv4l2h265enc.c
499a9feb17ceabf1f1443923dffa1e0180bf5972 - gst-v4l2/gst/glib-compat-private.h
20c4f7c0cb89c83256650bc3353ed82154cf3a9d - gst-v4l2/gst/gst-i18n-plugin.h
e864ee6647f3572b144403d799f68152e9900da1 - gst-v4l2/gst/gettext.h
499a9feb17ceabf1f1443923dffa1e0180bf5972 - gst-v4l2/gst/glib-compat-private.h
522ab8fc8531a2c758b9278d29642f5b763fd3e7 - gst-v4l2/ext/videodev2.h
a745675b051a2b8434a430c80fde3f245864ca89 - gst-v4l2/ext/v4l2-common.h
1636366b5a062e4bc1791b7bc3012ccf5635b363 - gst-v4l2/ext/v4l2-controls.h
72a34a694337f8f6da3bb94c9faced6730cbd2fc - gst-v4l2/ext/types-compat.h
583075e89482f1faa08be7f7b278336bf7756def - gst-v4l2/ext/v4l2-controls.h
fe847595bb202501a56702a7c602f0514d23c328 - gst-v4l2/ext/v4l2-common.h
2253e5f55e37aace35af706d5662ef017f17e877 - gst-v4l2/ext/videodev2.h

View File

@@ -1,235 +0,0 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/**
* @file
* <b>NVIDIA GStreamer: Custom Events</b>
*
* @b Description: This file specifies the NVIDIA GStreamer custom
* event functions.
*
*/
/**
* @defgroup gstreamer_nvevent Events: Custom Events API
*
* Specifies GStreamer custom event functions.
*
* @ingroup gst_mess_evnt_qry
* @{
*/
#ifndef __GST_NVCUSTOMEVENT_H__
#define __GST_NVCUSTOMEVENT_H__
#include <gst/gst.h>
#ifdef __cplusplus
extern "C" {
#endif
#define FLAG(name) GST_EVENT_TYPE_##name
/** Defines supported types of custom events. */
typedef enum {
/** Specifies a custom event to indicate decoder drop frame interval update
of a particular stream. */
GST_NVEVENT_DEC_DROP_FRAME_INTERVAL_UPDATE
= GST_EVENT_MAKE_TYPE (500, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate decoder skip frame update
of a particular stream. */
GST_NVEVENT_DEC_SKIP_FRAME_UPDATE
= GST_EVENT_MAKE_TYPE (501, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to enable decoder low-latency-mode
of a particular stream. */
GST_NVEVENT_DEC_ENABLE_LOW_LATENCY_MODE
= GST_EVENT_MAKE_TYPE (502, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate encoder bitrate update
of a particular stream. */
GST_NVEVENT_ENC_BITRATE_UPDATE
= GST_EVENT_MAKE_TYPE (503, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate encoder force IDR frame
of a particular stream. */
GST_NVEVENT_ENC_FORCE_IDR
= GST_EVENT_MAKE_TYPE (504, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate encoder force Intra frame
of a particular stream. */
GST_NVEVENT_ENC_FORCE_INTRA
= GST_EVENT_MAKE_TYPE (505, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate iframe interval update
of a particular stream. */
GST_NVEVENT_ENC_IFRAME_INTERVAL_UPDATE
= GST_EVENT_MAKE_TYPE (506, FLAG(DOWNSTREAM) | FLAG(SERIALIZED))
} GstNvCustomEventType;
#undef FLAG
/**
* Creates a new "nv-dec-drop-frame-interval-update" event.
*
* @param[in] stream_id Stream ID of the stream for which decoder-drop-frame-interval is to be sent.
* @param[in] interval The decoder drop-frame interval corresponding to the stream ID for the event.
*/
GstEvent * gst_nvevent_dec_drop_frame_interval_update (gchar* stream_id, guint interval);
/**
* Parses a "nv-dec-drop-frame-interval-update" event received on the sinkpad.
*
* @param[in] event The event received on the sinkpad
* when the stream ID sends a dec-drop-frame-interval-update event.
* @param[out] stream_id A pointer to the parsed stream ID for which
* the event is sent.
* @param[out] interval A pointer to the parsed interval
* corresponding to stream ID for the event.
*/
void gst_nvevent_parse_dec_drop_frame_interval_update (GstEvent * event, gchar** stream_id, guint *interval);
/**
* Creates a new "nv-dec-skip-frame-update" event.
*
* @param[in] stream_id Stream ID of the stream for which decoder-skip-frame-update is to be sent.
* @param[in] frame_type The decoder frame type to be skipped, corresponding to the stream ID for the event.
*/
GstEvent * gst_nvevent_dec_skip_frame_update (gchar* stream_id, guint frame_type);
/**
* Parses a "nv-dec-skip-frame-update" event received on the sinkpad.
*
* @param[in] event The event received on the sinkpad
* when the stream ID sends a skip-frame-update event.
* @param[out] stream_id A pointer to the parsed stream ID for which
* the event is sent.
* @param[out] frame_type A pointer to the parsed frame_type
* corresponding to stream ID for the event.
*/
void gst_nvevent_parse_dec_skip_frame_update (GstEvent * event, gchar** stream_id, guint *frame_type);
/**
* Creates a new "nv-dec-enable-low-latency-mode" event.
*
* @param[in] stream_id Stream ID of the stream for which decoder-low-latency-mode is to be sent.
* @param[in] enable The decoder low-latency mode to be enabled, corresponding to the stream ID for the event.
*/
GstEvent * gst_nvevent_dec_enable_low_latency_mode (gchar* stream_id, gint enable);
/**
* Parses a "nv-dec-enable-low-latency-mode" event received on the sinkpad.
*
* @param[in] event The event received on the sinkpad
* when the stream ID sends an enable-low-latency-mode event.
* @param[out] stream_id A pointer to the parsed stream ID for which
* the event is sent.
* @param[out] enable A pointer to the parsed enable flag
* corresponding to stream ID for the event.
*/
void gst_nvevent_parse_dec_enable_low_latency_mode (GstEvent * event, gchar** stream_id, gint *enable);
/**
* Creates a new "nv-enc-bitrate-update" event.
*
* @param[in] stream_id Stream ID of the stream for which encoder-bitrate-update is to be sent.
* @param[in] bitrate The encoder bitrate to be set, corresponding to the stream ID for the event.
*/
GstEvent * gst_nvevent_enc_bitrate_update (gchar* stream_id, guint bitrate);
/**
* Parses a "nv-enc-bitrate-update" event received on the sinkpad.
*
* @param[in] event The event received on the sinkpad
* when the stream ID sends a bitrate-update event.
* @param[out] stream_id A pointer to the parsed stream ID for which
* the event is sent.
* @param[out] bitrate A pointer to the parsed bitrate value
* corresponding to stream ID for the event.
*/
void gst_nvevent_parse_enc_bitrate_update (GstEvent * event, gchar** stream_id, guint *bitrate);
/**
* Creates a new "nv-enc-force-idr" event.
*
* @param[in] stream_id Stream ID of the stream for which encoder-force-idr is to be sent.
* @param[in] force The encoder force-IDR-frame flag corresponding to the stream ID for the event.
*/
GstEvent * gst_nvevent_enc_force_idr (gchar* stream_id, gint force);
/**
* Parses a "nv-enc-force-idr" event received on the sinkpad.
*
* @param[in] event The event received on the sinkpad
* when the stream ID sends a force-idr event.
* @param[out] stream_id A pointer to the parsed stream ID for which
* the event is sent.
* @param[out] force A pointer to the parsed force value
* corresponding to stream ID for the event.
*/
void gst_nvevent_parse_enc_force_idr (GstEvent * event, gchar** stream_id, gint *force);
/**
* Creates a new "nv-enc-force-intra" event.
*
* @param[in] stream_id Stream ID of the stream for which encoder-force-intra is to be sent.
* @param[in] force The encoder force-Intra-frame flag corresponding to the stream ID for the event.
*/
GstEvent * gst_nvevent_enc_force_intra (gchar* stream_id, gint force);
/**
* Parses a "nv-enc-force-intra" event received on the sinkpad.
*
* @param[in] event The event received on the sinkpad
* when the stream ID sends a force-intra event.
* @param[out] stream_id A pointer to the parsed stream ID for which
* the event is sent.
* @param[out] force A pointer to the parsed force value
* corresponding to stream ID for the event.
*/
void gst_nvevent_parse_enc_force_intra (GstEvent * event, gchar** stream_id, gint *force);
/**
* Creates a new "nv-enc-iframeinterval-update" event.
*
* @param[in] stream_id Stream ID of the stream for which encoder-iframeinterval-update is to be sent.
* @param[in] interval The encoder iframeinterval to be set, corresponding to the stream ID for the event.
*/
GstEvent * gst_nvevent_enc_iframeinterval_update (gchar* stream_id, guint interval);
/**
* Parses a "nv-enc-iframeinterval-update" event received on the sinkpad.
*
* @param[in] event The event received on the sinkpad
* when the stream ID sends an iframeinterval-update event.
* @param[out] stream_id A pointer to the parsed stream ID for which
* the event is sent.
* @param[out] interval A pointer to the parsed interval value
* corresponding to stream ID for the event.
*/
void gst_nvevent_parse_enc_iframeinterval_update (GstEvent * event, gchar** stream_id, guint *interval);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@@ -1,6 +1,6 @@
###############################################################################
#
# Copyright (c) 2018-2025, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
@@ -14,7 +14,7 @@ SO_NAME := libgstnvvideo4linux2.so
TARGET_DEVICE = $(shell gcc -dumpmachine | cut -f1 -d -)
NVDS_VERSION:=8.0
NVDS_VERSION:=6.0
ifeq ($(TARGET_DEVICE),aarch64)
GST_INSTALL_DIR?=/usr/lib/aarch64-linux-gnu/gstreamer-1.0/
@@ -27,10 +27,10 @@ else
CFLAGS:= -DUSE_V4L2_TARGET_NV_CODECSDK=1 -DUSE_V4L2_TARGET_NV_X86=1 -DUSE_V4L2_GST_HEADER_VER_1_8
endif
LIBS:= -lnvbufsurface -lnvbufsurftransform -lgstnvdsseimeta -lgstnvcustomhelper
LIBS:= -lnvbufsurface -lnvbufsurftransform -lgstnvdsseimeta
SRCS := $(wildcard *.c)
INCLUDES += -I./ -I../ -I/opt/nvidia/deepstream/deepstream-$(NVDS_VERSION)/sources/includes/
INCLUDES += -I./ -I../
PKGS := gstreamer-1.0 \
gstreamer-base-1.0 \

View File

@@ -1,6 +1,6 @@
###############################################################################
#
# Copyright (c) 2018-2025, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
@@ -34,4 +34,4 @@ Steps to compile the "gst-nvvideo4linux2" sources natively:
Note: For Jetson, "make install" will copy library "libgstnvvideo4linux2.so"
into "/usr/lib/aarch64-linux-gnu/gstreamer-1.0" directory. For x86 platforms,
make install will copy the library "libgstnvvideo4linux2.so" into
/opt/nvidia/deepstream/deepstream/lib/gst-plugins
/opt/nvidia/deepstream/deepstream-4.0/lib/gst-plugins

View File

@@ -1,4 +1,3 @@
/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* include/linux/v4l2-common.h
*
@@ -54,7 +53,7 @@
#ifndef __V4L2_COMMON__
#define __V4L2_COMMON__
#include <linux/types.h>
#include "ext/types-compat.h"
/*
*
@@ -79,11 +78,24 @@
/* Current composing area plus all padding pixels */
#define V4L2_SEL_TGT_COMPOSE_PADDED 0x0103
/* Backward compatibility target definitions --- to be removed. */
#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP
#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE
#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL V4L2_SEL_TGT_CROP
#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL V4L2_SEL_TGT_COMPOSE
#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS V4L2_SEL_TGT_CROP_BOUNDS
#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS V4L2_SEL_TGT_COMPOSE_BOUNDS
/* Selection flags */
#define V4L2_SEL_FLAG_GE (1 << 0)
#define V4L2_SEL_FLAG_LE (1 << 1)
#define V4L2_SEL_FLAG_KEEP_CONFIG (1 << 2)
/* Backward compatibility flag definitions --- to be removed. */
#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE
#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE
#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG
struct v4l2_edid {
__u32 pad;
__u32 start_block;
@@ -92,17 +104,4 @@ struct v4l2_edid {
__u8 *edid;
};
/* Backward compatibility target definitions --- to be removed. */
#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP
#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE
#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL V4L2_SEL_TGT_CROP
#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL V4L2_SEL_TGT_COMPOSE
#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS V4L2_SEL_TGT_CROP_BOUNDS
#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS V4L2_SEL_TGT_COMPOSE_BOUNDS
/* Backward compatibility flag definitions --- to be removed. */
#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE
#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE
#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG
#endif /* __V4L2_COMMON__ */

View File

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* Video for Linux Two header file
*
* Copyright (C) 1999-2012 the contributors
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -54,21 +54,32 @@
* Hans Verkuil <hverkuil@xs4all.nl>
* et al.
*/
#ifndef __LINUX_VIDEODEV2_H
#define __LINUX_VIDEODEV2_H
#ifndef _UAPI__LINUX_VIDEODEV2_H
#define _UAPI__LINUX_VIDEODEV2_H
#include <sys/time.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <linux/v4l2-common.h>
#include <linux/v4l2-controls.h>
#include "ext/types-compat.h"
#include "ext/v4l2-common.h"
#include "ext/v4l2-controls.h"
/*
* Common stuff for both V4L1 and V4L2
* Moved from videodev.h
*/
#ifdef USE_V4L2_TARGET_NV
/*
* As video decoder base class has to queue all the decoded frames
* between IDR interval for reverse playback, buffers are increased
* to 64 to support IDR interval till 60. As per the experiments,
* (IDR interval + 4) buffers are required at decoder capture plane
* for reverse playback
*/
#define VIDEO_MAX_FRAME 64
#else
#define VIDEO_MAX_FRAME 32
#endif
#define VIDEO_MAX_PLANES 8
/*
@@ -78,7 +89,7 @@
/* Four-character-code (FOURCC) */
#define v4l2_fourcc(a, b, c, d)\
((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24))
#define v4l2_fourcc_be(a, b, c, d) (v4l2_fourcc(a, b, c, d) | (1U << 31))
#define v4l2_fourcc_be(a, b, c, d) (v4l2_fourcc(a, b, c, d) | (1 << 31))
/*
* E N U M S
@@ -105,14 +116,14 @@ enum v4l2_field {
transmitted first */
};
#define V4L2_FIELD_HAS_TOP(field) \
((field) == V4L2_FIELD_TOP ||\
((field) == V4L2_FIELD_TOP ||\
(field) == V4L2_FIELD_INTERLACED ||\
(field) == V4L2_FIELD_INTERLACED_TB ||\
(field) == V4L2_FIELD_INTERLACED_BT ||\
(field) == V4L2_FIELD_SEQ_TB ||\
(field) == V4L2_FIELD_SEQ_BT)
#define V4L2_FIELD_HAS_BOTTOM(field) \
((field) == V4L2_FIELD_BOTTOM ||\
((field) == V4L2_FIELD_BOTTOM ||\
(field) == V4L2_FIELD_INTERLACED ||\
(field) == V4L2_FIELD_INTERLACED_TB ||\
(field) == V4L2_FIELD_INTERLACED_BT ||\
@@ -128,13 +139,6 @@ enum v4l2_field {
((field) == V4L2_FIELD_BOTTOM ||\
(field) == V4L2_FIELD_TOP ||\
(field) == V4L2_FIELD_ALTERNATE)
#define V4L2_FIELD_IS_INTERLACED(field) \
((field) == V4L2_FIELD_INTERLACED ||\
(field) == V4L2_FIELD_INTERLACED_TB ||\
(field) == V4L2_FIELD_INTERLACED_BT)
#define V4L2_FIELD_IS_SEQUENTIAL(field) \
((field) == V4L2_FIELD_SEQ_TB ||\
(field) == V4L2_FIELD_SEQ_BT)
enum v4l2_buf_type {
V4L2_BUF_TYPE_VIDEO_CAPTURE = 1,
@@ -150,7 +154,6 @@ enum v4l2_buf_type {
V4L2_BUF_TYPE_SDR_CAPTURE = 11,
V4L2_BUF_TYPE_SDR_OUTPUT = 12,
V4L2_BUF_TYPE_META_CAPTURE = 13,
V4L2_BUF_TYPE_META_OUTPUT = 14,
/* Deprecated, do not use */
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
@@ -166,10 +169,7 @@ enum v4l2_buf_type {
|| (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY \
|| (type) == V4L2_BUF_TYPE_VBI_OUTPUT \
|| (type) == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT \
|| (type) == V4L2_BUF_TYPE_SDR_OUTPUT \
|| (type) == V4L2_BUF_TYPE_META_OUTPUT)
#define V4L2_TYPE_IS_CAPTURE(type) (!V4L2_TYPE_IS_OUTPUT(type))
|| (type) == V4L2_BUF_TYPE_SDR_OUTPUT)
enum v4l2_tuner_type {
V4L2_TUNER_RADIO = 1,
@@ -219,7 +219,9 @@ enum v4l2_colorspace {
V4L2_COLORSPACE_470_SYSTEM_M = 5,
/*
* EBU Tech 3213 PAL/SECAM colorspace.
* EBU Tech 3213 PAL/SECAM colorspace. This only makes sense when
* dealing with really old PAL/SECAM recordings. Superseded by
* SMPTE 170M.
*/
V4L2_COLORSPACE_470_SYSTEM_BG = 6,
@@ -232,8 +234,8 @@ enum v4l2_colorspace {
/* For RGB colorspaces such as produces by most webcams. */
V4L2_COLORSPACE_SRGB = 8,
/* opRGB colorspace */
V4L2_COLORSPACE_OPRGB = 9,
/* AdobeRGB colorspace */
V4L2_COLORSPACE_ADOBERGB = 9,
/* BT.2020 colorspace, used for UHDTV. */
V4L2_COLORSPACE_BT2020 = 10,
@@ -265,7 +267,7 @@ enum v4l2_xfer_func {
*
* V4L2_COLORSPACE_SRGB, V4L2_COLORSPACE_JPEG: V4L2_XFER_FUNC_SRGB
*
* V4L2_COLORSPACE_OPRGB: V4L2_XFER_FUNC_OPRGB
* V4L2_COLORSPACE_ADOBERGB: V4L2_XFER_FUNC_ADOBERGB
*
* V4L2_COLORSPACE_SMPTE240M: V4L2_XFER_FUNC_SMPTE240M
*
@@ -276,7 +278,7 @@ enum v4l2_xfer_func {
V4L2_XFER_FUNC_DEFAULT = 0,
V4L2_XFER_FUNC_709 = 1,
V4L2_XFER_FUNC_SRGB = 2,
V4L2_XFER_FUNC_OPRGB = 3,
V4L2_XFER_FUNC_ADOBERGB = 3,
V4L2_XFER_FUNC_SMPTE240M = 4,
V4L2_XFER_FUNC_NONE = 5,
V4L2_XFER_FUNC_DCI_P3 = 6,
@@ -288,7 +290,7 @@ enum v4l2_xfer_func {
* This depends on the colorspace.
*/
#define V4L2_MAP_XFER_FUNC_DEFAULT(colsp) \
((colsp) == V4L2_COLORSPACE_OPRGB ? V4L2_XFER_FUNC_OPRGB : \
((colsp) == V4L2_COLORSPACE_ADOBERGB ? V4L2_XFER_FUNC_ADOBERGB : \
((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_XFER_FUNC_SMPTE240M : \
((colsp) == V4L2_COLORSPACE_DCI_P3 ? V4L2_XFER_FUNC_DCI_P3 : \
((colsp) == V4L2_COLORSPACE_RAW ? V4L2_XFER_FUNC_NONE : \
@@ -302,7 +304,7 @@ enum v4l2_ycbcr_encoding {
*
* V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
* V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_SRGB,
* V4L2_COLORSPACE_OPRGB and V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
* V4L2_COLORSPACE_ADOBERGB and V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
*
* V4L2_COLORSPACE_REC709 and V4L2_COLORSPACE_DCI_P3: V4L2_YCBCR_ENC_709
*
@@ -324,12 +326,14 @@ enum v4l2_ycbcr_encoding {
/* Rec. 709/EN 61966-2-4 Extended Gamut -- HDTV */
V4L2_YCBCR_ENC_XV709 = 4,
#ifndef __KERNEL__
/*
* sYCC (Y'CbCr encoding of sRGB), identical to ENC_601. It was added
* originally due to a misunderstanding of the sYCC standard. It should
* not be used, instead use V4L2_YCBCR_ENC_601.
*/
V4L2_YCBCR_ENC_SYCC = 5,
#endif
/* BT.2020 Non-constant Luminance Y'CbCr */
V4L2_YCBCR_ENC_BT2020 = 6,
@@ -367,9 +371,9 @@ enum v4l2_hsv_encoding {
enum v4l2_quantization {
/*
* The default for R'G'B' quantization is always full range.
* For Y'CbCr the quantization is always limited range, except
* for COLORSPACE_JPEG: this is full range.
* The default for R'G'B' quantization is always full range, except
* for the BT2020 colorspace. For Y'CbCr the quantization is always
* limited range, except for COLORSPACE_JPEG: this is full range.
*/
V4L2_QUANTIZATION_DEFAULT = 0,
V4L2_QUANTIZATION_FULL_RANGE = 1,
@@ -378,22 +382,14 @@ enum v4l2_quantization {
/*
* Determine how QUANTIZATION_DEFAULT should map to a proper quantization.
* This depends on whether the image is RGB or not, the colorspace.
* The Y'CbCr encoding is not used anymore, but is still there for backwards
* compatibility.
* This depends on whether the image is RGB or not, the colorspace and the
* Y'CbCr encoding.
*/
#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \
(((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE)
/*
* Deprecated names for opRGB colorspace (IEC 61966-2-5)
*
* WARNING: Please don't use these deprecated defines in your code, as
* there is a chance we have to remove them in the future.
*/
#define V4L2_COLORSPACE_ADOBERGB V4L2_COLORSPACE_OPRGB
#define V4L2_XFER_FUNC_ADOBERGB V4L2_XFER_FUNC_OPRGB
(((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
V4L2_QUANTIZATION_LIM_RANGE : \
(((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
enum v4l2_priority {
V4L2_PRIORITY_UNSET = 0, /* not initialized */
@@ -415,11 +411,6 @@ struct v4l2_fract {
__u32 denominator;
};
struct v4l2_area {
__u32 width;
__u32 height;
};
/**
* struct v4l2_capability - Describes V4L2 device caps returned by VIDIOC_QUERYCAP
*
@@ -476,24 +467,21 @@ struct v4l2_capability {
#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */
#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */
#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */
#define V4L2_CAP_META_OUTPUT 0x08000000 /* Is a metadata output device */
#define V4L2_CAP_TOUCH 0x10000000 /* Is a touch device */
#define V4L2_CAP_IO_MC 0x20000000 /* Is input/output controlled by the media controller */
#define V4L2_CAP_DEVICE_CAPS 0x80000000 /* sets device capabilities field */
/*
* V I D E O I M A G E F O R M A T
*/
struct v4l2_pix_format {
__u32 width;
__u32 width;
__u32 height;
__u32 pixelformat;
__u32 field; /* enum v4l2_field */
__u32 bytesperline; /* for padding, zero if unused */
__u32 sizeimage;
__u32 bytesperline; /* for padding, zero if unused */
__u32 sizeimage;
__u32 colorspace; /* enum v4l2_colorspace */
__u32 priv; /* private data, depends on pixelformat */
__u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */
@@ -509,44 +497,26 @@ struct v4l2_pix_format {
/* Pixel format FOURCC depth Description */
/* RGB formats (1 or 2 bytes per pixel) */
/* RGB formats */
#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R', 'G', 'B', '1') /* 8 RGB-3-3-2 */
#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R', '4', '4', '4') /* 16 xxxxrrrr ggggbbbb */
#define V4L2_PIX_FMT_ARGB444 v4l2_fourcc('A', 'R', '1', '2') /* 16 aaaarrrr ggggbbbb */
#define V4L2_PIX_FMT_XRGB444 v4l2_fourcc('X', 'R', '1', '2') /* 16 xxxxrrrr ggggbbbb */
#define V4L2_PIX_FMT_RGBA444 v4l2_fourcc('R', 'A', '1', '2') /* 16 rrrrgggg bbbbaaaa */
#define V4L2_PIX_FMT_RGBX444 v4l2_fourcc('R', 'X', '1', '2') /* 16 rrrrgggg bbbbxxxx */
#define V4L2_PIX_FMT_ABGR444 v4l2_fourcc('A', 'B', '1', '2') /* 16 aaaabbbb ggggrrrr */
#define V4L2_PIX_FMT_XBGR444 v4l2_fourcc('X', 'B', '1', '2') /* 16 xxxxbbbb ggggrrrr */
#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('G', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */
#define V4L2_PIX_FMT_BGRX444 v4l2_fourcc('B', 'X', '1', '2') /* 16 bbbbgggg rrrrxxxx */
#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */
#define V4L2_PIX_FMT_ARGB555 v4l2_fourcc('A', 'R', '1', '5') /* 16 ARGB-1-5-5-5 */
#define V4L2_PIX_FMT_XRGB555 v4l2_fourcc('X', 'R', '1', '5') /* 16 XRGB-1-5-5-5 */
#define V4L2_PIX_FMT_RGBA555 v4l2_fourcc('R', 'A', '1', '5') /* 16 RGBA-5-5-5-1 */
#define V4L2_PIX_FMT_RGBX555 v4l2_fourcc('R', 'X', '1', '5') /* 16 RGBX-5-5-5-1 */
#define V4L2_PIX_FMT_ABGR555 v4l2_fourcc('A', 'B', '1', '5') /* 16 ABGR-1-5-5-5 */
#define V4L2_PIX_FMT_XBGR555 v4l2_fourcc('X', 'B', '1', '5') /* 16 XBGR-1-5-5-5 */
#define V4L2_PIX_FMT_BGRA555 v4l2_fourcc('B', 'A', '1', '5') /* 16 BGRA-5-5-5-1 */
#define V4L2_PIX_FMT_BGRX555 v4l2_fourcc('B', 'X', '1', '5') /* 16 BGRX-5-5-5-1 */
#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */
#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */
#define V4L2_PIX_FMT_ARGB555X v4l2_fourcc_be('A', 'R', '1', '5') /* 16 ARGB-5-5-5 BE */
#define V4L2_PIX_FMT_XRGB555X v4l2_fourcc_be('X', 'R', '1', '5') /* 16 XRGB-5-5-5 BE */
#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */
/* RGB formats (3 or 4 bytes per pixel) */
#define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */
#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */
#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */
#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */
#define V4L2_PIX_FMT_ABGR32 v4l2_fourcc('A', 'R', '2', '4') /* 32 BGRA-8-8-8-8 */
#define V4L2_PIX_FMT_XBGR32 v4l2_fourcc('X', 'R', '2', '4') /* 32 BGRX-8-8-8-8 */
#define V4L2_PIX_FMT_BGRA32 v4l2_fourcc('R', 'A', '2', '4') /* 32 ABGR-8-8-8-8 */
#define V4L2_PIX_FMT_BGRX32 v4l2_fourcc('R', 'X', '2', '4') /* 32 XBGR-8-8-8-8 */
#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R', 'G', 'B', '4') /* 32 RGB-8-8-8-8 */
#define V4L2_PIX_FMT_RGBA32 v4l2_fourcc('A', 'B', '2', '4') /* 32 RGBA-8-8-8-8 */
#define V4L2_PIX_FMT_RGBX32 v4l2_fourcc('X', 'B', '2', '4') /* 32 RGBX-8-8-8-8 */
#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4') /* 32 ARGB-8-8-8-8 */
#define V4L2_PIX_FMT_XRGB32 v4l2_fourcc('B', 'X', '2', '4') /* 32 XRGB-8-8-8-8 */
@@ -556,13 +526,11 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */
#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */
#define V4L2_PIX_FMT_Y14 v4l2_fourcc('Y', '1', '4', ' ') /* 14 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
#define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */
/* Grey bit-packed formats */
#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */
#define V4L2_PIX_FMT_Y10P v4l2_fourcc('Y', '1', '0', 'P') /* 10 Greyscale, MIPI RAW10 packed */
/* Palette formats */
#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */
@@ -580,12 +548,9 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */
#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */
#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */
#define V4L2_PIX_FMT_YUV24 v4l2_fourcc('Y', 'U', 'V', '3') /* 24 YUV-8-8-8 */
#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */
#define V4L2_PIX_FMT_AYUV32 v4l2_fourcc('A', 'Y', 'U', 'V') /* 32 AYUV-8-8-8-8 */
#define V4L2_PIX_FMT_XYUV32 v4l2_fourcc('X', 'Y', 'U', 'V') /* 32 XYUV-8-8-8-8 */
#define V4L2_PIX_FMT_VUYA32 v4l2_fourcc('V', 'U', 'Y', 'A') /* 32 VUYA-8-8-8-8 */
#define V4L2_PIX_FMT_VUYX32 v4l2_fourcc('V', 'U', 'Y', 'X') /* 32 VUYX-8-8-8-8 */
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */
#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */
#define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */
/* two planes -- one Y, one Cr + Cb interleaved */
@@ -595,7 +560,6 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
#define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */
#define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */
#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */
/* two non contiguous planes - one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */
@@ -649,20 +613,6 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */
#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */
/* 12bit raw bayer packed, 6 bytes for every 4 pixels */
#define V4L2_PIX_FMT_SBGGR12P v4l2_fourcc('p', 'B', 'C', 'C')
#define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C')
#define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C')
#define V4L2_PIX_FMT_SRGGB12P v4l2_fourcc('p', 'R', 'C', 'C')
#define V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4') /* 14 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4') /* 14 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('G', 'R', '1', '4') /* 14 GRGR.. BGBG.. */
#define V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4') /* 14 RGRG.. GBGB.. */
/* 14bit raw bayer packed, 7 bytes for every 4 pixels */
#define V4L2_PIX_FMT_SBGGR14P v4l2_fourcc('p', 'B', 'E', 'E')
#define V4L2_PIX_FMT_SGBRG14P v4l2_fourcc('p', 'G', 'E', 'E')
#define V4L2_PIX_FMT_SGRBG14P v4l2_fourcc('p', 'g', 'E', 'E')
#define V4L2_PIX_FMT_SRGGB14P v4l2_fourcc('p', 'R', 'E', 'E')
#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG16 v4l2_fourcc('G', 'B', '1', '6') /* 16 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG16 v4l2_fourcc('G', 'R', '1', '6') /* 16 GRGR.. BGBG.. */
@@ -683,18 +633,12 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_H263 v4l2_fourcc('H', '2', '6', '3') /* H263 */
#define V4L2_PIX_FMT_MPEG1 v4l2_fourcc('M', 'P', 'G', '1') /* MPEG-1 ES */
#define V4L2_PIX_FMT_MPEG2 v4l2_fourcc('M', 'P', 'G', '2') /* MPEG-2 ES */
#define V4L2_PIX_FMT_MPEG2_SLICE v4l2_fourcc('M', 'G', '2', 'S') /* MPEG-2 parsed slice data */
#define V4L2_PIX_FMT_MPEG4 v4l2_fourcc('M', 'P', 'G', '4') /* MPEG-4 part 2 ES */
#define V4L2_PIX_FMT_XVID v4l2_fourcc('X', 'V', 'I', 'D') /* Xvid */
#define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */
#define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */
#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F') /* VP8 parsed frame */
#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* HEVC aka H.265 */
#define V4L2_PIX_FMT_FWHT v4l2_fourcc('F', 'W', 'H', 'T') /* Fast Walsh Hadamard Transform (vicodec) */
#define V4L2_PIX_FMT_FWHT_STATELESS v4l2_fourcc('S', 'F', 'W', 'H') /* Stateless FWHT (vicodec) */
#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */
/* Vendor-specific formats */
#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -728,15 +672,6 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
#define V4L2_PIX_FMT_SUNXI_TILED_NV12 v4l2_fourcc('S', 'T', '1', '2') /* Sunxi Tiled NV12 Format */
#define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* BTTV 8-bit dithered RGB */
/* 10bit raw bayer packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
#define V4L2_PIX_FMT_IPU3_SGBRG10 v4l2_fourcc('i', 'p', '3', 'g') /* IPU3 packed 10-bit GBRG bayer */
#define V4L2_PIX_FMT_IPU3_SGRBG10 v4l2_fourcc('i', 'p', '3', 'G') /* IPU3 packed 10-bit GRBG bayer */
#define V4L2_PIX_FMT_IPU3_SRGGB10 v4l2_fourcc('i', 'p', '3', 'r') /* IPU3 packed 10-bit RGGB bayer */
/* SDR formats - used only for Software Defined Radio devices */
#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
@@ -757,20 +692,12 @@ struct v4l2_pix_format {
/* Meta-data formats */
#define V4L2_META_FMT_VSP1_HGO v4l2_fourcc('V', 'S', 'P', 'H') /* R-Car VSP1 1-D Histogram */
#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */
#define V4L2_META_FMT_UVC v4l2_fourcc('U', 'V', 'C', 'H') /* UVC Payload Header metadata */
#define V4L2_META_FMT_D4XX v4l2_fourcc('D', '4', 'X', 'X') /* D4XX Payload Header metadata */
#define V4L2_META_FMT_VIVID v4l2_fourcc('V', 'I', 'V', 'D') /* Vivid Metadata */
/* Vendor specific - used for RK_ISP1 camera sub-system */
#define V4L2_META_FMT_RK_ISP1_PARAMS v4l2_fourcc('R', 'K', '1', 'P') /* Rockchip ISP1 3A Parameters */
#define V4L2_META_FMT_RK_ISP1_STAT_3A v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A Statistics */
/* priv field value to indicates that subsequent fields are valid. */
#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
/* Flags */
#define V4L2_PIX_FMT_FLAG_PREMUL_ALPHA 0x00000001
#define V4L2_PIX_FMT_FLAG_SET_CSC 0x00000002
/*
* F O R M A T E N U M E R A T I O N
@@ -781,20 +708,11 @@ struct v4l2_fmtdesc {
__u32 flags;
__u8 description[32]; /* Description string */
__u32 pixelformat; /* Format fourcc */
__u32 mbus_code; /* Media bus code */
__u32 reserved[3];
__u32 reserved[4];
};
#define V4L2_FMT_FLAG_COMPRESSED 0x0001
#define V4L2_FMT_FLAG_EMULATED 0x0002
#define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM 0x0004
#define V4L2_FMT_FLAG_DYN_RESOLUTION 0x0008
#define V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL 0x0010
#define V4L2_FMT_FLAG_CSC_COLORSPACE 0x0020
#define V4L2_FMT_FLAG_CSC_XFER_FUNC 0x0040
#define V4L2_FMT_FLAG_CSC_YCBCR_ENC 0x0080
#define V4L2_FMT_FLAG_CSC_HSV_ENC V4L2_FMT_FLAG_CSC_YCBCR_ENC
#define V4L2_FMT_FLAG_CSC_QUANTIZATION 0x0100
#define V4L2_FMT_FLAG_COMPRESSED 0x0001
#define V4L2_FMT_FLAG_EMULATED 0x0002
/* Frame Size and frame rate enumeration */
/*
@@ -923,25 +841,13 @@ struct v4l2_jpegcompression {
/*
* M E M O R Y - M A P P I N G B U F F E R S
*/
struct v4l2_requestbuffers {
__u32 count;
__u32 type; /* enum v4l2_buf_type */
__u32 memory; /* enum v4l2_memory */
__u32 capabilities;
__u32 reserved[1];
__u32 reserved[2];
};
/* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */
#define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0)
#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
#define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2)
#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3)
#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
#define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5)
#define V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS (1 << 6)
/**
* struct v4l2_plane - plane info for multi-planar buffers
* @bytesused: number of bytes occupied by data in the plane (payload)
@@ -954,10 +860,8 @@ struct v4l2_requestbuffers {
* pointing to this plane
* @fd: when memory is V4L2_MEMORY_DMABUF, a userspace file
* descriptor associated with this plane
* @m: union of @mem_offset, @userptr and @fd
* @data_offset: offset in the plane to the start of data; usually 0,
* unless there is a header in front of the data
* @reserved: drivers and applications must zero this array
*
* Multi-planar buffers consist of one or more planes, e.g. an YCbCr buffer
* with two planes can have one plane for Y, and another for interleaved CbCr
@@ -999,14 +903,9 @@ struct v4l2_plane {
* a userspace file descriptor associated with this buffer
* @planes: for multiplanar buffers; userspace pointer to the array of plane
* info structs for this buffer
* @m: union of @offset, @userptr, @planes and @fd
* @length: size in bytes of the buffer (NOT its payload) for single-plane
* buffers (when type != *_MPLANE); number of elements in the
* planes array for multi-plane buffers
* @reserved2: drivers and applications must zero this field
* @request_fd: fd of the request that this buffer should use
* @reserved: for backwards compatibility with applications that do not know
* about @request_fd
*
* Contains data exchanged by application and driver using one of the Streaming
* I/O methods.
@@ -1031,24 +930,9 @@ struct v4l2_buffer {
} m;
__u32 length;
__u32 reserved2;
union {
__s32 request_fd;
__u32 reserved;
};
__u32 reserved;
};
/**
* v4l2_timeval_to_ns - Convert timeval to nanoseconds
* @tv: pointer to the timeval variable to be converted
*
* Returns the scalar nanosecond representation of the timeval
* parameter.
*/
static __inline__ __u64 v4l2_timeval_to_ns(const struct timeval *tv)
{
return (__u64)tv->tv_sec * 1000000000ULL + tv->tv_usec * 1000;
}
/* Flags for 'flags' field */
/* Buffer is mapped (flag) */
#define V4L2_BUF_FLAG_MAPPED 0x00000001
@@ -1064,12 +948,8 @@ static __inline__ __u64 v4l2_timeval_to_ns(const struct timeval *tv)
#define V4L2_BUF_FLAG_BFRAME 0x00000020
/* Buffer is ready, but the data contained within is corrupted. */
#define V4L2_BUF_FLAG_ERROR 0x00000040
/* Buffer is added to an unqueued request */
#define V4L2_BUF_FLAG_IN_REQUEST 0x00000080
/* timecode field is valid */
#define V4L2_BUF_FLAG_TIMECODE 0x00000100
/* Don't return the capture buffer until OUTPUT timestamp changes */
#define V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF 0x00000200
/* Buffer is prepared for queuing */
#define V4L2_BUF_FLAG_PREPARED 0x00000400
/* Cache handling flags */
@@ -1086,8 +966,6 @@ static __inline__ __u64 v4l2_timeval_to_ns(const struct timeval *tv)
#define V4L2_BUF_FLAG_TSTAMP_SRC_SOE 0x00010000
/* mem2mem encoder/decoder */
#define V4L2_BUF_FLAG_LAST 0x00100000
/* request_fd is valid */
#define V4L2_BUF_FLAG_REQUEST_FD 0x00800000
/**
* struct v4l2_exportbuffer - export of video buffer as DMABUF file descriptor
@@ -1099,7 +977,6 @@ static __inline__ __u64 v4l2_timeval_to_ns(const struct timeval *tv)
* @flags: flags for newly created file, currently only O_CLOEXEC is
* supported, refer to manual of open syscall for more details
* @fd: file descriptor associated with DMABUF (set by driver)
* @reserved: drivers and applications must zero this array
*
* Contains data used for exporting a video buffer as DMABUF file descriptor.
* The buffer is identified by a 'cookie' returned by VIDIOC_QUERYBUF
@@ -1157,16 +1034,16 @@ struct v4l2_framebuffer {
struct v4l2_clip {
struct v4l2_rect c;
struct v4l2_clip *next;
struct v4l2_clip __user *next;
};
struct v4l2_window {
struct v4l2_rect w;
__u32 field; /* enum v4l2_field */
__u32 chromakey;
struct v4l2_clip *clips;
struct v4l2_clip __user *clips;
__u32 clipcount;
void *bitmap;
void __user *bitmap;
__u8 global_alpha;
};
@@ -1238,10 +1115,6 @@ struct v4l2_selection {
typedef __u64 v4l2_std_id;
/*
* Attention: Keep the V4L2_STD_* bit definitions in sync with
* include/dt-bindings/display/sdtv-standards.h SDTV_STD_* bit definitions.
*/
/* one bit for each */
#define V4L2_STD_PAL_B ((v4l2_std_id)0x00000001)
#define V4L2_STD_PAL_B1 ((v4l2_std_id)0x00000002)
@@ -1297,7 +1170,7 @@ typedef __u64 v4l2_std_id;
V4L2_STD_NTSC_M_JP |\
V4L2_STD_NTSC_M_KR)
/* Secam macros */
#define V4L2_STD_SECAM_DK (V4L2_STD_SECAM_D |\
#define V4L2_STD_SECAM_DK (V4L2_STD_SECAM_D |\
V4L2_STD_SECAM_K |\
V4L2_STD_SECAM_K1)
/* All Secam Standards */
@@ -1378,7 +1251,7 @@ struct v4l2_standard {
};
/*
* D V B T T I M I N G S
* D V B T T I M I N G S
*/
/** struct v4l2_bt_timings - BT.656/BT.1120 timing data
@@ -1516,13 +1389,6 @@ struct v4l2_bt_timings {
* InfoFrame).
*/
#define V4L2_DV_FL_HAS_HDMI_VIC (1 << 8)
/*
* CEA-861 specific: only valid for video receivers.
* If set, then HW can detect the difference between regular FPS and
* 1000/1001 FPS. Note: This flag is only valid for HDMI VIC codes with
* the V4L2_DV_FL_CAN_REDUCE_FPS flag set.
*/
#define V4L2_DV_FL_CAN_DETECT_REDUCED_FPS (1 << 9)
/* A few useful defines to calculate the total blanking and frame sizes */
#define V4L2_DV_BT_BLANKING_WIDTH(bt) \
@@ -1531,8 +1397,7 @@ struct v4l2_bt_timings {
((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
#define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
((bt)->interlaced ? \
((bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) : 0))
(bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch)
#define V4L2_DV_BT_FRAME_HEIGHT(bt) \
((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
@@ -1706,46 +1571,36 @@ struct v4l2_ext_control {
union {
__s32 value;
__s64 value64;
char *string;
__u8 *p_u8;
__u16 *p_u16;
__u32 *p_u32;
struct v4l2_area *p_area;
struct v4l2_ctrl_h264_sps *p_h264_sps;
struct v4l2_ctrl_h264_pps *p_h264_pps;
struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix;
struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights;
struct v4l2_ctrl_h264_slice_params *p_h264_slice_params;
struct v4l2_ctrl_h264_decode_params *p_h264_decode_params;
struct v4l2_ctrl_fwht_params *p_fwht_params;
struct v4l2_ctrl_vp8_frame *p_vp8_frame;
struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence;
struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quantisation;
void *ptr;
char __user *string;
__u8 __user *p_u8;
__u16 __user *p_u16;
__u32 __user *p_u32;
void __user *ptr;
};
} __attribute__ ((packed));
struct v4l2_ext_controls {
union {
#ifndef __KERNEL__
__u32 ctrl_class;
#endif
__u32 which;
};
__u32 count;
__u32 error_idx;
__s32 request_fd;
__u32 reserved[1];
__u32 reserved[2];
struct v4l2_ext_control *controls;
};
#define V4L2_CTRL_ID_MASK (0x0fffffff)
#define V4L2_CTRL_ID_MASK (0x0fffffff)
#ifndef __KERNEL__
#define V4L2_CTRL_ID2CLASS(id) ((id) & 0x0fff0000UL)
#endif
#define V4L2_CTRL_ID2WHICH(id) ((id) & 0x0fff0000UL)
#define V4L2_CTRL_DRIVER_PRIV(id) (((id) & 0xffff) >= 0x1000)
#define V4L2_CTRL_MAX_DIMS (4)
#define V4L2_CTRL_WHICH_CUR_VAL 0
#define V4L2_CTRL_WHICH_DEF_VAL 0x0f000000
#define V4L2_CTRL_WHICH_REQUEST_VAL 0x0f010000
enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_INTEGER = 1,
@@ -1763,25 +1618,6 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_U8 = 0x0100,
V4L2_CTRL_TYPE_U16 = 0x0101,
V4L2_CTRL_TYPE_U32 = 0x0102,
V4L2_CTRL_TYPE_AREA = 0x0106,
V4L2_CTRL_TYPE_HDR10_CLL_INFO = 0x0110,
V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY = 0x0111,
V4L2_CTRL_TYPE_H264_SPS = 0x0200,
V4L2_CTRL_TYPE_H264_PPS = 0x0201,
V4L2_CTRL_TYPE_H264_SCALING_MATRIX = 0x0202,
V4L2_CTRL_TYPE_H264_SLICE_PARAMS = 0x0203,
V4L2_CTRL_TYPE_H264_DECODE_PARAMS = 0x0204,
V4L2_CTRL_TYPE_H264_PRED_WEIGHTS = 0x0205,
V4L2_CTRL_TYPE_FWHT_PARAMS = 0x0220,
V4L2_CTRL_TYPE_VP8_FRAME = 0x0240,
V4L2_CTRL_TYPE_MPEG2_QUANTISATION = 0x0250,
V4L2_CTRL_TYPE_MPEG2_SEQUENCE = 0x0251,
V4L2_CTRL_TYPE_MPEG2_PICTURE = 0x0252,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
@@ -1828,11 +1664,11 @@ struct v4l2_querymenu {
/* Control flags */
#define V4L2_CTRL_FLAG_DISABLED 0x0001
#define V4L2_CTRL_FLAG_GRABBED 0x0002
#define V4L2_CTRL_FLAG_READ_ONLY 0x0004
#define V4L2_CTRL_FLAG_UPDATE 0x0008
#define V4L2_CTRL_FLAG_INACTIVE 0x0010
#define V4L2_CTRL_FLAG_SLIDER 0x0020
#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040
#define V4L2_CTRL_FLAG_READ_ONLY 0x0004
#define V4L2_CTRL_FLAG_UPDATE 0x0008
#define V4L2_CTRL_FLAG_INACTIVE 0x0010
#define V4L2_CTRL_FLAG_SLIDER 0x0020
#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040
#define V4L2_CTRL_FLAG_VOLATILE 0x0080
#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100
#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200
@@ -1946,21 +1782,21 @@ struct v4l2_hw_freq_seek {
*/
struct v4l2_rds_data {
__u8 lsb;
__u8 msb;
__u8 block;
__u8 lsb;
__u8 msb;
__u8 block;
} __attribute__ ((packed));
#define V4L2_RDS_BLOCK_MSK 0x7
#define V4L2_RDS_BLOCK_A 0
#define V4L2_RDS_BLOCK_B 1
#define V4L2_RDS_BLOCK_C 2
#define V4L2_RDS_BLOCK_D 3
#define V4L2_RDS_BLOCK_C_ALT 4
#define V4L2_RDS_BLOCK_INVALID 7
#define V4L2_RDS_BLOCK_MSK 0x7
#define V4L2_RDS_BLOCK_A 0
#define V4L2_RDS_BLOCK_B 1
#define V4L2_RDS_BLOCK_C 2
#define V4L2_RDS_BLOCK_D 3
#define V4L2_RDS_BLOCK_C_ALT 4
#define V4L2_RDS_BLOCK_INVALID 7
#define V4L2_RDS_BLOCK_CORRECTED 0x40
#define V4L2_RDS_BLOCK_ERROR 0x80
#define V4L2_RDS_BLOCK_ERROR 0x80
/*
* A U D I O
@@ -2037,7 +1873,6 @@ struct v4l2_encoder_cmd {
#define V4L2_DEC_CMD_STOP (1)
#define V4L2_DEC_CMD_PAUSE (2)
#define V4L2_DEC_CMD_RESUME (3)
#define V4L2_DEC_CMD_FLUSH (4)
/* Flags for V4L2_DEC_CMD_START */
#define V4L2_DEC_CMD_START_MUTE_AUDIO (1 << 0)
@@ -2219,7 +2054,6 @@ struct v4l2_mpeg_vbi_fmt_ivtv {
* this plane will be used
* @bytesperline: distance in bytes between the leftmost pixels in two
* adjacent lines
* @reserved: drivers and applications must zero this array
*/
struct v4l2_plane_pix_format {
__u32 sizeimage;
@@ -2238,10 +2072,8 @@ struct v4l2_plane_pix_format {
* @num_planes: number of planes for this format
* @flags: format flags (V4L2_PIX_FMT_FLAG_*)
* @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
* @hsv_enc: enum v4l2_hsv_encoding, HSV encoding
* @quantization: enum v4l2_quantization, colorspace quantization
* @xfer_func: enum v4l2_xfer_func, colorspace transfer function
* @reserved: drivers and applications must zero this array
*/
struct v4l2_pix_format_mplane {
__u32 width;
@@ -2266,7 +2098,6 @@ struct v4l2_pix_format_mplane {
* struct v4l2_sdr_format - SDR format definition
* @pixelformat: little endian four character code (fourcc)
* @buffersize: maximum size in bytes required for data
* @reserved: drivers and applications must zero this array
*/
struct v4l2_sdr_format {
__u32 pixelformat;
@@ -2293,8 +2124,6 @@ struct v4l2_meta_format {
* @vbi: raw VBI capture or output parameters
* @sliced: sliced VBI capture or output parameters
* @raw_data: placeholder for future extensions and custom formats
* @fmt: union of @pix, @pix_mp, @win, @vbi, @sliced, @sdr, @meta
* and @raw_data
*/
struct v4l2_format {
__u32 type;
@@ -2462,7 +2291,6 @@ struct v4l2_dbg_chip_info {
* return: number of created buffers
* @memory: enum v4l2_memory; buffer memory type
* @format: frame format, for which buffers are requested
* @capabilities: capabilities of this buffer type.
* @reserved: future extensions
*/
struct v4l2_create_buffers {
@@ -2470,8 +2298,7 @@ struct v4l2_create_buffers {
__u32 count;
__u32 memory;
struct v4l2_format format;
__u32 capabilities;
__u32 reserved[7];
__u32 reserved[8];
};
/*
@@ -2479,6 +2306,7 @@ struct v4l2_create_buffers {
*
*/
#define VIDIOC_QUERYCAP _IOR('V', 0, struct v4l2_capability)
#define VIDIOC_RESERVED _IO('V', 1)
#define VIDIOC_ENUM_FMT _IOWR('V', 2, struct v4l2_fmtdesc)
#define VIDIOC_G_FMT _IOWR('V', 4, struct v4l2_format)
#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format)
@@ -2524,8 +2352,8 @@ struct v4l2_create_buffers {
#define VIDIOC_S_CROP _IOW('V', 60, struct v4l2_crop)
#define VIDIOC_G_JPEGCOMP _IOR('V', 61, struct v4l2_jpegcompression)
#define VIDIOC_S_JPEGCOMP _IOW('V', 62, struct v4l2_jpegcompression)
#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id)
#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format)
#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id)
#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format)
#define VIDIOC_ENUMAUDIO _IOWR('V', 65, struct v4l2_audio)
#define VIDIOC_ENUMAUDOUT _IOWR('V', 66, struct v4l2_audioout)
#define VIDIOC_G_PRIORITY _IOR('V', 67, __u32) /* enum v4l2_priority */
@@ -2546,8 +2374,8 @@ struct v4l2_create_buffers {
* Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
* You must be root to use these ioctls. Never use these in applications!
*/
#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register)
#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register)
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
@@ -2579,4 +2407,4 @@ struct v4l2_create_buffers {
#define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */
#endif /* __LINUX_VIDEODEV2_H */
#endif /* _UAPI__LINUX_VIDEODEV2_H */

View File

@@ -2,8 +2,7 @@
*
* Copyright (C) 2001-2002 Ronald Bultje <rbultje@ronald.bitfreak.net>
* 2006 Edgard Lima <edgard.lima@gmail.com>
* SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LGPL-2.0-only
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* gstv4l2.c: plugin for v4l2 elements
*
@@ -292,88 +291,6 @@ GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
#else
static gboolean
gst_v4l2_has_vp8_encoder(void)
{
gboolean ret = FALSE;
int fd = -1, rval = 0;
long len = -1;
struct stat statbuf;
char info[128];
fd = open(V4L2_DEVICE_PATH_TEGRA_INFO, O_RDONLY);
if (fd < 0)
return ret;
rval = stat(V4L2_DEVICE_PATH_TEGRA_INFO, &statbuf);
if (rval < 0)
{
close(fd);
return ret;
}
if (statbuf.st_size > 8 && statbuf.st_size < 128)
{
rval = read(fd, info, statbuf.st_size);
if (rval <= 0)
{
close(fd);
return ret;
}
len = statbuf.st_size - 8;
for (int i = 0; i < len; i ++)
{
if (strncmp(&info[i], "tegra", 5) == 0)
{
if (strncmp(&info[i], "tegra186", 8) == 0 ||
strncmp(&info[i], "tegra210", 8) == 0)
ret = TRUE;
break;
}
}
}
close(fd);
return ret;
}
static gboolean
gst_v4l2_is_v4l2_nvenc_present(void)
{
gboolean ret = TRUE;
int fd = -1;
struct stat statbuf;
char info[128];
if (access(V4L2_DEVICE_INFO_SOM_EEPROM, F_OK) == 0)
{
stat(V4L2_DEVICE_INFO_SOM_EEPROM, &statbuf);
if (statbuf.st_size > 0 && statbuf.st_size < 128)
{
fd = open(V4L2_DEVICE_INFO_SOM_EEPROM, O_RDONLY);
read(fd, info, statbuf.st_size);
for (int i = 0; i <= (statbuf.st_size - 9); i++)
{
if (strncmp(&info[i], "3767", 4) == 0)
{
/*
* Jetson Orin Nano 8GB (P3767-0003) Commercial module
* Jetson Orin Nano 4GB (P3767-0004) Commercial module
* Jetson Orin Nano 8GB with SD card slot (P3767-0005) For the Developer Kit only
*/
if (strncmp(&info[i + 5], "0003", 4) == 0 ||
strncmp(&info[i + 5], "0004", 4) == 0 ||
strncmp(&info[i + 5], "0005", 4) == 0)
{
ret = FALSE;
break;
}
}
}
close(fd);
}
}
return ret;
}
static gboolean
plugin_init (GstPlugin * plugin)
{
@@ -384,18 +301,17 @@ plugin_init (GstPlugin * plugin)
GST_DEBUG_CATEGORY_INIT (v4l2_debug, "v4l2", 0, "V4L2 API calls");
#ifndef USE_V4L2_TARGET_NV_X86
int result = -1;
result = (gboolean)system("lsmod | grep 'nvgpu' > /dev/null");
if (result == 0)
int ret_val = -1;
ret_val = system("lsmod | grep 'nvgpu' > /dev/null");
if (ret_val == -1) {
return FALSE;
}
else if (ret_val == 0) {
is_cuvid = FALSE;
else
}
else {
is_cuvid = TRUE;
if (getenv("AARCH64_DGPU"))
is_cuvid = TRUE;
else if (getenv("AARCH64_IGPU"))
is_cuvid = FALSE;
}
#endif
if (is_cuvid == TRUE)
@@ -428,16 +344,7 @@ plugin_init (GstPlugin * plugin)
V4L2_DEVICE_PATH_NVENC,
NULL,
NULL);
gst_v4l2_av1_enc_register (plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC,
NULL,
NULL);
} else {
if (!gst_v4l2_is_v4l2_nvenc_present()) {
// Orin Nano does not have HW encoders, so early return here.
return ret;
}
gst_v4l2_h264_enc_register(plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC_ALT,
@@ -448,40 +355,41 @@ plugin_init (GstPlugin * plugin)
V4L2_DEVICE_PATH_NVENC_ALT,
NULL,
NULL);
gst_v4l2_av1_enc_register (plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC_ALT,
NULL,
NULL);
}
if (is_cuvid == FALSE) {
if (access (V4L2_DEVICE_PATH_NVENC, F_OK) == 0) {
if (gst_v4l2_has_vp8_encoder()) {
gst_v4l2_vp8_enc_register (plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC,
NULL,
NULL);
}
gst_v4l2_vp8_enc_register (plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC,
NULL,
NULL);
gst_v4l2_vp9_enc_register (plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC,
NULL,
NULL);
gst_v4l2_av1_enc_register (plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC,
NULL,
NULL);
} else {
if (gst_v4l2_has_vp8_encoder()) {
gst_v4l2_vp8_enc_register (plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC_ALT,
NULL,
NULL);
}
gst_v4l2_vp8_enc_register (plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC_ALT,
NULL,
NULL);
gst_v4l2_vp9_enc_register (plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC_ALT,
NULL,
NULL);
gst_v4l2_av1_enc_register (plugin,
V4L2_DEVICE_BASENAME_NVENC,
V4L2_DEVICE_PATH_NVENC_ALT,
NULL,
NULL);
}
}

View File

@@ -1,9 +1,7 @@
/*
* Copyright (C) 2014 Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
* SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LGPL-2.0-only
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -1188,7 +1186,7 @@ gst_v4l2_allocator_import_dmabuf (GstV4l2Allocator * allocator,
for (i = 0; i < group->n_mem; i++) {
gint dmafd;
gsize size, offset = 0, maxsize = 0;
gsize size, offset, maxsize;
if (!gst_is_dmabuf_memory (dma_mem[i]))
goto not_dmabuf;
@@ -1267,11 +1265,10 @@ gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
gsize maxsize, psize;
if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
struct v4l2_pix_format_mplane *pix = &obj->format.fmt.pix_mp;
maxsize = pix->plane_fmt[i].sizeimage;
maxsize = group->planes[i].length;
psize = size[i];
} else {
maxsize = obj->format.fmt.pix.sizeimage;
maxsize = group->planes[i].length;
psize = img_size;
}
@@ -1369,19 +1366,13 @@ gst_v4l2_allocator_qbuf (GstV4l2Allocator * allocator,
if (obj->is_encode) {
if ((is_cuvid == true) && (obj->sei_payload != NULL)) {
gint ret;
struct v4l2_ext_control ctls[2];
struct v4l2_ext_control ctl;
struct v4l2_ext_controls ctrls;
ctls[0].id = V4L2_CID_MPEG_VIDEOENC_DS_SEI_DATA;
ctls[0].ptr = obj->sei_payload;
ctls[0].size = obj->sei_payload_size;
ctls[1].id = V4L2_CID_MPEG_VIDEOENC_DS_SEI_UUID;
ctls[1].string = obj->sei_uuid;
ctls[1].size = 16;
ctrls.count = 2;
ctrls.controls = ctls;
ctl.id = V4L2_CID_MPEG_VIDEOENC_DS_SEI_DATA;
ctl.ptr = obj->sei_payload;
ctl.size = obj->sei_payload_size;
ctrls.count = 1;
ctrls.controls = &ctl ;
ret = obj->ioctl (obj->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
{

View File

@@ -1,6 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LGPL-2.0-only
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -57,7 +56,6 @@ enum
PROP_ENABLE_TILE_CONFIG,
PROP_DISABLE_CDF,
PROP_ENABLE_SSIMRDO,
PROP_INSERT_SEQ_HDR,
PROP_NUM_REFERENCE_FRAMES,
};
@@ -90,9 +88,6 @@ gst_v4l2_av1_enc_set_property (GObject * object,
case PROP_ENABLE_SSIMRDO:
self->EnableSsimRdo = g_value_get_boolean (value);
break;
case PROP_INSERT_SEQ_HDR:
self->insert_sps_pps = g_value_get_boolean (value);
break;
case PROP_NUM_REFERENCE_FRAMES:
self->nRefFrames = g_value_get_uint (value);
break;
@@ -120,9 +115,6 @@ gst_v4l2_av1_enc_get_property (GObject * object,
case PROP_ENABLE_SSIMRDO:
g_value_set_boolean (value, self->EnableSsimRdo);
break;
case PROP_INSERT_SEQ_HDR:
g_value_set_boolean (value, self->insert_sps_pps);
break;
case PROP_NUM_REFERENCE_FRAMES:
g_value_set_uint (value, self->nRefFrames);
break;
@@ -223,34 +215,23 @@ set_v4l2_av1_encoder_properties (GstVideoEncoder * encoder)
return FALSE;
}
if (is_cuvid == FALSE) {
if (self->EnableTileConfig) {
if (!gst_v4l2_av1_enc_tile_configuration (video_enc->v4l2output,
self->EnableTileConfig, self->Log2TileRows, self->Log2TileCols)) {
g_print ("S_EXT_CTRLS for Tile Configuration failed\n");
return FALSE;
}
}
if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
V4L2_CID_MPEG_VIDEOENC_AV1_DISABLE_CDF_UPDATE, self->DisableCDFUpdate)) {
g_print ("S_EXT_CTRLS for DisableCDF Update failed\n");
if (self->EnableTileConfig) {
if (!gst_v4l2_av1_enc_tile_configuration (video_enc->v4l2output,
self->EnableTileConfig, self->Log2TileRows, self->Log2TileCols)) {
g_print ("S_EXT_CTRLS for Tile Configuration failed\n");
return FALSE;
}
if (self->EnableSsimRdo) {
if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
V4L2_CID_MPEG_VIDEOENC_AV1_ENABLE_SSIMRDO, self->EnableSsimRdo)) {
g_print ("S_EXT_CTRLS for SSIM RDO failed\n");
return FALSE;
}
}
}
if (self->insert_sps_pps) {
if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
V4L2_CID_MPEG_VIDEOENC_AV1_DISABLE_CDF_UPDATE, self->DisableCDFUpdate)) {
g_print ("S_EXT_CTRLS for DisableCDF Update failed\n");
return FALSE;
}
if (self->EnableSsimRdo) {
if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
V4L2_CID_MPEG_VIDEOENC_INSERT_SPS_PPS_AT_IDR, self->insert_sps_pps)) {
V4L2_CID_MPEG_VIDEOENC_AV1_ENABLE_SSIMRDO, self->EnableSsimRdo)) {
g_print ("S_EXT_CTRLS for SSIM RDO failed\n");
return FALSE;
}
@@ -274,7 +255,6 @@ gst_v4l2_av1_enc_init (GstV4l2Av1Enc * self)
self->EnableTileConfig = FALSE;
self->DisableCDFUpdate = TRUE;
self->EnableSsimRdo = FALSE;
self->insert_sps_pps = FALSE;
self->Log2TileRows= 0;
self->Log2TileCols= 0;
}
@@ -311,8 +291,6 @@ gst_v4l2_av1_enc_class_init (GstV4l2Av1EncClass * klass)
"Enable AV1 file and frame headers, if enabled, dump elementary stream",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
#if !defined(USE_V4L2_TARGET_NV_X86) && !defined(AARCH64_IS_SBSA)
g_object_class_install_property (gobject_class, PROP_ENABLE_TILE_CONFIG,
g_param_spec_string ("tiles", "AV1 Log2 Tile Configuration",
"Use string with values of Tile Configuration"
@@ -329,12 +307,6 @@ gst_v4l2_av1_enc_class_init (GstV4l2Av1EncClass * klass)
"Enable SSIM RDO",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
#endif
g_object_class_install_property (gobject_class, PROP_INSERT_SEQ_HDR,
g_param_spec_boolean ("insert-seq-hdr", "Insert sequence header",
"Insert sequence header at every IDR frame",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_NUM_REFERENCE_FRAMES,
g_param_spec_uint ("num-Ref-Frames",
"Sets the number of reference frames for encoder",

View File

@@ -1,6 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LGPL-2.0-only
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -46,7 +45,6 @@ struct _GstV4l2Av1Enc
gboolean EnableTileConfig;
gboolean DisableCDFUpdate;
gboolean EnableSsimRdo;
gboolean insert_sps_pps;
guint32 Log2TileRows;
guint32 Log2TileCols;
guint32 nRefFrames;

View File

@@ -3,9 +3,7 @@
* Copyright (C) 2001-2002 Ronald Bultje <rbultje@ronald.bitfreak.net>
* 2006 Edgard Lima <edgard.lima@gmail.com>
* 2009 Texas Instruments, Inc - http://www.ti.com/
*
* SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LGPL-2.0-only
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* gstv4l2bufferpool.c V4L2 buffer pool class
*
@@ -37,6 +35,7 @@
#include <sys/mman.h>
#include <string.h>
#include <unistd.h>
#include "gst/video/video.h"
#include "gst/video/gstvideometa.h"
#include "gst/video/gstvideopool.h"
@@ -82,8 +81,6 @@ report_metadata (GstV4l2Object * obj, guint32 buffer_index,
static void
v4l2_video_dec_get_enable_frame_type_reporting (GstV4l2Object * obj,
guint32 buffer_index, v4l2_ctrl_videodec_outputbuf_metadata * dec_metadata);
static void
v4l2_video_dec_clear_poll_interrupt (GstV4l2Object * obj);
#endif
static gboolean
@@ -138,28 +135,6 @@ done:
return valid;
}
static NvBufSurfTransform_Error CopySurfTransform(NvBufSurface* src, NvBufSurface* dest)
{
NvBufSurfTransform_Error status;
NvBufSurfTransformParams transformParams;
NvBufSurfTransformRect srcRect;
NvBufSurfTransformRect destRect;
srcRect.top = srcRect.left = 0;
destRect.top = destRect.left = 0;
srcRect.width = src->surfaceList[0].width;
srcRect.height = src->surfaceList[0].height;
destRect.width = dest->surfaceList[0].width;
destRect.height = dest->surfaceList[0].height;
transformParams.src_rect = &srcRect;
transformParams.dst_rect = &destRect;
transformParams.transform_flag = NVBUFSURF_TRANSFORM_FILTER;
transformParams.transform_flip = NvBufSurfTransform_None;
transformParams.transform_filter = NvBufSurfTransformInter_Nearest;
status = NvBufSurfTransform(src, dest, &transformParams);
return status;
}
static GstFlowReturn
gst_v4l2_buffer_pool_copy_buffer (GstV4l2BufferPool * pool, GstBuffer * dest,
GstBuffer * src)
@@ -315,11 +290,10 @@ gst_v4l2_buffer_pool_copy_buffer (GstV4l2BufferPool * pool, GstBuffer * dest,
return GST_FLOW_ERROR;
}
if (CopySurfTransform(src_bufsurf, dst_bufsurf) != NvBufSurfTransformError_Success)
if (NvBufSurfaceCopy(src_bufsurf, dst_bufsurf) != 0)
{
GST_ERROR_OBJECT(src, "ERROR in BufSurfacecopy \n");
gst_buffer_unmap(src, &inmap);
return GST_FLOW_ERROR;
g_print("ERROR in BufSurfacecopy \n");
return GST_FLOW_ERROR;
}
gst_buffer_unmap(src, &inmap);
}
@@ -415,10 +389,16 @@ gst_v4l2_buffer_pool_import_userptr (GstV4l2BufferPool * pool,
for (i = 0; i < GST_VIDEO_FORMAT_INFO_N_PLANES (finfo); i++) {
if (GST_VIDEO_FORMAT_INFO_IS_TILED (finfo)) {
gint tinfo = GST_VIDEO_FRAME_PLANE_STRIDE (&data->frame, i);
size[i] = GST_VIDEO_TILE_X_TILES (tinfo) *
GST_VIDEO_TILE_Y_TILES (tinfo) *
GST_VIDEO_FORMAT_INFO_TILE_SIZE (finfo, i);
gint pstride;
guint pheight;
pstride = GST_VIDEO_TILE_X_TILES (tinfo) <<
GST_VIDEO_FORMAT_INFO_TILE_WS (finfo);
pheight = GST_VIDEO_TILE_Y_TILES (tinfo) <<
GST_VIDEO_FORMAT_INFO_TILE_HS (finfo);
size[i] = pstride * pheight;
} else {
size[i] = GST_VIDEO_FRAME_PLANE_STRIDE (&data->frame, i) *
GST_VIDEO_FRAME_COMP_HEIGHT (&data->frame, i);
@@ -722,20 +702,6 @@ gst_v4l2_buffer_pool_alloc_buffer (GstBufferPool * bpool, GstBuffer ** buffer,
for (i = 0; i < group->n_mem; i++)
gst_buffer_append_memory (newbuf, group->mem[i]);
#ifdef USE_V4L2_TARGET_NV
if (!V4L2_TYPE_IS_OUTPUT(obj->type) && is_cuvid == FALSE) {
GstMapInfo map = GST_MAP_INFO_INIT;
NvBufSurface *nvbuf_surf = NULL;
gst_buffer_map (newbuf, &map, GST_MAP_READ);
nvbuf_surf = (NvBufSurface *) map.data;
if (g_queue_find(pool->allocated_surfaces_queue, nvbuf_surf) == NULL)
{
g_queue_push_tail (pool->allocated_surfaces_queue, nvbuf_surf);
}
gst_buffer_unmap (newbuf, &map);
}
#endif
} else if (newbuf == NULL) {
goto allocation_failed;
}
@@ -765,14 +731,13 @@ gst_v4l2_buffer_pool_set_config (GstBufferPool * bpool, GstStructure * config)
GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
GstV4l2Object *obj = pool->obj;
GstCaps *caps;
guint size, min_buffers = 0, max_buffers = 0;
GstAllocator *allocator = NULL;
GstAllocationParams params = { 0, };
guint size, min_buffers, max_buffers;
GstAllocator *allocator;
GstAllocationParams params;
gboolean can_allocate = FALSE;
gboolean updated = FALSE;
gboolean ret;
gst_allocation_params_init (&params);
pool->add_videometa =
gst_buffer_pool_config_has_option (config,
GST_BUFFER_POOL_OPTION_VIDEO_META);
@@ -877,16 +842,8 @@ gst_v4l2_buffer_pool_set_config (GstBufferPool * bpool, GstStructure * config)
}
/* Always update the config to ensure the configured size matches */
if ((!strcmp (obj->videodev, V4L2_DEVICE_PATH_NVENC) || !strcmp (obj->videodev, V4L2_DEVICE_PATH_NVENC_ALT)) &&
(obj->mode == GST_V4L2_IO_DMABUF_IMPORT)) {
/*For DMABUF Import, queue size should be kept to max to avoid frame drops coming from decoder*/
min_buffers = max_buffers = 24; /* NvMM Encoder has MAX Q size as 24*/
gst_buffer_pool_config_set_params(config, caps, sizeof(NvBufSurface), min_buffers,
max_buffers);
}
else
gst_buffer_pool_config_set_params (config, caps, obj->info.size, min_buffers,
max_buffers);
gst_buffer_pool_config_set_params (config, caps, obj->info.size, min_buffers,
max_buffers);
#ifdef USE_V4L2_TARGET_NV
/* Need to adjust the size to 0th plane's size since we will only output
v4l2 memory associated with 0th plane. */
@@ -1006,12 +963,10 @@ gst_v4l2_buffer_pool_streamoff (GstV4l2BufferPool * pool)
#endif
gint i;
GST_OBJECT_LOCK (pool);
if (!pool->streaming) {
GST_OBJECT_UNLOCK (pool);
if (!pool->streaming)
return;
}
GST_OBJECT_LOCK (pool);
switch (obj->mode) {
case GST_V4L2_IO_MMAP:
@@ -1073,7 +1028,7 @@ gst_v4l2_buffer_pool_start (GstBufferPool * bpool)
GstV4l2Object *obj = pool->obj;
GstStructure *config;
GstCaps *caps;
guint size = 0, min_buffers = 0, max_buffers = 0;
guint size, min_buffers, max_buffers;
guint max_latency, min_latency, copy_threshold = 0;
gboolean can_allocate = FALSE, ret = TRUE;
@@ -1254,32 +1209,6 @@ gst_v4l2_buffer_pool_stop (GstBufferPool * bpool)
GST_DEBUG_OBJECT (pool, "stopping pool");
#ifdef USE_V4L2_TARGET_NV
/*
* On EOS, video_fd becomes -1, this makes VIDIOC_REQBUFS API fail which
* internally releases hardware buffers causing memory leak.
* In below code NvBufSurfaces are destroyed explicitly.
*/
if (pool->vallocator) {
if (pool->vallocator->obj->video_fd == -1 &&
!V4L2_TYPE_IS_OUTPUT (pool->vallocator->obj->type) &&
is_cuvid == FALSE) {
while(g_queue_get_length(pool->allocated_surfaces_queue) > 0) {
int retval = 0;
NvBufSurface* allocated_surface = NULL;
allocated_surface = g_queue_pop_head (pool->allocated_surfaces_queue);
if (allocated_surface) {
retval = NvBufSurfaceDestroy(allocated_surface);
if(retval < 0) {
GST_ERROR_OBJECT (pool, "failed to destroy nvbufsurface");
return GST_V4L2_ERROR;
}
}
}
}
}
#endif
if (pool->group_released_handler > 0) {
g_signal_handler_disconnect (pool->vallocator,
pool->group_released_handler);
@@ -1333,11 +1262,6 @@ gst_v4l2_buffer_pool_flush_start (GstBufferPool * bpool)
#ifndef USE_V4L2_TARGET_NV
gst_poll_set_flushing (pool->poll, TRUE);
#else
if (is_cuvid == TRUE)
{
v4l2_video_dec_clear_poll_interrupt (pool->obj);
}
#endif
GST_OBJECT_LOCK (pool);
@@ -1347,8 +1271,6 @@ gst_v4l2_buffer_pool_flush_start (GstBufferPool * bpool)
if (pool->other_pool)
gst_buffer_pool_set_flushing (pool->other_pool, TRUE);
GST_DEBUG_OBJECT (pool, "End flushing");
}
static void
@@ -1930,9 +1852,7 @@ gst_v4l2_buffer_pool_finalize (GObject * object)
if (pool->video_fd >= 0)
pool->obj->close (pool->video_fd);
#ifdef USE_V4L2_TARGET_NV
g_queue_free(pool->allocated_surfaces_queue);
#else
#ifndef USE_V4L2_TARGET_NV
gst_poll_free (pool->poll);
#endif
@@ -1956,10 +1876,6 @@ gst_v4l2_buffer_pool_init (GstV4l2BufferPool * pool)
#endif
g_cond_init (&pool->empty_cond);
pool->empty = TRUE;
#ifdef USE_V4L2_TARGET_NV
pool->allocated_surfaces_queue = g_queue_new();
#endif
}
static void
@@ -2084,7 +2000,7 @@ gst_v4l2_do_read (GstV4l2BufferPool * pool, GstBuffer * buf)
GstFlowReturn res;
GstV4l2Object *obj = pool->obj;
gint amount;
GstMapInfo map = GST_MAP_INFO_INIT;
GstMapInfo map;
gint toread;
toread = obj->info.size;
@@ -2613,25 +2529,5 @@ v4l2_video_dec_get_enable_frame_type_reporting (GstV4l2Object * obj,
if (ret < 0)
g_print ("Error while getting report metadata\n");
}
static void
v4l2_video_dec_clear_poll_interrupt (GstV4l2Object * obj)
{
struct v4l2_ext_control control;
struct v4l2_ext_controls ctrls;
gint ret = -1;
ctrls.count = 1;
ctrls.controls = &control;
ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
control.id = V4L2_CID_MPEG_SET_POLL_INTERRUPT;
control.value = 0;
ret = obj->ioctl (obj->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret < 0)
g_print ("Error while clearing poll interrupt\n");
}
#endif

View File

@@ -3,9 +3,7 @@
* Copyright (C) 2001-2002 Ronald Bultje <rbultje@ronald.bitfreak.net>
* 2006 Edgard Lima <edgard.lima@gmail.com>
* 2009 Texas Instruments, Inc - http://www.ti.com/
*
* SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LGPL-2.0-only
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* gstv4l2bufferpool.h V4L2 buffer pool class
*
@@ -95,7 +93,6 @@ struct _GstV4l2BufferPool
#ifdef USE_V4L2_TARGET_NV
GstBuffer *buffers[NV_VIDEO_MAX_FRAME];
GQueue *allocated_surfaces_queue;
#else
GstBuffer *buffers[VIDEO_MAX_FRAME];
#endif

View File

@@ -1,8 +1,7 @@
/*
* Copyright (C) 2014 SUMOMO Computer Association
* Author: ayaka <ayaka@soulik.info>
* SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LGPL-2.0-only
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -50,8 +49,6 @@ gst_v4l2_videnc_profile_get_type (void);
gboolean gst_v4l2_h264_enc_slice_header_spacing (GstV4l2Object * v4l2object,
guint32 slice_header_spacing, enum v4l2_enc_slice_length_type slice_length_type);
gboolean set_v4l2_h264_encoder_properties (GstVideoEncoder * encoder);
gboolean gst_v4l2_h264_enc_slice_intrarefresh (GstV4l2Object * v4l2object,
guint32 slice_count, guint32 slice_interval);
#endif
#ifdef USE_V4L2_TARGET_NV
@@ -118,7 +115,7 @@ enum
#define MAX_NUM_REFERENCE_FRAMES 8
#define DEFAULT_BIT_PACKETIZATION FALSE
#define DEFAULT_SLICE_HEADER_SPACING 0
#define DEFAULT_INTRA_REFRESH_FRAME_INTERVAL 0
#define DEFAULT_INTRA_REFRESH_FRAME_INTERVAL 60
#define DEFAULT_PIC_ORDER_CNT_TYPE 0
#endif
@@ -305,8 +302,6 @@ v4l2_profile_from_string (const gchar * profile)
v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH;
} else if (g_str_equal (profile, "multiview-high")) {
v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH;
} else if (g_str_equal (profile, "constrained-high")) {
v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH;
} else {
GST_WARNING ("Unsupported profile string '%s'", profile);
}
@@ -352,8 +347,6 @@ v4l2_profile_to_string (gint v4l2_profile)
return "stereo-high";
case V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH:
return "multiview-high";
case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH:
return "constrained-high";
default:
GST_WARNING ("Unsupported V4L2 profile %i", v4l2_profile);
break;
@@ -456,20 +449,14 @@ gst_v4l2_h264_enc_init (GstV4l2H264Enc * self)
self->profile = DEFAULT_PROFILE;
self->insert_sps_pps = FALSE;
self->insert_aud = FALSE;
self->enableLossless = FALSE;
self->nRefFrames = 1;
if (is_cuvid == TRUE)
{
self->extended_colorformat = FALSE;
self->nRefFrames = 0;
self->insert_vui = TRUE;
}
#if !defined(USE_V4L2_TARGET_NV_X86) && !defined(AARCH64_IS_SBSA)
self->insert_vui = FALSE;
#endif
self->enableLossless = FALSE;
if (is_cuvid == TRUE)
self->extended_colorformat = FALSE;
self->nBFrames = 0;
self->nRefFrames = 1;
self->bit_packetization = DEFAULT_BIT_PACKETIZATION;
self->slice_header_spacing = DEFAULT_SLICE_HEADER_SPACING;
self->poc_type = DEFAULT_PIC_ORDER_CNT_TYPE;
@@ -510,16 +497,26 @@ gst_v4l2_h264_enc_class_init (GstV4l2H264EncClass * klass)
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_INSERT_VUI,
if (is_cuvid == TRUE) {
g_object_class_install_property (gobject_class, PROP_EXTENDED_COLORFORMAT,
g_param_spec_boolean ("extended-colorformat",
"Set Extended ColorFormat",
"Set Extended ColorFormat pixel values 0 to 255 in VUI Info",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
} else if (is_cuvid == FALSE) {
g_object_class_install_property (gobject_class, PROP_PIC_ORDER_CNT_TYPE,
g_param_spec_uint ("poc-type",
"Picture Order Count type",
"Set Picture Order Count type value",
0, 2, DEFAULT_PIC_ORDER_CNT_TYPE,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_INSERT_VUI,
g_param_spec_boolean ("insert-vui",
"Insert H.264 VUI",
"Insert H.264 VUI(Video Usability Information) in SPS",
#if !defined(USE_V4L2_TARGET_NV_X86) && !defined(AARCH64_IS_SBSA)
FALSE,
#else
TRUE,
#endif
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_INSERT_SPS_PPS,
g_param_spec_boolean ("insert-sps-pps",
@@ -561,43 +558,6 @@ gst_v4l2_h264_enc_class_init (GstV4l2H264EncClass * klass)
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_NUM_REFERENCE_FRAMES,
g_param_spec_uint ("num-Ref-Frames",
"Sets the number of reference frames for encoder",
"Number of Reference Frames for encoder",
0, MAX_NUM_REFERENCE_FRAMES, (is_cuvid == TRUE) ? 0 : DEFAULT_NUM_REFERENCE_FRAMES,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_ENABLE_LOSSLESS_ENC,
g_param_spec_boolean ("enable-lossless",
"Enable Lossless encoding",
"Enable lossless encoding for YUV444",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_SLICE_INTRA_REFRESH_INTERVAL,
g_param_spec_uint ("SliceIntraRefreshInterval",
"SliceIntraRefreshInterval", "Set SliceIntraRefreshInterval", 0,
G_MAXUINT, DEFAULT_INTRA_REFRESH_FRAME_INTERVAL,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
if (is_cuvid == TRUE) {
g_object_class_install_property (gobject_class, PROP_EXTENDED_COLORFORMAT,
g_param_spec_boolean ("extended-colorformat",
"Set Extended ColorFormat",
"Set Extended ColorFormat pixel values 0 to 255 in VUI Info",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
} else if (is_cuvid == FALSE) {
g_object_class_install_property (gobject_class, PROP_PIC_ORDER_CNT_TYPE,
g_param_spec_uint ("poc-type",
"Picture Order Count type",
"Set Picture Order Count type value",
0, 2, DEFAULT_PIC_ORDER_CNT_TYPE,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_ENABLE_MV_META,
g_param_spec_boolean ("EnableMVBufferMeta",
"Enable Motion Vector Meta data",
@@ -605,12 +565,35 @@ gst_v4l2_h264_enc_class_init (GstV4l2H264EncClass * klass)
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class,
PROP_SLICE_INTRA_REFRESH_INTERVAL,
g_param_spec_uint ("SliceIntraRefreshInterval",
"SliceIntraRefreshInterval", "Set SliceIntraRefreshInterval", 0,
G_MAXUINT, DEFAULT_INTRA_REFRESH_FRAME_INTERVAL,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_TWO_PASS_CBR,
g_param_spec_boolean ("EnableTwopassCBR",
"Enable Two pass CBR",
"Enable two pass CBR while encoding",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_NUM_REFERENCE_FRAMES,
g_param_spec_uint ("num-Ref-Frames",
"Sets the number of reference frames for encoder",
"Number of Reference Frames for encoder",
0, MAX_NUM_REFERENCE_FRAMES, DEFAULT_NUM_REFERENCE_FRAMES,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_ENABLE_LOSSLESS_ENC,
g_param_spec_boolean ("enable-lossless",
"Enable Lossless encoding",
"Enable lossless encoding for YUV444",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
}
#endif
baseclass->codec_name = "H264";
@@ -646,21 +629,16 @@ gst_v4l2_h264_enc_register (GstPlugin * plugin, const gchar * basename,
static GType
gst_v4l2_videnc_profile_get_type (void)
{
static GType profile = 0;
static volatile gsize profile = 0;
static const GEnumValue profile_type[] = {
{V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
"GST_V4L2_H264_VIDENC_BASELINE_PROFILE",
"Baseline"},
{V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE,
"GST_V4L2_H264_VIDENC_CONSTRAINED_BASELINE_PROFILE",
"Constrained-Baseline"},
{V4L2_MPEG_VIDEO_H264_PROFILE_MAIN, "GST_V4L2_H264_VIDENC_MAIN_PROFILE",
"Main"},
{V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, "GST_V4L2_H264_VIDENC_HIGH_PROFILE",
"High"},
{V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH, "GST_V4L2_H264_VIDENC_CONSTRAINED_HIGH_PROFILE",
"Constrained-High"},
{V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE, "GST_V4L2_H264_VIDENC_HIGH_444_PREDICTIVE_PROFILE",
{V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE, "GST_V4L2_H264_VIDENC_HIGH_444_PREDICTIVE",
"High444"},
{0, NULL, NULL}
};
@@ -724,41 +702,6 @@ gst_v4l2_h264_enc_slice_header_spacing (GstV4l2Object * v4l2object,
return TRUE;
}
gboolean
gst_v4l2_h264_enc_slice_intrarefresh (GstV4l2Object * v4l2object,
guint32 slice_count, guint32 slice_interval)
{
struct v4l2_ext_control control;
struct v4l2_ext_controls ctrls;
gint ret;
v4l2_ctrl_intra_refresh cuvid_param = {1, slice_interval, slice_count};
v4l2_enc_slice_intrarefresh_param param = {slice_count};
memset (&control, 0, sizeof (control));
memset (&ctrls, 0, sizeof (ctrls));
ctrls.count = 1;
ctrls.controls = &control;
ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
control.id = V4L2_CID_MPEG_VIDEOENC_SLICE_INTRAREFRESH_PARAM;
if (is_cuvid)
control.string = (gchar *)&cuvid_param;
else
control.string = (gchar *)&param;
ret = v4l2object->ioctl(v4l2object->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret < 0)
{
g_print("Error while setting slice intrarefresh params\n");
return FALSE;
}
return TRUE;
}
gboolean
set_v4l2_h264_encoder_properties (GstVideoEncoder * encoder)
{
@@ -788,10 +731,12 @@ set_v4l2_h264_encoder_properties (GstVideoEncoder * encoder)
}
}
if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
V4L2_CID_MPEG_VIDEOENC_INSERT_VUI, self->insert_vui)) {
g_print ("S_EXT_CTRLS for INSERT_VUI failed\n");
return FALSE;
if (self->insert_vui) {
if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
V4L2_CID_MPEG_VIDEOENC_INSERT_VUI, 1)) {
g_print ("S_EXT_CTRLS for INSERT_VUI failed\n");
return FALSE;
}
}
if (is_cuvid == TRUE) {
@@ -851,9 +796,9 @@ set_v4l2_h264_encoder_properties (GstVideoEncoder * encoder)
}
if (self->SliceIntraRefreshInterval) {
if (!gst_v4l2_h264_enc_slice_intrarefresh (video_enc->v4l2output,
self->SliceIntraRefreshInterval,
video_enc->idrinterval)) {
if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
V4L2_CID_MPEG_VIDEOENC_SLICE_INTRAREFRESH_PARAM,
self->SliceIntraRefreshInterval)) {
g_print ("S_EXT_CTRLS for SLICE_INTRAREFRESH_PARAM failed\n");
return FALSE;
}

View File

@@ -1,6 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LGPL-2.0-only
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -49,13 +48,12 @@ gst_v4l2_videnc_profile_get_type (void);
#define GST_TYPE_V4L2_VID_ENC_PROFILE (gst_v4l2_videnc_profile_get_type ())
/* prototypes */
gboolean set_v4l2_h265_encoder_properties(GstVideoEncoder * encoder);
gboolean set_v4l2_h265_encoder_properties (GstVideoEncoder * encoder);
gboolean gst_v4l2_h265_enc_slice_header_spacing (GstV4l2Object * v4l2object,
guint32 slice_header_spacing, enum v4l2_enc_slice_length_type slice_length_type);
void set_h265_video_enc_property (GstV4l2Object * v4l2object, guint label,
gint param);
gboolean gst_v4l2_h265_enc_slice_intrarefresh (GstV4l2Object * v4l2object,
guint32 slice_count, guint32 slice_interval);
enum
{
PROP_0,
@@ -78,7 +76,7 @@ enum
#define DEFAULT_PROFILE V4L2_MPEG_VIDEO_H265_PROFILE_MAIN
#define DEFAULT_BIT_PACKETIZATION FALSE
#define DEFAULT_SLICE_HEADER_SPACING 0
#define DEFAULT_INTRA_REFRESH_FRAME_INTERVAL 0
#define DEFAULT_INTRA_REFRESH_FRAME_INTERVAL 60
#define DEFAULT_NUM_B_FRAMES 0
#define MAX_NUM_B_FRAMES 2
#define DEFAULT_NUM_REFERENCE_FRAMES 1
@@ -216,8 +214,6 @@ v4l2_profile_from_string (const gchar * profile)
v4l2_profile = V4L2_MPEG_VIDEO_H265_PROFILE_MAIN10;
} else if (g_str_equal (profile, "mainstillpicture")) {
v4l2_profile = V4L2_MPEG_VIDEO_H265_PROFILE_MAINSTILLPICTURE;
} else if (g_str_equal (profile, "frext")) {
v4l2_profile = V4L2_MPEG_VIDEO_H265_PROFILE_FREXT;
} else {
GST_WARNING ("Unsupported profile string '%s'", profile);
}
@@ -234,8 +230,6 @@ v4l2_profile_to_string (gint v4l2_profile)
return "main10";
case V4L2_MPEG_VIDEO_H265_PROFILE_MAINSTILLPICTURE:
return "mainstillpicture";
case V4L2_MPEG_VIDEO_H265_PROFILE_FREXT:
return "frext";
default:
GST_WARNING ("Unsupported V4L2 profile %i", v4l2_profile);
break;
@@ -247,64 +241,10 @@ static gint
v4l2_level_from_string (const gchar * level)
{
gint v4l2_level = -1;
#ifdef USE_V4L2_TARGET_NV
if (g_str_equal(level, "main_1.0")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_1_0_MAIN_TIER;
} else if (g_str_equal(level, "high_1.0")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_1_0_HIGH_TIER;
} else if (g_str_equal(level, "main_2.0")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_2_0_MAIN_TIER;
} else if (g_str_equal(level, "high_2.0")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_2_0_HIGH_TIER;
} else if (g_str_equal(level, "main_2.1")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_2_1_MAIN_TIER;
} else if (g_str_equal(level, "high_2.1")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_2_1_HIGH_TIER;
} else if (g_str_equal(level, "main_3.0")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_3_0_MAIN_TIER;
} else if (g_str_equal(level, "high_3.0")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_3_0_HIGH_TIER;
} else if (g_str_equal(level, "main_3.1")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_3_1_MAIN_TIER;
} else if (g_str_equal(level, "high_3.1")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_3_1_HIGH_TIER;
} else if (g_str_equal(level, "main_4.0")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_4_0_MAIN_TIER;
} else if (g_str_equal(level, "high_4.0")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_4_0_HIGH_TIER;
} else if (g_str_equal(level, "main_4.1")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_4_1_MAIN_TIER;
} else if (g_str_equal(level, "high_4.1")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_4_1_HIGH_TIER;
} else if (g_str_equal(level, "main_5.0")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_5_0_MAIN_TIER;
} else if (g_str_equal(level, "high_5.0")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_5_0_HIGH_TIER;
} else if (g_str_equal(level, "main_5.1")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_5_1_MAIN_TIER;
} else if (g_str_equal(level, "high_5.1")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_5_1_HIGH_TIER;
} else if (g_str_equal(level, "main_5.2")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_5_2_MAIN_TIER;
} else if (g_str_equal(level, "high_5.2")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_5_2_HIGH_TIER;
} else if (g_str_equal(level, "main_6.0")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_6_0_MAIN_TIER;
} else if (g_str_equal(level, "high_6.0")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_6_0_HIGH_TIER;
} else if (g_str_equal(level, "main_6.1")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_6_1_MAIN_TIER;
} else if (g_str_equal(level, "high_6.1")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_6_1_HIGH_TIER;
} else if (g_str_equal(level, "main_6.2")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_6_2_MAIN_TIER;
} else if (g_str_equal(level, "high_6.2")) {
v4l2_level = V4L2_MPEG_VIDEO_H265_LEVEL_6_2_HIGH_TIER;
} else
{
GST_WARNING("Unsupported level string '%s'", level);
}
#endif
//TODO : Since videodev2 file does not list H265 profiles
//we need to add profiles inside v4l2_nv_extensions.h
//and use them here.
return v4l2_level;
}
@@ -312,68 +252,6 @@ v4l2_level_from_string (const gchar * level)
static const gchar *
v4l2_level_to_string (gint v4l2_level)
{
#ifdef USE_V4L2_TARGET_NV
switch (v4l2_level)
{
case V4L2_MPEG_VIDEO_H265_LEVEL_1_0_MAIN_TIER:
return "main_1.0";
case V4L2_MPEG_VIDEO_H265_LEVEL_1_0_HIGH_TIER:
return "high_1.0";
case V4L2_MPEG_VIDEO_H265_LEVEL_2_0_MAIN_TIER:
return "main_2.0";
case V4L2_MPEG_VIDEO_H265_LEVEL_2_0_HIGH_TIER:
return "high_2.0";
case V4L2_MPEG_VIDEO_H265_LEVEL_2_1_MAIN_TIER:
return "main_2.1";
case V4L2_MPEG_VIDEO_H265_LEVEL_2_1_HIGH_TIER:
return "high_2.1";
case V4L2_MPEG_VIDEO_H265_LEVEL_3_0_MAIN_TIER:
return "main_3.0";
case V4L2_MPEG_VIDEO_H265_LEVEL_3_0_HIGH_TIER:
return "high_3.0";
case V4L2_MPEG_VIDEO_H265_LEVEL_3_1_MAIN_TIER:
return "main_3.1";
case V4L2_MPEG_VIDEO_H265_LEVEL_3_1_HIGH_TIER:
return "high_3.1";
case V4L2_MPEG_VIDEO_H265_LEVEL_4_0_MAIN_TIER:
return "main_4.0";
case V4L2_MPEG_VIDEO_H265_LEVEL_4_0_HIGH_TIER:
return "high_4.0";
case V4L2_MPEG_VIDEO_H265_LEVEL_4_1_MAIN_TIER:
return "main_4.1";
case V4L2_MPEG_VIDEO_H265_LEVEL_4_1_HIGH_TIER:
return "high_4.1";
case V4L2_MPEG_VIDEO_H265_LEVEL_5_0_MAIN_TIER:
return "main_5.0";
case V4L2_MPEG_VIDEO_H265_LEVEL_5_0_HIGH_TIER:
return "high_5.0";
case V4L2_MPEG_VIDEO_H265_LEVEL_5_1_MAIN_TIER:
return "main_5.1";
case V4L2_MPEG_VIDEO_H265_LEVEL_5_1_HIGH_TIER:
return "high_5.1";
case V4L2_MPEG_VIDEO_H265_LEVEL_5_2_MAIN_TIER:
return "main_5.2";
case V4L2_MPEG_VIDEO_H265_LEVEL_5_2_HIGH_TIER:
return "high_5.2";
case V4L2_MPEG_VIDEO_H265_LEVEL_6_0_MAIN_TIER:
return "main_6.0";
case V4L2_MPEG_VIDEO_H265_LEVEL_6_0_HIGH_TIER:
return "high_6.0";
case V4L2_MPEG_VIDEO_H265_LEVEL_6_1_MAIN_TIER:
return "main_6.1";
case V4L2_MPEG_VIDEO_H265_LEVEL_6_1_HIGH_TIER:
return "high_6.1";
case V4L2_MPEG_VIDEO_H265_LEVEL_6_2_MAIN_TIER:
return "main_6.2";
case V4L2_MPEG_VIDEO_H265_LEVEL_6_2_HIGH_TIER:
return "high_6.2";
default:
GST_WARNING("Unsupported V4L2 level %i", v4l2_level);
break;
}
#endif
return NULL;
}
@@ -383,22 +261,13 @@ gst_v4l2_h265_enc_init (GstV4l2H265Enc * self)
self->insert_sps_pps = FALSE;
self->profile = DEFAULT_PROFILE;
self->insert_aud = FALSE;
self->insert_vui = FALSE;
self->extended_colorformat = FALSE;
self->bit_packetization = DEFAULT_BIT_PACKETIZATION;
self->slice_header_spacing = DEFAULT_SLICE_HEADER_SPACING;
self->nRefFrames = 1;
self->nBFrames = 0;
self->enableLossless = FALSE;
if (is_cuvid == TRUE)
{
self->extended_colorformat = FALSE;
self->nRefFrames = 0;
self->insert_vui = TRUE;
}
#if !defined(USE_V4L2_TARGET_NV_X86) && !defined(AARCH64_IS_SBSA)
self->insert_vui = FALSE;
#endif
}
static void
@@ -436,17 +305,24 @@ gst_v4l2_h265_enc_class_init (GstV4l2H265EncClass * klass)
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_INSERT_SPS_PPS,
g_param_spec_boolean ("insert-sps-pps",
"Insert H.265 SPS, PPS",
"Insert H.265 SPS, PPS at every IDR frame",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
if (is_cuvid == TRUE) {
g_object_class_install_property (gobject_class, PROP_EXTENDED_COLORFORMAT,
g_param_spec_boolean ("extended-colorformat",
"Set Extended ColorFormat",
"Set Extended ColorFormat pixel values 0 to 255 in VUI info",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
} else if (is_cuvid == FALSE) {
g_object_class_install_property (gobject_class, PROP_INSERT_SPS_PPS,
g_param_spec_boolean ("insert-sps-pps",
"Insert H.265 SPS, PPS",
"Insert H.265 SPS, PPS at every IDR frame",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_INSERT_VUI,
g_object_class_install_property (gobject_class, PROP_INSERT_VUI,
g_param_spec_boolean ("insert-vui",
"Insert H.265 VUI",
"Insert H.265 VUI(Video Usability Information) in SPS",
(is_cuvid == TRUE) ? TRUE : FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_INSERT_AUD,
g_param_spec_boolean ("insert-aud",
@@ -468,6 +344,28 @@ gst_v4l2_h265_enc_class_init (GstV4l2H265EncClass * klass)
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_ENABLE_MV_META,
g_param_spec_boolean ("EnableMVBufferMeta",
"Enable Motion Vector Meta data",
"Enable Motion Vector Meta data for encoding",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class,
PROP_SLICE_INTRA_REFRESH_INTERVAL,
g_param_spec_uint ("SliceIntraRefreshInterval",
"SliceIntraRefreshInterval", "Set SliceIntraRefreshInterval", 0,
G_MAXUINT, DEFAULT_INTRA_REFRESH_FRAME_INTERVAL,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_TWO_PASS_CBR,
g_param_spec_boolean ("EnableTwopassCBR",
"Enable Two pass CBR",
"Enable two pass CBR while encoding",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_NUM_BFRAMES,
g_param_spec_uint ("num-B-Frames",
"B Frames between two reference frames",
@@ -480,7 +378,7 @@ gst_v4l2_h265_enc_class_init (GstV4l2H265EncClass * klass)
g_param_spec_uint ("num-Ref-Frames",
"Sets the number of reference frames for encoder",
"Number of Reference Frames for encoder",
0, MAX_NUM_REFERENCE_FRAMES, (is_cuvid == TRUE) ? 0 : DEFAULT_NUM_REFERENCE_FRAMES,
0, MAX_NUM_REFERENCE_FRAMES, DEFAULT_NUM_REFERENCE_FRAMES,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
@@ -490,34 +388,6 @@ gst_v4l2_h265_enc_class_init (GstV4l2H265EncClass * klass)
"Enable lossless encoding for YUV444",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_SLICE_INTRA_REFRESH_INTERVAL,
g_param_spec_uint ("SliceIntraRefreshInterval",
"SliceIntraRefreshInterval", "Set SliceIntraRefreshInterval", 0,
G_MAXUINT, DEFAULT_INTRA_REFRESH_FRAME_INTERVAL,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
if (is_cuvid == TRUE) {
g_object_class_install_property (gobject_class, PROP_EXTENDED_COLORFORMAT,
g_param_spec_boolean ("extended-colorformat",
"Set Extended ColorFormat",
"Set Extended ColorFormat pixel values 0 to 255 in VUI info",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
} else if (is_cuvid == FALSE) {
g_object_class_install_property (gobject_class, PROP_ENABLE_MV_META,
g_param_spec_boolean ("EnableMVBufferMeta",
"Enable Motion Vector Meta data",
"Enable Motion Vector Meta data for encoding",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
g_object_class_install_property (gobject_class, PROP_TWO_PASS_CBR,
g_param_spec_boolean ("EnableTwopassCBR",
"Enable Two pass CBR",
"Enable two pass CBR while encoding",
FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
GST_PARAM_MUTABLE_READY));
}
#endif
@@ -525,7 +395,7 @@ gst_v4l2_h265_enc_class_init (GstV4l2H265EncClass * klass)
baseclass->profile_cid = V4L2_CID_MPEG_VIDEO_H265_PROFILE;
baseclass->profile_to_string = v4l2_profile_to_string;
baseclass->profile_from_string = v4l2_profile_from_string;
baseclass->level_cid = V4L2_CID_MPEG_VIDEOENC_H265_LEVEL;
//baseclass->level_cid = V4L2_CID_MPEG_VIDEO_H265_LEVEL;
baseclass->level_to_string = v4l2_level_to_string;
baseclass->level_from_string = v4l2_level_from_string;
baseclass->set_encoder_properties = set_v4l2_h265_encoder_properties;
@@ -551,14 +421,12 @@ gst_v4l2_h265_enc_register (GstPlugin * plugin, const gchar * basename,
static GType
gst_v4l2_videnc_profile_get_type (void)
{
static GType profile = 0;
static volatile gsize profile = 0;
static const GEnumValue profile_type[] = {
{V4L2_MPEG_VIDEO_H265_PROFILE_MAIN,
"GST_V4L2_H265_VIDENC_MAIN_PROFILE", "Main"},
{V4L2_MPEG_VIDEO_H265_PROFILE_MAIN10,
"GST_V4L2_H265_VIDENC_MAIN10_PROFILE", "Main10"},
{V4L2_MPEG_VIDEO_H265_PROFILE_FREXT,
"GST_V4L2_H265_VIDENC_FREXT_PROFILE", "FREXT"},
{0, NULL, NULL}
};
@@ -598,41 +466,6 @@ gst_v4l2_h265_enc_slice_header_spacing (GstV4l2Object * v4l2object,
return TRUE;
}
gboolean
gst_v4l2_h265_enc_slice_intrarefresh(GstV4l2Object *v4l2object,
guint32 slice_count, guint32 slice_interval)
{
struct v4l2_ext_control control;
struct v4l2_ext_controls ctrls;
gint ret;
v4l2_ctrl_intra_refresh cuvid_param = {1, slice_interval, slice_count};
v4l2_enc_slice_intrarefresh_param param = {slice_count};
memset(&control, 0, sizeof(control));
memset(&ctrls, 0, sizeof(ctrls));
ctrls.count = 1;
ctrls.controls = &control;
ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
control.id = V4L2_CID_MPEG_VIDEOENC_SLICE_INTRAREFRESH_PARAM;
if (is_cuvid)
control.string = (gchar *)&cuvid_param;
else
control.string = (gchar *)&param;
ret = v4l2object->ioctl(v4l2object->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret < 0)
{
g_print("Error while setting slice intrarefresh params\n");
return FALSE;
}
return TRUE;
}
gboolean
set_v4l2_h265_encoder_properties (GstVideoEncoder * encoder)
{
@@ -660,10 +493,12 @@ set_v4l2_h265_encoder_properties (GstVideoEncoder * encoder)
}
}
if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
V4L2_CID_MPEG_VIDEOENC_INSERT_VUI, self->insert_vui)) {
g_print ("S_EXT_CTRLS for INSERT_VUI failed\n");
return FALSE;
if (self->insert_vui) {
if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
V4L2_CID_MPEG_VIDEOENC_INSERT_VUI, 1)) {
g_print ("S_EXT_CTRLS for INSERT_VUI failed\n");
return FALSE;
}
}
if (self->extended_colorformat) {
@@ -704,9 +539,9 @@ set_v4l2_h265_encoder_properties (GstVideoEncoder * encoder)
}
if (self->SliceIntraRefreshInterval) {
if (!gst_v4l2_h265_enc_slice_intrarefresh (video_enc->v4l2output,
self->SliceIntraRefreshInterval,
video_enc->idrinterval)) {
if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
V4L2_CID_MPEG_VIDEOENC_SLICE_INTRAREFRESH_PARAM,
self->SliceIntraRefreshInterval)) {
g_print ("S_EXT_CTRLS for SLICE_INTRAREFRESH_PARAM failed\n");
return FALSE;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -43,7 +43,6 @@ struct _GstV4l2H265Enc
GstV4l2VideoEnc parent;
gboolean insert_sps_pps;
guint profile;
guint level;
guint nBFrames;
guint nRefFrames;
gboolean insert_aud;

View File

@@ -1,896 +0,0 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "nalutils.h"
#include "gstv4l2h26xparser.h"
#include <gst/base/gstbytereader.h>
#include <gst/base/gstbitreader.h>
#include <string.h>
#include <math.h>
GST_DEBUG_CATEGORY_STATIC (h26x_parser_debug);
#define GST_CAT_DEFAULT h26x_parser_debug

/* TRUE once the debug category below has been registered. */
static gboolean initialized = FALSE;

/* Register the GStreamer debug category on first use; each public entry
 * point invokes this so the category exists regardless of which API is
 * called first.
 * NOTE(review): the flag is not atomic — concurrent first calls could both
 * run GST_DEBUG_CATEGORY_INIT; confirm callers are single-threaded here. */
#define INITIALIZE_DEBUG_CATEGORY \
  if (!initialized) { \
    GST_DEBUG_CATEGORY_INIT (h26x_parser_debug, "codecparsers_h26x", 0, \
        "h26x parser library"); \
    initialized = TRUE; \
  }
/**** Default scaling_lists according to Table 7-2 *****/
/* Default quantisation scaling matrices used by
 * h264_parser_parse_scaling_list() when a list is absent from the
 * bitstream and the Table 7-2 fallback selects the spec defaults. */
static const guint8 default_4x4_intra[16] = {
  6, 13, 13, 20, 20, 20, 28, 28, 28, 28, 32, 32,
  32, 37, 37, 42
};

static const guint8 default_4x4_inter[16] = {
  10, 14, 14, 20, 20, 20, 24, 24, 24, 24, 27, 27,
  27, 30, 30, 34
};

static const guint8 default_8x8_intra[64] = {
  6, 10, 10, 13, 11, 13, 16, 16, 16, 16, 18, 18,
  18, 18, 18, 23, 23, 23, 23, 23, 23, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27,
  27, 27, 27, 27, 27, 29, 29, 29, 29, 29, 29, 29, 31, 31, 31, 31, 31, 31, 33,
  33, 33, 33, 33, 36, 36, 36, 36, 38, 38, 38, 40, 40, 42
};

static const guint8 default_8x8_inter[64] = {
  9, 13, 13, 15, 13, 15, 17, 17, 17, 17, 19, 19,
  19, 19, 19, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 24, 24, 24,
  24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27, 27, 27, 28,
  28, 28, 28, 28, 30, 30, 30, 30, 32, 32, 32, 33, 33, 35
};

/***** Utils ****/
/* aspect_ratio_idc value meaning "SAR coded explicitly" (H.264 Table E-1). */
#define EXTENDED_SAR 255
/* Decode the single-byte H.264 NAL unit header located at nalu->offset
 * into the type / ref_idc / idr_pic_flag fields of @nalu.  Returns FALSE
 * when no header byte is available. */
static gboolean
h264_parse_nalu_header (H264NalUnit * nalu)
{
  guint8 hdr;

  if (nalu->size < 1)
    return FALSE;

  hdr = nalu->data[nalu->offset];
  nalu->header_bytes = 1;
  nalu->extension_type = H264_NAL_EXTENSION_NONE;
  nalu->type = hdr & 0x1f;
  nalu->ref_idc = (hdr & 0x60) >> 5;
  nalu->idr_pic_flag = (nalu->type == 5) ? 1 : 0;

  GST_DEBUG ("Nal type %u, ref_idc %u", nalu->type, nalu->ref_idc);
  return TRUE;
}
/* Parse scaling_list() syntax (H.264 7.3.2.1.1.1) into the 4x4 and 8x8
 * lists.  Lists absent from the bitstream are filled per the fallback
 * rule of Table 7-2: copy the previous list of the same kind, or use the
 * supplied fallback matrices.  @n_lists is 8 or 12 depending on
 * chroma_format_idc.  Returns FALSE on a bitstream read error. */
static gboolean
h264_parser_parse_scaling_list (NalReader * nr,
    guint8 scaling_lists_4x4[6][16], guint8 scaling_lists_8x8[6][64],
    const guint8 fallback_4x4_inter[16], const guint8 fallback_4x4_intra[16],
    const guint8 fallback_8x8_inter[64], const guint8 fallback_8x8_intra[64],
    guint8 n_lists)
{
  guint i;

  /* spec default matrices, indexed like the scaling list index i below */
  static const guint8 *default_lists[12] = {
    default_4x4_intra, default_4x4_intra, default_4x4_intra,
    default_4x4_inter, default_4x4_inter, default_4x4_inter,
    default_8x8_intra, default_8x8_inter,
    default_8x8_intra, default_8x8_inter,
    default_8x8_intra, default_8x8_inter
  };

  GST_DEBUG ("parsing scaling lists");

  for (i = 0; i < 12; i++) {
    gboolean use_default = FALSE;

    if (i < n_lists) {
      guint8 scaling_list_present_flag;

      READ_UINT8 (nr, scaling_list_present_flag, 1);
      if (scaling_list_present_flag) {
        guint8 *scaling_list;
        guint size;
        guint j;
        guint8 last_scale, next_scale;

        /* lists 0..5 are 4x4, lists 6..11 are 8x8 */
        if (i < 6) {
          scaling_list = scaling_lists_4x4[i];
          size = 16;
        } else {
          scaling_list = scaling_lists_8x8[i - 6];
          size = 64;
        }

        /* delta-coded run: each entry updates last/next scale */
        last_scale = 8;
        next_scale = 8;
        for (j = 0; j < size; j++) {
          if (next_scale != 0) {
            gint32 delta_scale;

            READ_SE (nr, delta_scale);
            next_scale = (last_scale + delta_scale) & 0xff;
          }
          if (j == 0 && next_scale == 0) {
            /* Use default scaling lists (7.4.2.1.1.1) */
            memcpy (scaling_list, default_lists[i], size);
            break;
          }
          last_scale = scaling_list[j] =
              (next_scale == 0) ? last_scale : next_scale;
        }
      } else
        use_default = TRUE;
    } else
      use_default = TRUE;

    if (use_default) {
      /* fallback rule of Table 7-2: previous list of the same kind, or the
       * caller-supplied fallback matrices for the first of each kind */
      switch (i) {
        case 0:
          memcpy (scaling_lists_4x4[0], fallback_4x4_intra, 16);
          break;
        case 1:
          memcpy (scaling_lists_4x4[1], scaling_lists_4x4[0], 16);
          break;
        case 2:
          memcpy (scaling_lists_4x4[2], scaling_lists_4x4[1], 16);
          break;
        case 3:
          memcpy (scaling_lists_4x4[3], fallback_4x4_inter, 16);
          break;
        case 4:
          memcpy (scaling_lists_4x4[4], scaling_lists_4x4[3], 16);
          break;
        case 5:
          memcpy (scaling_lists_4x4[5], scaling_lists_4x4[4], 16);
          break;
        case 6:
          memcpy (scaling_lists_8x8[0], fallback_8x8_intra, 64);
          break;
        case 7:
          memcpy (scaling_lists_8x8[1], fallback_8x8_inter, 64);
          break;
        case 8:
          memcpy (scaling_lists_8x8[2], scaling_lists_8x8[0], 64);
          break;
        case 9:
          memcpy (scaling_lists_8x8[3], scaling_lists_8x8[1], 64);
          break;
        case 10:
          memcpy (scaling_lists_8x8[4], scaling_lists_8x8[2], 64);
          break;
        case 11:
          memcpy (scaling_lists_8x8[5], scaling_lists_8x8[3], 64);
          break;
        default:
          break;
      }
    }
  }
  return TRUE;

error:
  GST_WARNING ("error parsing scaling lists");
  return FALSE;
}
H264NalParser *
h264_nal_parser_new (void)
{
H264NalParser *nalparser;
nalparser = g_slice_new0 (H264NalParser);
INITIALIZE_DEBUG_CATEGORY;
return nalparser;
}
/**
 * h264_nal_parser_free:
 * @nalparser: parser allocated with h264_nal_parser_new()
 *
 * Clear every cached SPS and release the parser itself.
 */
void
h264_nal_parser_free (H264NalParser * nalparser)
{
  guint i;

  for (i = 0; i < H264_MAX_SPS_COUNT; i++)
    h264_sps_clear (&nalparser->sps[i]);
  g_slice_free (H264NalParser, nalparser);
  /* NOTE: the previous `nalparser = NULL;` after the free was removed —
   * assigning to the by-value parameter has no effect on the caller and
   * only suggested a use-after-free protection that did not exist. */
}
/* Locate the next Annex-B NAL unit at/after @offset without requiring a
 * terminating start code.  Fills @nalu (data pointer, start-code offset,
 * payload offset) and parses its one-byte header.  nalu->size is
 * provisional — it extends to the end of @data until
 * h264_parser_identify_nalu() trims it at the next start code. */
H264ParserResult
h264_parser_identify_nalu_unchecked (H264NalParser * nalparser,
    const guint8 * data, guint offset, gsize size, H264NalUnit * nalu)
{
  gint off1;

  memset (nalu, 0, sizeof (*nalu));

  /* need at least a 3-byte start code plus the NAL header byte */
  if (size < offset + 4) {
    GST_DEBUG ("Can't parse, buffer has too small size %" G_GSIZE_FORMAT
        ", offset %u", size, offset);
    return H264_PARSER_ERROR;
  }

  off1 = scan_for_start_codes (data + offset, size - offset);
  if (off1 < 0) {
    GST_DEBUG ("No start code prefix in this buffer");
    return H264_PARSER_NO_NAL;
  }

  if (offset + off1 == size - 1) {
    GST_DEBUG ("Missing data to identify nal unit");
    return H264_PARSER_ERROR;
  }

  nalu->sc_offset = offset + off1;
  nalu->offset = offset + off1 + 3;
  nalu->data = (guint8 *) data;
  nalu->size = size - nalu->offset;

  if (!h264_parse_nalu_header (nalu)) {
    GST_WARNING ("error parsing \"NAL unit header\"");
    nalu->size = 0;
    return H264_PARSER_BROKEN_DATA;
  }

  nalu->valid = TRUE;

  /* sc might have 2 or 3 0-bytes */
  if (nalu->sc_offset > 0 && data[nalu->sc_offset - 1] == 00
      && (nalu->type == H264_NAL_SPS || nalu->type == H264_NAL_PPS
          || nalu->type == H264_NAL_AU_DELIMITER))
    nalu->sc_offset--;

  /* one-byte NALs: no payload follows the header */
  if (nalu->type == H264_NAL_SEQ_END ||
      nalu->type == H264_NAL_STREAM_END) {
    GST_DEBUG ("end-of-seq or end-of-stream nal found");
    nalu->size = 1;
    return H264_PARSER_OK;
  }

  return H264_PARSER_OK;
}
/* Locate the next complete NAL unit: identify it with the unchecked
 * variant, then trim nalu->size at the following start code (minus any
 * trailing zero bytes).  Returns H264_PARSER_NO_NAL_END when the
 * terminating start code has not arrived yet. */
H264ParserResult
h264_parser_identify_nalu (H264NalParser * nalparser,
    const guint8 * data, guint offset, gsize size, H264NalUnit * nalu)
{
  H264ParserResult res;
  gint off2;

  res =
      h264_parser_identify_nalu_unchecked (nalparser, data, offset, size,
      nalu);
  if (res != H264_PARSER_OK)
    goto beach;

  /* The two NALs are exactly 1 byte size and are placed at the end of an AU,
   * there is no need to wait for the following */
  if (nalu->type == H264_NAL_SEQ_END ||
      nalu->type == H264_NAL_STREAM_END)
    goto beach;

  off2 = scan_for_start_codes (data + nalu->offset, size - nalu->offset);
  if (off2 < 0) {
    GST_DEBUG ("Nal start %d, No end found", nalu->offset);
    return H264_PARSER_NO_NAL_END;
  }

  /* Mini performance improvement:
   * We could have a way to store how many 0s were skipped to avoid
   * parsing them again on the next NAL */
  while (off2 > 0 && data[nalu->offset + off2 - 1] == 00)
    off2--;

  nalu->size = off2;
  if (nalu->size < 2)
    return H264_PARSER_BROKEN_DATA;

  GST_DEBUG ("Complete nal found. Off: %d, Size: %d", nalu->offset, nalu->size);

beach:
  return res;
}
/* Public wrapper around h264_parse_sps(); kept as a separate entry point
 * for API symmetry.  @nalparser state is not consulted. */
H264ParserResult
h264_parser_parse_sps (H264NalUnit * nalu,
    H264SPS * sps, gboolean parse_vui_params)
{
  return h264_parse_sps (nalu, sps, parse_vui_params);
}
/* Parse seq_parameter_set_data() */
/* Reads the seq_parameter_set_data() syntax (H.264 7.3.2.1.1) from @nr
 * into @sps and derives picture width/height, the crop rectangle and
 * max_frame_num.  @parse_vui_params is accepted but unused here — this
 * trimmed-down parser stops before the VUI.  Returns FALSE on any
 * bitstream read error. */
static gboolean
h264_parse_sps_data (NalReader * nr, H264SPS * sps,
    gboolean parse_vui_params)
{
  gint width, height;
  /* chroma subsampling factors indexed by chroma_format_idc */
  guint subwc[] = { 1, 2, 2, 1 };
  guint subhc[] = { 1, 2, 1, 1 };

  memset (sps, 0, sizeof (*sps));

  /* set default values for fields that might not be present in the bitstream
     and have valid defaults */
  sps->extension_type = H264_NAL_EXTENSION_NONE;
  sps->chroma_format_idc = 1;
  memset (sps->scaling_lists_4x4, 16, 96);
  memset (sps->scaling_lists_8x8, 16, 384);

  READ_UINT8 (nr, sps->profile_idc, 8);
  READ_UINT8 (nr, sps->constraint_set0_flag, 1);
  READ_UINT8 (nr, sps->constraint_set1_flag, 1);
  READ_UINT8 (nr, sps->constraint_set2_flag, 1);
  READ_UINT8 (nr, sps->constraint_set3_flag, 1);
  READ_UINT8 (nr, sps->constraint_set4_flag, 1);
  READ_UINT8 (nr, sps->constraint_set5_flag, 1);
  /* skip reserved_zero_2bits */
  if (!_skip (nr, 2))
    goto error;
  READ_UINT8 (nr, sps->level_idc, 8);
  READ_UE_MAX (nr, sps->id, H264_MAX_SPS_COUNT - 1);

  /* fields only present for the High-family profile_idc values:
   * chroma format, bit depths and scaling matrices */
  if (sps->profile_idc == 100 || sps->profile_idc == 110 ||
      sps->profile_idc == 122 || sps->profile_idc == 244 ||
      sps->profile_idc == 44 || sps->profile_idc == 83 ||
      sps->profile_idc == 86 || sps->profile_idc == 118 ||
      sps->profile_idc == 128) {
    READ_UE_MAX (nr, sps->chroma_format_idc, 3);
    if (sps->chroma_format_idc == 3)
      READ_UINT8 (nr, sps->separate_colour_plane_flag, 1);
    READ_UE_MAX (nr, sps->bit_depth_luma_minus8, 6);
    READ_UE_MAX (nr, sps->bit_depth_chroma_minus8, 6);
    READ_UINT8 (nr, sps->qpprime_y_zero_transform_bypass_flag, 1);
    READ_UINT8 (nr, sps->scaling_matrix_present_flag, 1);
    if (sps->scaling_matrix_present_flag) {
      guint8 n_lists;

      n_lists = (sps->chroma_format_idc != 3) ? 8 : 12;
      if (!h264_parser_parse_scaling_list (nr,
              sps->scaling_lists_4x4, sps->scaling_lists_8x8,
              default_4x4_inter, default_4x4_intra,
              default_8x8_inter, default_8x8_intra, n_lists))
        goto error;
    }
  }

  READ_UE_MAX (nr, sps->log2_max_frame_num_minus4, 12);
  sps->max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);

  READ_UE_MAX (nr, sps->pic_order_cnt_type, 2);
  if (sps->pic_order_cnt_type == 0) {
    READ_UE_MAX (nr, sps->log2_max_pic_order_cnt_lsb_minus4, 12);
  } else if (sps->pic_order_cnt_type == 1) {
    guint i;

    READ_UINT8 (nr, sps->delta_pic_order_always_zero_flag, 1);
    READ_SE (nr, sps->offset_for_non_ref_pic);
    READ_SE (nr, sps->offset_for_top_to_bottom_field);
    READ_UE_MAX (nr, sps->num_ref_frames_in_pic_order_cnt_cycle, 255);
    for (i = 0; i < sps->num_ref_frames_in_pic_order_cnt_cycle; i++)
      READ_SE (nr, sps->offset_for_ref_frame[i]);
  }

  READ_UE (nr, sps->num_ref_frames);
  READ_UINT8 (nr, sps->gaps_in_frame_num_value_allowed_flag, 1);
  READ_UE (nr, sps->pic_width_in_mbs_minus1);
  READ_UE (nr, sps->pic_height_in_map_units_minus1);
  READ_UINT8 (nr, sps->frame_mbs_only_flag, 1);
  if (!sps->frame_mbs_only_flag)
    READ_UINT8 (nr, sps->mb_adaptive_frame_field_flag, 1);
  READ_UINT8 (nr, sps->direct_8x8_inference_flag, 1);
  READ_UINT8 (nr, sps->frame_cropping_flag, 1);
  if (sps->frame_cropping_flag) {
    READ_UE (nr, sps->frame_crop_left_offset);
    READ_UE (nr, sps->frame_crop_right_offset);
    READ_UE (nr, sps->frame_crop_top_offset);
    READ_UE (nr, sps->frame_crop_bottom_offset);
  }

  /* calculate ChromaArrayType */
  if (!sps->separate_colour_plane_flag)
    sps->chroma_array_type = sps->chroma_format_idc;

  /* Calculate width and height */
  width = (sps->pic_width_in_mbs_minus1 + 1);
  width *= 16;
  height = (sps->pic_height_in_map_units_minus1 + 1);
  height *= 16 * (2 - sps->frame_mbs_only_flag);
  GST_LOG ("initial width=%d, height=%d", width, height);
  if (width < 0 || height < 0) {
    GST_WARNING ("invalid width/height in SPS");
    goto error;
  }
  sps->width = width;
  sps->height = height;

  if (sps->frame_cropping_flag) {
    /* crop offsets are in chroma sample units; convert through the
     * subsampling factors (vertical factor doubled for field coding) */
    const guint crop_unit_x = subwc[sps->chroma_format_idc];
    const guint crop_unit_y =
        subhc[sps->chroma_format_idc] * (2 - sps->frame_mbs_only_flag);

    width -= (sps->frame_crop_left_offset + sps->frame_crop_right_offset)
        * crop_unit_x;
    height -= (sps->frame_crop_top_offset + sps->frame_crop_bottom_offset)
        * crop_unit_y;

    sps->crop_rect_width = width;
    sps->crop_rect_height = height;
    sps->crop_rect_x = sps->frame_crop_left_offset * crop_unit_x;
    sps->crop_rect_y = sps->frame_crop_top_offset * crop_unit_y;

    GST_LOG ("crop_rectangle x=%u y=%u width=%u, height=%u", sps->crop_rect_x,
        sps->crop_rect_y, width, height);
  }

  sps->fps_num_removed = 0;
  sps->fps_den_removed = 1;

  return TRUE;

error:
  return FALSE;
}
/* Parse an SPS NAL unit into @sps.  On success sps->valid is TRUE and
 * H264_PARSER_OK is returned; on any bitstream error sps->valid is FALSE
 * and H264_PARSER_ERROR is returned. */
H264ParserResult
h264_parse_sps (H264NalUnit * nalu, H264SPS * sps,
    gboolean parse_vui_params)
{
  NalReader reader;

  INITIALIZE_DEBUG_CATEGORY;
  GST_DEBUG ("parsing SPS");

  /* payload starts right after the NAL header */
  init_nal (&reader, nalu->data + nalu->offset + nalu->header_bytes,
      nalu->size - nalu->header_bytes);

  if (h264_parse_sps_data (&reader, sps, parse_vui_params)) {
    sps->valid = TRUE;
    return H264_PARSER_OK;
  }

  GST_WARNING ("error parsing \"Sequence parameter set\"");
  sps->valid = FALSE;
  return H264_PARSER_ERROR;
}
/* Release resources held by @sps.  The current H264SPS has no dynamically
 * allocated members, so only the NULL guard remains; the function is kept
 * so h264_nal_parser_free() and callers have a stable teardown hook. */
void
h264_sps_clear (H264SPS * sps)
{
  g_return_if_fail (sps != NULL);
}
/************************** H265 *****************************/
/* Decode the two-byte HEVC NAL unit header at nalu->offset into the
 * type / layer_id / temporal_id_plus1 fields of @nalu.  Returns FALSE
 * when fewer than two bytes are available. */
static gboolean
h265_parse_nalu_header (H265NalUnit * nalu)
{
  guint8 *data = nalu->data + nalu->offset;
  GstBitReader br = {0};

  if (nalu->size < 2)
    return FALSE;

  /* FIX: nalu->size already counts from nalu->offset, so it is exactly the
   * number of readable bytes at @data.  The previous
   * `nalu->size - nalu->offset` subtracted the offset a second time and
   * could underflow; harmless in practice only because the reads below are
   * unchecked, but wrong as a buffer bound. */
  gst_bit_reader_init (&br, data, nalu->size);

  /* skip the forbidden_zero_bit */
  gst_bit_reader_skip_unchecked (&br, 1);
  nalu->type = gst_bit_reader_get_bits_uint8_unchecked (&br, 6);
  nalu->layer_id = gst_bit_reader_get_bits_uint8_unchecked (&br, 6);
  nalu->temporal_id_plus1 = gst_bit_reader_get_bits_uint8_unchecked (&br, 3);
  nalu->header_bytes = 2;
  return TRUE;
}
/****** Parsing functions *****/
/* Parse profile_tier_level() syntax (H.265 7.3.3) into @ptl.
 * NOTE(review): the sub_layer_* arrays in H265ProfileTierLevel hold 6
 * entries, while @maxNumSubLayersMinus1 comes from a 3-bit field (0..7);
 * callers must guarantee a value <= 6 or the loops below write out of
 * bounds — confirm all call sites validate this. */
static gboolean
h265_parse_profile_tier_level (H265ProfileTierLevel * ptl,
    NalReader * nr, guint8 maxNumSubLayersMinus1)
{
  guint i, j;

  GST_DEBUG ("parsing \"ProfileTierLevel parameters\"");

  READ_UINT8 (nr, ptl->profile_space, 2);
  READ_UINT8 (nr, ptl->tier_flag, 1);
  READ_UINT8 (nr, ptl->profile_idc, 5);
  for (j = 0; j < 32; j++)
    READ_UINT8 (nr, ptl->profile_compatibility_flag[j], 1);
  READ_UINT8 (nr, ptl->progressive_source_flag, 1);
  READ_UINT8 (nr, ptl->interlaced_source_flag, 1);
  READ_UINT8 (nr, ptl->non_packed_constraint_flag, 1);
  READ_UINT8 (nr, ptl->frame_only_constraint_flag, 1);
  READ_UINT8 (nr, ptl->max_12bit_constraint_flag, 1);
  READ_UINT8 (nr, ptl->max_10bit_constraint_flag, 1);
  READ_UINT8 (nr, ptl->max_8bit_constraint_flag, 1);
  READ_UINT8 (nr, ptl->max_422chroma_constraint_flag, 1);
  READ_UINT8 (nr, ptl->max_420chroma_constraint_flag, 1);
  READ_UINT8 (nr, ptl->max_monochrome_constraint_flag, 1);
  READ_UINT8 (nr, ptl->intra_constraint_flag, 1);
  READ_UINT8 (nr, ptl->one_picture_only_constraint_flag, 1);
  READ_UINT8 (nr, ptl->lower_bit_rate_constraint_flag, 1);
  READ_UINT8 (nr, ptl->max_14bit_constraint_flag, 1);
  /* skip the reserved zero bits */
  if (!_skip (nr, 34))
    goto error;
  READ_UINT8 (nr, ptl->level_idc, 8);

  /* per-sub-layer presence flags, then 2-bit alignment padding up to 8 */
  for (j = 0; j < maxNumSubLayersMinus1; j++) {
    READ_UINT8 (nr, ptl->sub_layer_profile_present_flag[j], 1);
    READ_UINT8 (nr, ptl->sub_layer_level_present_flag[j], 1);
  }
  if (maxNumSubLayersMinus1 > 0) {
    for (i = maxNumSubLayersMinus1; i < 8; i++)
      if (!_skip (nr, 2))
        goto error;
  }

  for (i = 0; i < maxNumSubLayersMinus1; i++) {
    if (ptl->sub_layer_profile_present_flag[i]) {
      READ_UINT8 (nr, ptl->sub_layer_profile_space[i], 2);
      READ_UINT8 (nr, ptl->sub_layer_tier_flag[i], 1);
      READ_UINT8 (nr, ptl->sub_layer_profile_idc[i], 5);
      for (j = 0; j < 32; j++)
        READ_UINT8 (nr, ptl->sub_layer_profile_compatibility_flag[i][j], 1);
      READ_UINT8 (nr, ptl->sub_layer_progressive_source_flag[i], 1);
      READ_UINT8 (nr, ptl->sub_layer_interlaced_source_flag[i], 1);
      READ_UINT8 (nr, ptl->sub_layer_non_packed_constraint_flag[i], 1);
      READ_UINT8 (nr, ptl->sub_layer_frame_only_constraint_flag[i], 1);
      if (!_skip (nr, 44))
        goto error;
    }
    if (ptl->sub_layer_level_present_flag[i])
      READ_UINT8 (nr, ptl->sub_layer_level_idc[i], 8);
  }

  return TRUE;

error:
  GST_WARNING ("error parsing \"ProfileTierLevel Parameters\"");
  return FALSE;
}
/**
 * h265_parser_new:
 *
 * Allocate a zero-initialised #H265Parser.  Release it with
 * h265_parser_free().
 *
 * Returns: a newly allocated parser
 */
H265Parser *
h265_parser_new (void)
{
  INITIALIZE_DEBUG_CATEGORY;
  return g_slice_new0 (H265Parser);
}
/**
 * h265_parser_free:
 * @parser: parser allocated with h265_parser_new()
 *
 * Release the parser.  (The previous `parser = NULL;` after the free was
 * removed: assigning to the by-value parameter has no effect on the
 * caller's pointer.)
 */
void
h265_parser_free (H265Parser * parser)
{
  g_slice_free (H265Parser, parser);
}
/* Locate the next Annex-B HEVC NAL unit at/after @offset without
 * requiring a terminating start code.  Fills @nalu and parses its
 * two-byte header.  nalu->size is provisional — it extends to the end of
 * @data until h265_parser_identify_nalu() trims it. */
H265ParserResult
h265_parser_identify_nalu_unchecked (H265Parser * parser,
    const guint8 * data, guint offset, gsize size, H265NalUnit * nalu)
{
  gint off1;

  memset (nalu, 0, sizeof (*nalu));

  /* need at least a 3-byte start code plus a partial NAL header */
  if (size < offset + 4) {
    GST_DEBUG ("Can't parse, buffer has too small size %" G_GSIZE_FORMAT
        ", offset %u", size, offset);
    return H265_PARSER_ERROR;
  }

  off1 = scan_for_start_codes (data + offset, size - offset);
  if (off1 < 0) {
    GST_DEBUG ("No start code prefix in this buffer");
    return H265_PARSER_NO_NAL;
  }

  if (offset + off1 == size - 1) {
    GST_DEBUG ("Missing data to identify nal unit");
    return H265_PARSER_ERROR;
  }

  nalu->sc_offset = offset + off1;
  /* sc might have 2 or 3 0-bytes */
  if (nalu->sc_offset > 0 && data[nalu->sc_offset - 1] == 00)
    nalu->sc_offset--;

  nalu->offset = offset + off1 + 3;
  nalu->data = (guint8 *) data;
  nalu->size = size - nalu->offset;

  if (!h265_parse_nalu_header (nalu)) {
    GST_WARNING ("error parsing \"NAL unit header\"");
    nalu->size = 0;
    return H265_PARSER_BROKEN_DATA;
  }

  nalu->valid = TRUE;

  /* EOS/EOB NALs consist of the two header bytes only */
  if (nalu->type == H265_NAL_EOS || nalu->type == H265_NAL_EOB) {
    GST_DEBUG ("end-of-seq or end-of-stream nal found");
    nalu->size = 2;
    return H265_PARSER_OK;
  }

  return H265_PARSER_OK;
}
/* Locate the next complete HEVC NAL unit: identify it with the unchecked
 * variant, then trim nalu->size at the following start code (minus any
 * trailing zero bytes).  Returns H265_PARSER_NO_NAL_END when the
 * terminating start code has not arrived yet. */
H265ParserResult
h265_parser_identify_nalu (H265Parser * parser,
    const guint8 * data, guint offset, gsize size, H265NalUnit * nalu)
{
  H265ParserResult res;
  gint off2;

  res =
      h265_parser_identify_nalu_unchecked (parser, data, offset, size,
      nalu);
  if (res != H265_PARSER_OK)
    goto beach;

  /* The two NALs are exactly 2 bytes size and are placed at the end of an AU,
   * there is no need to wait for the following */
  if (nalu->type == H265_NAL_EOS || nalu->type == H265_NAL_EOB)
    goto beach;

  off2 = scan_for_start_codes (data + nalu->offset, size - nalu->offset);
  if (off2 < 0) {
    GST_DEBUG ("Nal start %d, No end found", nalu->offset);
    return H265_PARSER_NO_NAL_END;
  }

  /* Mini performance improvement:
   * We could have a way to store how many 0s were skipped to avoid
   * parsing them again on the next NAL */
  while (off2 > 0 && data[nalu->offset + off2 - 1] == 00)
    off2--;

  nalu->size = off2;
  if (nalu->size < 3)
    return H265_PARSER_BROKEN_DATA;

  GST_DEBUG ("Complete nal found. Off: %d, Size: %d", nalu->offset, nalu->size);

beach:
  return res;
}
/* Identify a NAL unit in length-prefixed (hvcC / "hevc" stream format)
 * data: read a @nal_length_size-byte big-endian length at @offset, then
 * parse the NAL header that follows.
 * NOTE(review): the length is read via a 32-bit unchecked bit-reader call,
 * so nal_length_size is expected to be 1..4 — confirm callers enforce
 * this. */
H265ParserResult
h265_parser_identify_nalu_hevc (H265Parser * parser,
    const guint8 * data, guint offset, gsize size, guint8 nal_length_size,
    H265NalUnit * nalu)
{
  GstBitReader br = {0};

  memset (nalu, 0, sizeof (*nalu));

  if (size < offset + nal_length_size) {
    GST_DEBUG ("Can't parse, buffer has too small size %" G_GSIZE_FORMAT
        ", offset %u", size, offset);
    return H265_PARSER_ERROR;
  }

  /* from here on, work relative to @offset */
  size = size - offset;
  gst_bit_reader_init (&br, data + offset, size);

  nalu->size = gst_bit_reader_get_bits_uint32_unchecked (&br,
      nal_length_size * 8);
  nalu->sc_offset = offset;
  nalu->offset = offset + nal_length_size;

  /* whole NAL payload must be present */
  if (size < nalu->size + nal_length_size) {
    nalu->size = 0;
    return H265_PARSER_NO_NAL_END;
  }

  nalu->data = (guint8 *) data;

  if (!h265_parse_nalu_header (nalu)) {
    GST_WARNING ("error parsing \"NAL unit header\"");
    nalu->size = 0;
    return H265_PARSER_BROKEN_DATA;
  }

  if (nalu->size < 2)
    return H265_PARSER_BROKEN_DATA;

  nalu->valid = TRUE;
  return H265_PARSER_OK;
}
/* Public wrapper around h265_parse_sps(); kept for API symmetry with the
 * H.264 entry points. */
H265ParserResult
h265_parser_parse_sps (H265Parser * parser, H265NalUnit * nalu,
    H265SPS * sps, gboolean parse_vui_params)
{
  return h265_parse_sps (parser, nalu, sps, parse_vui_params);
}
/* Parse an HEVC SPS NAL unit (H.265 7.3.2.2) into @sps and derive the
 * picture size and conformance-crop rectangle.  @parse_vui_params is
 * accepted for API compatibility; the VUI is not parsed by this
 * stripped-down parser.  On success sps->valid is TRUE and
 * H265_PARSER_OK is returned; on any bitstream error sps->valid is FALSE
 * and H265_PARSER_ERROR is returned. */
H265ParserResult
h265_parse_sps (H265Parser * parser, H265NalUnit * nalu,
    H265SPS * sps, gboolean parse_vui_params)
{
  NalReader nr;
  guint8 vps_id;
  guint i;
  /* chroma subsampling factors indexed by chroma_format_idc */
  guint subwc[] = { 1, 2, 2, 1, 1 };
  guint subhc[] = { 1, 2, 1, 1, 1 };

  INITIALIZE_DEBUG_CATEGORY;
  GST_DEBUG ("parsing SPS");

  init_nal (&nr, nalu->data + nalu->offset + nalu->header_bytes,
      nalu->size - nalu->header_bytes);

  memset (sps, 0, sizeof (*sps));

  READ_UINT8 (&nr, vps_id, 4);
  READ_UINT8 (&nr, sps->max_sub_layers_minus1, 3);
  /* FIX: sps_max_sub_layers_minus1 shall be in [0, 6] (H.265 7.4.3.2.1).
   * The 3-bit field can encode 7, which would overrun the 6-entry
   * sub_layer_* arrays in H265ProfileTierLevel during
   * h265_parse_profile_tier_level(); reject such streams here. */
  if (sps->max_sub_layers_minus1 > 6) {
    GST_WARNING ("invalid sps_max_sub_layers_minus1 %d",
        sps->max_sub_layers_minus1);
    goto error;
  }
  READ_UINT8 (&nr, sps->temporal_id_nesting_flag, 1);

  if (!h265_parse_profile_tier_level (&sps->profile_tier_level, &nr,
          sps->max_sub_layers_minus1))
    goto error;

  READ_UE_MAX (&nr, sps->id, H265_MAX_SPS_COUNT - 1);
  READ_UE_MAX (&nr, sps->chroma_format_idc, 3);
  if (sps->chroma_format_idc == 3)
    READ_UINT8 (&nr, sps->separate_colour_plane_flag, 1);
  READ_UE_ALLOWED (&nr, sps->pic_width_in_luma_samples, 1, 16888);
  READ_UE_ALLOWED (&nr, sps->pic_height_in_luma_samples, 1, 16888);
  READ_UINT8 (&nr, sps->conformance_window_flag, 1);
  if (sps->conformance_window_flag) {
    READ_UE (&nr, sps->conf_win_left_offset);
    READ_UE (&nr, sps->conf_win_right_offset);
    READ_UE (&nr, sps->conf_win_top_offset);
    READ_UE (&nr, sps->conf_win_bottom_offset);
  }
  READ_UE_MAX (&nr, sps->bit_depth_luma_minus8, 6);
  READ_UE_MAX (&nr, sps->bit_depth_chroma_minus8, 6);
  READ_UE_MAX (&nr, sps->log2_max_pic_order_cnt_lsb_minus4, 12);

  READ_UINT8 (&nr, sps->sub_layer_ordering_info_present_flag, 1);
  /* when ordering info is absent, only the top sub-layer entry is coded */
  for (i =
      (sps->sub_layer_ordering_info_present_flag ? 0 :
          sps->max_sub_layers_minus1); i <= sps->max_sub_layers_minus1; i++) {
    READ_UE_MAX (&nr, sps->max_dec_pic_buffering_minus1[i], 16);
    READ_UE_MAX (&nr, sps->max_num_reorder_pics[i],
        sps->max_dec_pic_buffering_minus1[i]);
    READ_UE_MAX (&nr, sps->max_latency_increase_plus1[i], G_MAXUINT32 - 1);
  }
  /* setting default values if sps->sub_layer_ordering_info_present_flag is zero */
  if (!sps->sub_layer_ordering_info_present_flag && sps->max_sub_layers_minus1) {
    for (i = 0; i <= (guint)(sps->max_sub_layers_minus1 - 1); i++) {
      sps->max_dec_pic_buffering_minus1[i] =
          sps->max_dec_pic_buffering_minus1[sps->max_sub_layers_minus1];
      sps->max_num_reorder_pics[i] =
          sps->max_num_reorder_pics[sps->max_sub_layers_minus1];
      sps->max_latency_increase_plus1[i] =
          sps->max_latency_increase_plus1[sps->max_sub_layers_minus1];
    }
  }

  /* The limits are calculted based on the profile_tier_level constraint
   * in Annex-A: CtbLog2SizeY = 4 to 6 */
  READ_UE_MAX (&nr, sps->log2_min_luma_coding_block_size_minus3, 3);
  READ_UE_MAX (&nr, sps->log2_diff_max_min_luma_coding_block_size, 6);
  READ_UE_MAX (&nr, sps->log2_min_transform_block_size_minus2, 3);
  READ_UE_MAX (&nr, sps->log2_diff_max_min_transform_block_size, 3);
  READ_UE_MAX (&nr, sps->max_transform_hierarchy_depth_inter, 4);
  READ_UE_MAX (&nr, sps->max_transform_hierarchy_depth_intra, 4);

  /* Calculate width and height */
  sps->width = sps->pic_width_in_luma_samples;
  sps->height = sps->pic_height_in_luma_samples;
  if (sps->width < 0 || sps->height < 0) {
    GST_WARNING ("invalid width/height in SPS");
    goto error;
  }

  if (sps->conformance_window_flag) {
    /* crop offsets are in chroma sample units */
    const guint crop_unit_x = subwc[sps->chroma_format_idc];
    const guint crop_unit_y = subhc[sps->chroma_format_idc];

    sps->crop_rect_width = sps->width -
        (sps->conf_win_left_offset + sps->conf_win_right_offset) * crop_unit_x;
    sps->crop_rect_height = sps->height -
        (sps->conf_win_top_offset + sps->conf_win_bottom_offset) * crop_unit_y;
    sps->crop_rect_x = sps->conf_win_left_offset * crop_unit_x;
    sps->crop_rect_y = sps->conf_win_top_offset * crop_unit_y;

    GST_LOG ("crop_rectangle x=%u y=%u width=%u, height=%u", sps->crop_rect_x,
        sps->crop_rect_y, sps->crop_rect_width, sps->crop_rect_height);
  }

  sps->fps_num = 0;
  sps->fps_den = 1;
  sps->valid = TRUE;

  return H265_PARSER_OK;

error:
  GST_WARNING ("error parsing \"Sequence parameter set\"");
  sps->valid = FALSE;
  return H265_PARSER_ERROR;
}

View File

@@ -1,462 +0,0 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#ifndef __H26X_PARSER_H__
#define __H26X_PARSER_H__
#include <gst/gst.h>
G_BEGIN_DECLS
#define H264_MAX_SPS_COUNT 32
/* H.264 nal_unit_type values (Rec. ITU-T H.264, Table 7-1). */
typedef enum
{
  H264_NAL_UNKNOWN = 0,
  H264_NAL_SLICE = 1,
  H264_NAL_SLICE_DPA = 2,
  H264_NAL_SLICE_DPB = 3,
  H264_NAL_SLICE_DPC = 4,
  H264_NAL_SLICE_IDR = 5,
  H264_NAL_SEI = 6,
  H264_NAL_SPS = 7,
  H264_NAL_PPS = 8,
  H264_NAL_AU_DELIMITER = 9,
  H264_NAL_SEQ_END = 10,
  H264_NAL_STREAM_END = 11,
  H264_NAL_FILLER_DATA = 12,
  H264_NAL_SPS_EXT = 13,
  H264_NAL_PREFIX_UNIT = 14,
  H264_NAL_SUBSET_SPS = 15,
  H264_NAL_DEPTH_SPS = 16,
  H264_NAL_SLICE_AUX = 19,
  H264_NAL_SLICE_EXT = 20,
  H264_NAL_SLICE_DEPTH = 21
} H264NalUnitType;
/* Kind of NAL unit header extension present (NONE for plain AVC;
 * SVC/MVC correspond to the Annex G/H extensions). */
typedef enum
{
  H264_NAL_EXTENSION_NONE = 0,
  H264_NAL_EXTENSION_SVC,
  H264_NAL_EXTENSION_MVC,
} H264NalUnitExtensionType;
/* Result codes returned by the H.264 parsing functions.
 * NO_NAL: no start code in the buffer; NO_NAL_END: NAL start found but
 * its terminating start code has not arrived yet.
 * NOTE(review): BROKEN_LINK is not produced by the parser functions in
 * the matching .c chunk — confirm it is used elsewhere. */
typedef enum
{
  H264_PARSER_OK,
  H264_PARSER_BROKEN_DATA,
  H264_PARSER_BROKEN_LINK,
  H264_PARSER_ERROR,
  H264_PARSER_NO_NAL,
  H264_PARSER_NO_NAL_END
} H264ParserResult;
/* frame_packing_arrangement_type values for the frame-packing SEI.
 * NOTE(review): H264_FRMAE_PACKING_TOP_BOTTOM misspells "FRAME", but the
 * name is part of the public enum and is left untouched so existing
 * callers keep compiling. */
typedef enum
{
  H264_FRAME_PACKING_NONE = 6,
  H264_FRAME_PACKING_CHECKERBOARD_INTERLEAVING = 0,
  H264_FRAME_PACKING_COLUMN_INTERLEAVING = 1,
  H264_FRAME_PACKING_ROW_INTERLEAVING = 2,
  H264_FRAME_PACKING_SIDE_BY_SIDE = 3,
  H264_FRMAE_PACKING_TOP_BOTTOM = 4,
  H264_FRAME_PACKING_TEMPORAL_INTERLEAVING = 5
} H264FramePackingType;
/* H.264 slice_type values; the S_* entries are the 5-9 variants that
 * assert all slices of the picture share the same type. */
typedef enum
{
  H264_P_SLICE = 0,
  H264_B_SLICE = 1,
  H264_I_SLICE = 2,
  H264_SP_SLICE = 3,
  H264_SI_SLICE = 4,
  H264_S_P_SLICE = 5,
  H264_S_B_SLICE = 6,
  H264_S_I_SLICE = 7,
  H264_S_SP_SLICE = 8,
  H264_S_SI_SLICE = 9
} H264SliceType;
/* Source scan type indicator.
 * NOTE(review): presumably mirrors the ct_type field of the pic_timing
 * SEI — confirm against the code that consumes it. */
typedef enum
{
  H264_CT_TYPE_PROGRESSIVE = 0,
  H264_CT_TYPE_INTERLACED = 1,
  H264_CT_TYPE_UNKNOWN = 2,
} CtType;
typedef struct _H264NalParser H264NalParser;
typedef struct _H264NalUnit H264NalUnit;
typedef struct _H264SPS H264SPS;
/* One identified H.264 NAL unit inside a caller-owned buffer.
 * @data is borrowed (points into the scanned buffer), not owned. */
struct _H264NalUnit
{
  guint16 ref_idc;         /* nal_ref_idc from the header byte */
  guint16 type;            /* H264NalUnitType */
  /* calculated values */
  guint8 idr_pic_flag;     /* 1 iff type == H264_NAL_SLICE_IDR */
  guint size;              /* payload size; provisional until the next
                            * start code is found */
  guint offset;            /* offset of the NAL header byte in @data */
  guint sc_offset;         /* offset of the preceding start code */
  gboolean valid;
  guint8 *data;            /* borrowed pointer to the whole buffer */
  guint8 header_bytes;     /* 1 for plain AVC NAL headers */
  guint8 extension_type;   /* H264NalUnitExtensionType */
};
/* Fields of an H.264 sequence parameter set (7.3.2.1.1) plus the values
 * the parser derives from them (width/height, crop rectangle,
 * max_frame_num, chroma_array_type). */
struct _H264SPS
{
  gint id;                  /* seq_parameter_set_id */
  guint8 profile_idc;
  guint8 constraint_set0_flag;
  guint8 constraint_set1_flag;
  guint8 constraint_set2_flag;
  guint8 constraint_set3_flag;
  guint8 constraint_set4_flag;
  guint8 constraint_set5_flag;
  guint8 level_idc;
  guint8 chroma_format_idc;     /* defaults to 1 (4:2:0) when absent */
  guint8 separate_colour_plane_flag;
  guint8 bit_depth_luma_minus8;
  guint8 bit_depth_chroma_minus8;
  guint8 qpprime_y_zero_transform_bypass_flag;
  guint8 scaling_matrix_present_flag;
  guint8 scaling_lists_4x4[6][16];   /* flat default 16 when absent */
  guint8 scaling_lists_8x8[6][64];
  guint8 log2_max_frame_num_minus4;
  guint8 pic_order_cnt_type;
  /* if pic_order_cnt_type == 0 */
  guint8 log2_max_pic_order_cnt_lsb_minus4;
  /* else if pic_order_cnt_type == 1 */
  guint8 delta_pic_order_always_zero_flag;
  gint32 offset_for_non_ref_pic;
  gint32 offset_for_top_to_bottom_field;
  guint8 num_ref_frames_in_pic_order_cnt_cycle;
  gint32 offset_for_ref_frame[255];
  guint32 num_ref_frames;
  guint8 gaps_in_frame_num_value_allowed_flag;
  guint32 pic_width_in_mbs_minus1;
  guint32 pic_height_in_map_units_minus1;
  guint8 frame_mbs_only_flag;
  guint8 mb_adaptive_frame_field_flag;
  guint8 direct_8x8_inference_flag;
  guint8 frame_cropping_flag;
  /* if frame_cropping_flag */
  guint32 frame_crop_left_offset;
  guint32 frame_crop_right_offset;
  guint32 frame_crop_top_offset;
  guint32 frame_crop_bottom_offset;
  guint8 vui_parameters_present_flag;
  /* calculated values */
  guint8 chroma_array_type;
  guint32 max_frame_num;
  gint width, height;           /* coded size in luma samples */
  gint crop_rect_width, crop_rect_height;
  gint crop_rect_x, crop_rect_y;
  gint fps_num_removed, fps_den_removed; /* FIXME: remove */
  gboolean valid;
  /* Subset SPS extensions */
  guint8 extension_type;
};
/* Parser state: an SPS table indexed by seq_parameter_set_id plus the
 * most recently seen SPS.
 * NOTE(review): the parsing functions in the matching .c chunk never
 * populate sps[] or last_sps (only h264_nal_parser_free() clears them) —
 * confirm whether they are maintained elsewhere before relying on them. */
struct _H264NalParser
{
  /*< private >*/
  H264SPS sps[H264_MAX_SPS_COUNT];
  H264SPS *last_sps;
};
H264NalParser *h264_nal_parser_new (void);
H264ParserResult h264_parser_identify_nalu (H264NalParser *nalparser,
const guint8 *data, guint offset,
gsize size, H264NalUnit *nalu);
H264ParserResult h264_parser_identify_nalu_unchecked (H264NalParser *nalparser,
const guint8 *data, guint offset,
gsize size, H264NalUnit *nalu);
H264ParserResult h264_parser_parse_sps (H264NalUnit *nalu,
H264SPS *sps, gboolean parse_vui_params);
void h264_nal_parser_free (H264NalParser *nalparser);
H264ParserResult h264_parse_sps (H264NalUnit *nalu,
H264SPS *sps, gboolean parse_vui_params);
void h264_sps_clear (H264SPS *sps);
/* Maximum number of temporal sub-layers tracked per SPS. */
#define H265_MAX_SUB_LAYERS 8
/* Maximum number of SPS entries cached by the parser. */
#define H265_MAX_SPS_COUNT 16
/* H.265/HEVC NAL unit types.  The numeric values match the
 * nal_unit_type codes of the HEVC specification (Table 7-1);
 * note the gap between 9 and 16 is intentional (reserved values). */
typedef enum
{
H265_NAL_SLICE_TRAIL_N = 0,
H265_NAL_SLICE_TRAIL_R = 1,
H265_NAL_SLICE_TSA_N = 2,
H265_NAL_SLICE_TSA_R = 3,
H265_NAL_SLICE_STSA_N = 4,
H265_NAL_SLICE_STSA_R = 5,
H265_NAL_SLICE_RADL_N = 6,
H265_NAL_SLICE_RADL_R = 7,
H265_NAL_SLICE_RASL_N = 8,
H265_NAL_SLICE_RASL_R = 9,
H265_NAL_SLICE_BLA_W_LP = 16,
H265_NAL_SLICE_BLA_W_RADL = 17,
H265_NAL_SLICE_BLA_N_LP = 18,
H265_NAL_SLICE_IDR_W_RADL = 19,
H265_NAL_SLICE_IDR_N_LP = 20,
H265_NAL_SLICE_CRA_NUT = 21,
H265_NAL_VPS = 32,
H265_NAL_SPS = 33,
H265_NAL_PPS = 34,
H265_NAL_AUD = 35,
H265_NAL_EOS = 36,
H265_NAL_EOB = 37,
H265_NAL_FD = 38,
H265_NAL_PREFIX_SEI = 39,
H265_NAL_SUFFIX_SEI = 40
} H265NalUnitType;
/* Result codes returned by the H.265 parser API. */
typedef enum
{
H265_PARSER_OK,          /* parsing succeeded */
H265_PARSER_BROKEN_DATA, /* bitstream data is corrupted */
H265_PARSER_BROKEN_LINK, /* reference to missing/invalid data */
H265_PARSER_ERROR,       /* generic parsing error */
H265_PARSER_NO_NAL,      /* no NAL unit found in the input */
H265_PARSER_NO_NAL_END   /* NAL start found but its end is not in the buffer */
} H265ParserResult;
/* Forward typedefs for the H.265 parser types declared below. */
typedef struct _H265Parser H265Parser;
typedef struct _H265NalUnit H265NalUnit;
typedef struct _H265SPS H265SPS;
typedef struct _H265ProfileTierLevel H265ProfileTierLevel;
/* A single identified H.265 NAL unit.
 * The first three fields mirror the two-byte HEVC NAL unit header;
 * the remaining fields are computed while scanning the bitstream. */
struct _H265NalUnit
{
guint8 type;              /* nal_unit_type (see H265NalUnitType) */
guint8 layer_id;          /* nuh_layer_id */
guint8 temporal_id_plus1; /* nuh_temporal_id_plus1 */
/* calculated values */
guint size;               /* size of the NAL unit payload in bytes */
guint offset;             /* offset of the NAL unit in the scanned data */
guint sc_offset;          /* offset of the preceding start code — presumably; confirm in parser impl */
gboolean valid;           /* TRUE when the unit was successfully identified */
guint8 *data;             /* pointer into the caller's buffer (not owned — TODO confirm) */
guint8 header_bytes;      /* number of header bytes consumed */
};
/* Parsed HEVC profile_tier_level() syntax structure.
 * Field names follow the spec syntax elements one-to-one.  The
 * sub_layer_* arrays are sized 6, matching the spec's maximum of
 * 6 sub-layer entries (maxNumSubLayersMinus1 <= 6). */
struct _H265ProfileTierLevel {
guint8 profile_space;
guint8 tier_flag;
guint8 profile_idc;
guint8 profile_compatibility_flag[32];
guint8 progressive_source_flag;
guint8 interlaced_source_flag;
guint8 non_packed_constraint_flag;
guint8 frame_only_constraint_flag;
guint8 max_12bit_constraint_flag;
guint8 max_10bit_constraint_flag;
guint8 max_8bit_constraint_flag;
guint8 max_422chroma_constraint_flag;
guint8 max_420chroma_constraint_flag;
guint8 max_monochrome_constraint_flag;
guint8 intra_constraint_flag;
guint8 one_picture_only_constraint_flag;
guint8 lower_bit_rate_constraint_flag;
guint8 max_14bit_constraint_flag;
guint8 level_idc;
/* Per-sub-layer copies of the fields above, one entry per sub-layer. */
guint8 sub_layer_profile_present_flag[6];
guint8 sub_layer_level_present_flag[6];
guint8 sub_layer_profile_space[6];
guint8 sub_layer_tier_flag[6];
guint8 sub_layer_profile_idc[6];
guint8 sub_layer_profile_compatibility_flag[6][32];
guint8 sub_layer_progressive_source_flag[6];
guint8 sub_layer_interlaced_source_flag[6];
guint8 sub_layer_non_packed_constraint_flag[6];
guint8 sub_layer_frame_only_constraint_flag[6];
guint8 sub_layer_level_idc[6];
};
/* Parsed H.265 sequence parameter set.
 * Fields up to sps_extension_flag mirror the seq_parameter_set_rbsp()
 * syntax elements of the HEVC specification; the trailing fields are
 * values derived by the parser. */
struct _H265SPS
{
guint8 id;
guint8 max_sub_layers_minus1;
guint8 temporal_id_nesting_flag;
H265ProfileTierLevel profile_tier_level;
guint8 chroma_format_idc;
guint8 separate_colour_plane_flag;
guint16 pic_width_in_luma_samples;
guint16 pic_height_in_luma_samples;
guint8 conformance_window_flag;
/* if conformance_window_flag */
guint32 conf_win_left_offset;
guint32 conf_win_right_offset;
guint32 conf_win_top_offset;
guint32 conf_win_bottom_offset;
guint8 bit_depth_luma_minus8;
guint8 bit_depth_chroma_minus8;
guint8 log2_max_pic_order_cnt_lsb_minus4;
guint8 sub_layer_ordering_info_present_flag;
guint8 max_dec_pic_buffering_minus1[H265_MAX_SUB_LAYERS];
guint8 max_num_reorder_pics[H265_MAX_SUB_LAYERS];
guint8 max_latency_increase_plus1[H265_MAX_SUB_LAYERS];
guint8 log2_min_luma_coding_block_size_minus3;
guint8 log2_diff_max_min_luma_coding_block_size;
guint8 log2_min_transform_block_size_minus2;
guint8 log2_diff_max_min_transform_block_size;
guint8 max_transform_hierarchy_depth_inter;
guint8 max_transform_hierarchy_depth_intra;
guint8 scaling_list_enabled_flag;
/* if scaling_list_enabled_flag */
guint8 scaling_list_data_present_flag;
guint8 amp_enabled_flag;
guint8 sample_adaptive_offset_enabled_flag;
guint8 pcm_enabled_flag;
/* if pcm_enabled_flag */
guint8 pcm_sample_bit_depth_luma_minus1;
guint8 pcm_sample_bit_depth_chroma_minus1;
guint8 log2_min_pcm_luma_coding_block_size_minus3;
guint8 log2_diff_max_min_pcm_luma_coding_block_size;
guint8 pcm_loop_filter_disabled_flag;
guint8 num_short_term_ref_pic_sets;
guint8 long_term_ref_pics_present_flag;
/* if long_term_ref_pics_present_flag */
guint8 num_long_term_ref_pics_sps;
guint16 lt_ref_pic_poc_lsb_sps[32];
guint8 used_by_curr_pic_lt_sps_flag[32];
guint8 temporal_mvp_enabled_flag;
guint8 strong_intra_smoothing_enabled_flag;
guint8 vui_parameters_present_flag;
/* if vui_parameters_present_flag */
guint8 sps_extension_flag;
/* calculated values */
guint8 chroma_array_type;                 /* derived from chroma_format_idc / separate_colour_plane_flag */
gint width, height;                       /* coded picture dimensions in pixels */
gint crop_rect_width, crop_rect_height;   /* display (conformance-cropped) dimensions */
gint crop_rect_x, crop_rect_y;            /* top-left of the crop rectangle */
gint fps_num, fps_den;                    /* frame rate from VUI timing info, when present */
gboolean valid;                           /* TRUE once the SPS parsed successfully */
};
/* H.265 parser state.
 * Caches parsed sequence parameter sets so that subsequent NAL units can
 * refer back to them.  All members are private to the parser. */
struct _H265Parser
{
/*< private >*/
H265SPS sps[H265_MAX_SPS_COUNT]; /* SPS table; presumably indexed by SPS id — confirm in parser impl */
H265SPS *last_sps;               /* most recently parsed SPS (NULL until one is seen) */
};
/* Allocate a new H.265 parser.  Free with h265_parser_free(). */
H265Parser * h265_parser_new (void);
/* Locate and identify the next NAL unit in @data starting at @offset,
 * filling @nalu.  Returns a H265ParserResult code. */
H265ParserResult h265_parser_identify_nalu (H265Parser * parser,
const guint8 * data,
guint offset,
gsize size,
H265NalUnit * nalu);
/* Same as h265_parser_identify_nalu() but without checking for the end
 * of the NAL unit (the "_unchecked" variant). */
H265ParserResult h265_parser_identify_nalu_unchecked (H265Parser * parser,
const guint8 * data,
guint offset,
gsize size,
H265NalUnit * nalu);
/* Identify a NAL unit in length-prefixed (hvcC/"hevc") format, where
 * each NAL is preceded by a @nal_length_size byte length field instead
 * of a start code. */
H265ParserResult h265_parser_identify_nalu_hevc (H265Parser * parser,
const guint8 * data,
guint offset,
gsize size,
guint8 nal_length_size,
H265NalUnit * nalu);
/* Parse the SPS contained in @nalu into @sps; when @parse_vui_params is
 * TRUE the VUI parameters are parsed as well. */
H265ParserResult h265_parser_parse_sps (H265Parser * parser,
H265NalUnit * nalu,
H265SPS * sps,
gboolean parse_vui_params);
/* Free a parser previously created with h265_parser_new(). */
void h265_parser_free (H265Parser * parser);
/* Stateless-style SPS parse variant (the SPS is not cached in @parser —
 * presumably; confirm against the implementation). */
H265ParserResult h265_parse_sps (H265Parser * parser,
H265NalUnit * nalu,
H265SPS * sps,
gboolean parse_vui_params);
G_END_DECLS
#endif

View File

@@ -2,26 +2,21 @@
*
* Copyright (C) 2001-2002 Ronald Bultje <rbultje@ronald.bitfreak.net>
* 2006 Edgard Lima <edgard.lima@gmail.com>
*
* SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LGPL-2.0-only
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* gstv4l2object.c: base class for V4L2 elements
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Library General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version. This library is distributed in the hope
* that it will be useful, but WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU Library General Public License for more details.
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
* USA.
*/
#ifdef HAVE_CONFIG_H
@@ -67,12 +62,6 @@ GST_DEBUG_CATEGORY_EXTERN (v4l2_debug);
#define ENCODED_BUFFER_SIZE (4 * 1024 * 1024)
#ifdef USE_V4L2_TARGET_NV
/* Structure to hold the video info inorder to modify the contents, incase of
* GST_VIDEO_FORMAT_I420_12LE format */
static GstVideoFormatInfo video_info;
#endif
enum
{
PROP_0,
@@ -183,9 +172,6 @@ static const GstV4L2FormatDesc gst_v4l2_formats[] = {
{V4L2_PIX_FMT_P012, TRUE, GST_V4L2_RAW},
{V4L2_PIX_FMT_P012M, TRUE, GST_V4L2_RAW},
{V4L2_PIX_FMT_NV24M, TRUE, GST_V4L2_RAW},
{V4L2_PIX_FMT_YUV444_10LE, TRUE, GST_V4L2_RAW},
{V4L2_PIX_FMT_YUV444_12LE, TRUE, GST_V4L2_RAW},
{V4L2_PIX_FMT_P216M, TRUE, GST_V4L2_RAW},
#endif
/* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */
{V4L2_PIX_FMT_SBGGR8, TRUE, GST_V4L2_CODEC},
@@ -1052,9 +1038,7 @@ gst_v4l2_object_get_format_from_fourcc (GstV4l2Object * v4l2object,
#ifdef USE_V4L2_TARGET_NV
if (fourcc == V4L2_PIX_FMT_P010M ||
fourcc == V4L2_PIX_FMT_P012M ||
fourcc == V4L2_PIX_FMT_NV24M ||
fourcc == V4L2_PIX_FMT_YUV444_10LE ||
fourcc == V4L2_PIX_FMT_YUV444_12LE) {
fourcc == V4L2_PIX_FMT_NV24M) {
fmt->pixelformat = fourcc;
return fmt;
}
@@ -1153,9 +1137,6 @@ gst_v4l2_object_format_get_rank (const struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_P012: /* Y/CbCr 4:2:0, 12 bits per channel */
case V4L2_PIX_FMT_P012M:
case V4L2_PIX_FMT_NV24M:
case V4L2_PIX_FMT_YUV444_10LE:
case V4L2_PIX_FMT_YUV444_12LE:
case V4L2_PIX_FMT_P216M:
#endif
case V4L2_PIX_FMT_NV21: /* 12 Y/CrCb 4:2:0 */
case V4L2_PIX_FMT_NV21M: /* Same as NV21 */
@@ -1197,9 +1178,6 @@ gst_v4l2_object_format_get_rank (const struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_YUV411P: /* Y41B, 12 bits per pixel */
rank = YUV_BASE_RANK + 4;
break;
#ifdef USE_V4L2_TARGET_NV
case V4L2_PIX_FMT_YUV422M:
#endif
case V4L2_PIX_FMT_YUV422P: /* Y42B, 16 bits per pixel */
rank = YUV_BASE_RANK + 8;
break;
@@ -1380,6 +1358,7 @@ static GstVideoFormat
gst_v4l2_object_v4l2fourcc_to_video_format (guint32 fourcc)
{
GstVideoFormat format;
switch (fourcc) {
case V4L2_PIX_FMT_GREY: /* 8 Greyscale */
format = GST_VIDEO_FORMAT_GRAY8;
@@ -1440,20 +1419,6 @@ gst_v4l2_object_v4l2fourcc_to_video_format (guint32 fourcc)
case V4L2_PIX_FMT_P012M:
format = GST_VIDEO_FORMAT_I420_12LE;
break;
case V4L2_PIX_FMT_YUV444:
format = GST_VIDEO_FORMAT_Y444;
break;
case V4L2_PIX_FMT_YUV444_10LE:
format = GST_VIDEO_FORMAT_Y444_10LE;
break;
case V4L2_PIX_FMT_YUV444_12LE:
format = GST_VIDEO_FORMAT_Y444_12LE;
break;
case V4L2_PIX_FMT_P216M:
/* NOTE: Gstreamer does not support P216 video format (16bit NV16) yet.
Hence, as a WAR it is mapped to GST_VIDEO_FORMAT_NV16_10LE32 for now. */
format = GST_VIDEO_FORMAT_NV16_10LE32;
break;
#endif
case V4L2_PIX_FMT_NV12MT:
format = GST_VIDEO_FORMAT_NV12_64Z32;
@@ -1648,10 +1613,6 @@ gst_v4l2_object_v4l2fourcc_to_bare_struct (guint32 fourcc)
case V4L2_PIX_FMT_NV61M:
#ifdef USE_V4L2_TARGET_NV
case V4L2_PIX_FMT_NV24M:
case V4L2_PIX_FMT_YUV444:
case V4L2_PIX_FMT_YUV444_10LE:
case V4L2_PIX_FMT_YUV444_12LE:
case V4L2_PIX_FMT_P216M:
#endif
case V4L2_PIX_FMT_NV24: /* 24 Y/CrCb 4:4:4 */
case V4L2_PIX_FMT_YVU410:
@@ -1716,6 +1677,7 @@ gst_v4l2_object_v4l2fourcc_to_bare_struct (guint32 fourcc)
case V4L2_PIX_FMT_Y10:
case V4L2_PIX_FMT_Y12:
case V4L2_PIX_FMT_Y10BPACK:
case V4L2_PIX_FMT_YUV444:
case V4L2_PIX_FMT_YUV555:
case V4L2_PIX_FMT_YUV565:
case V4L2_PIX_FMT_Y41P:
@@ -1886,9 +1848,6 @@ gst_v4l2_object_get_caps_info (GstV4l2Object * v4l2object, GstCaps * caps,
if (g_str_equal (mimetype, "video/x-raw")) {
switch (GST_VIDEO_INFO_FORMAT (info)) {
case GST_VIDEO_FORMAT_Y444:
fourcc = V4L2_PIX_FMT_YUV444;
break;
case GST_VIDEO_FORMAT_I420:
fourcc = V4L2_PIX_FMT_YUV420;
fourcc_nc = V4L2_PIX_FMT_YUV420M;
@@ -1907,9 +1866,6 @@ gst_v4l2_object_get_caps_info (GstV4l2Object * v4l2object, GstCaps * caps,
break;
case GST_VIDEO_FORMAT_Y42B:
fourcc = V4L2_PIX_FMT_YUV422P;
#ifdef USE_V4L2_TARGET_NV
fourcc_nc = V4L2_PIX_FMT_YUV422M;
#endif
break;
case GST_VIDEO_FORMAT_NV12:
fourcc = V4L2_PIX_FMT_NV12;
@@ -1924,15 +1880,6 @@ gst_v4l2_object_get_caps_info (GstV4l2Object * v4l2object, GstCaps * caps,
fourcc = V4L2_PIX_FMT_P012;
fourcc_nc = V4L2_PIX_FMT_P012M;
break;
case GST_VIDEO_FORMAT_Y444_10LE:
fourcc_nc = V4L2_PIX_FMT_YUV444_10LE;
break;
case GST_VIDEO_FORMAT_Y444_12LE:
fourcc_nc = V4L2_PIX_FMT_YUV444_12LE;
break;
case GST_VIDEO_FORMAT_NV16_10LE32:
fourcc = V4L2_PIX_FMT_P216M;
break;
#endif
case GST_VIDEO_FORMAT_NV12_64Z32:
fourcc_nc = V4L2_PIX_FMT_NV12MT;
@@ -2011,7 +1958,7 @@ gst_v4l2_object_get_caps_info (GstV4l2Object * v4l2object, GstCaps * caps,
} else if (g_str_equal (mimetype, "image/jpeg")) {
fourcc = V4L2_PIX_FMT_JPEG;
} else if (g_str_equal (mimetype, "video/mpeg")) {
gint version = 0;
gint version;
if (gst_structure_get_int (structure, "mpegversion", &version)) {
switch (version) {
case 1:
@@ -2319,10 +2266,8 @@ gst_v4l2_object_get_colorspace (struct v4l2_format *fmt,
switch (transfer) {
case V4L2_XFER_FUNC_709:
if (colorspace == V4L2_COLORSPACE_BT2020 && fmt->fmt.pix.height >= 2160)
if (fmt->fmt.pix.height >= 2160)
cinfo->transfer = GST_VIDEO_TRANSFER_BT2020_12;
else if (colorspace == V4L2_COLORSPACE_SMPTE170M)
cinfo->transfer = GST_VIDEO_TRANSFER_BT601;
else
cinfo->transfer = GST_VIDEO_TRANSFER_BT709;
break;
@@ -2338,9 +2283,6 @@ gst_v4l2_object_get_colorspace (struct v4l2_format *fmt,
case V4L2_XFER_FUNC_NONE:
cinfo->transfer = GST_VIDEO_TRANSFER_GAMMA10;
break;
case V4L2_XFER_FUNC_SMPTE2084:
cinfo->transfer = GST_VIDEO_TRANSFER_SMPTE2084;
break;
case V4L2_XFER_FUNC_DEFAULT:
/* nothing, just use defaults for colorspace */
break;
@@ -2446,18 +2388,11 @@ gst_v4l2_object_add_interlace_mode (GstV4l2Object * v4l2object,
gst_value_list_append_and_take_value (&interlace_formats, &interlace_enum);
}
#ifdef USE_V4L2_TARGET_NV
if (gst_value_list_get_size (&interlace_formats))
{
#endif
if (gst_v4l2src_value_simplify (&interlace_formats)
|| gst_value_list_get_size (&interlace_formats) > 0)
gst_structure_take_value (s, "interlace-mode", &interlace_formats);
else
GST_WARNING_OBJECT (v4l2object, "Failed to determine interlace mode");
#ifdef USE_V4L2_TARGET_NV
}
#endif
if (gst_v4l2src_value_simplify (&interlace_formats)
|| gst_value_list_get_size (&interlace_formats) > 0)
gst_structure_take_value (s, "interlace-mode", &interlace_formats);
else
GST_WARNING_OBJECT (v4l2object, "Failed to determine interlace mode");
g_value_unset(&interlace_formats);
return;
@@ -2766,7 +2701,7 @@ unknown_type:
static gint
sort_by_frame_size (GstStructure * s1, GstStructure * s2)
{
int w1 = 0, h1 = 0, w2 = 0, h2 = 0;
int w1, h1, w2, h2;
gst_structure_get_int (s1, "width", &w1);
gst_structure_get_int (s1, "height", &h1);
@@ -3188,7 +3123,12 @@ gst_v4l2_object_setup_pool (GstV4l2Object * v4l2object, GstCaps * caps)
|| !strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVENC_ALT))) {
/* Currently, DMABUF_IMPORT io mode is used on encoder
output plane, when default mode V4L2_IO_AUTO is set */
mode = GST_V4L2_IO_DMABUF_IMPORT;
if (is_cuvid == TRUE) {
mode = GST_V4L2_IO_MMAP; //TODO make this default to dmabuf_import
} else if (is_cuvid == FALSE) {
mode = GST_V4L2_IO_DMABUF_IMPORT;
}
} else {
if (is_cuvid == TRUE){
mode = GST_V4L2_IO_MMAP;
@@ -3280,15 +3220,19 @@ gst_v4l2_object_set_stride (GstVideoInfo * info, GstVideoAlignment * align,
const GstVideoFormatInfo *finfo = info->finfo;
if (GST_VIDEO_FORMAT_INFO_IS_TILED (finfo)) {
gint x_tiles, y_tiles, tile_height, padded_height;
gint x_tiles, y_tiles, ws, hs, tile_height, padded_height;
tile_height = GST_VIDEO_FORMAT_INFO_TILE_HEIGHT (finfo, plane);
ws = GST_VIDEO_FORMAT_INFO_TILE_WS (finfo);
hs = GST_VIDEO_FORMAT_INFO_TILE_HS (finfo);
tile_height = 1 << hs;
padded_height = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (finfo, plane,
info->height + align->padding_top + align->padding_bottom);
padded_height = GST_ROUND_UP_N (padded_height, tile_height);
x_tiles = stride / GST_VIDEO_FORMAT_INFO_TILE_STRIDE (finfo, plane);
y_tiles = (padded_height + tile_height - 1) / tile_height;
x_tiles = stride >> ws;
y_tiles = padded_height >> hs;
info->stride[plane] = GST_VIDEO_TILE_MAKE_STRIDE (x_tiles, y_tiles);
} else {
info->stride[plane] = stride;
@@ -3382,10 +3326,12 @@ gst_v4l2_object_save_format (GstV4l2Object * v4l2object,
padded_height = format->fmt.pix.height;
if (GST_VIDEO_FORMAT_INFO_IS_TILED (finfo)) {
guint tile_height;
tile_height = GST_VIDEO_FORMAT_INFO_TILE_HEIGHT (finfo,0);
/* Round-up to tile_height as drivers are not forced to do so */
padded_height = (padded_height + tile_height - 1) / tile_height * tile_height;
guint hs, tile_height;
hs = GST_VIDEO_FORMAT_INFO_TILE_HS (finfo);
tile_height = 1 << hs;
padded_height = GST_ROUND_UP_N (padded_height, tile_height);
}
align->padding_bottom = padded_height - info->height - align->padding_top;
@@ -3497,10 +3443,9 @@ gst_v4l2_object_extrapolate_stride (const GstVideoFormatInfo * finfo,
case GST_VIDEO_FORMAT_NV16:
case GST_VIDEO_FORMAT_NV61:
case GST_VIDEO_FORMAT_NV24:
#ifdef USE_V4L2_TARGET_NV
#ifdef USE_V4L2_TARGET_NV
case GST_VIDEO_FORMAT_P010_10LE:
case GST_VIDEO_FORMAT_I420_12LE:
case GST_VIDEO_FORMAT_NV16_10LE32:
#endif
estride = (plane == 0 ? 1 : 2) *
GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (finfo, plane, stride);
@@ -3513,17 +3458,6 @@ gst_v4l2_object_extrapolate_stride (const GstVideoFormatInfo * finfo,
return estride;
}
void post_error_to_bus(GstElement *element, const gchar *error_message)
{
GError *error =
g_error_new_literal(GST_CORE_ERROR, GST_CORE_ERROR_FAILED, error_message);
gst_element_post_message
(GST_ELEMENT(element), gst_message_new_error(GST_OBJECT(element),
error, NULL));
g_error_free(error);
}
static gboolean
gst_v4l2_object_set_format_full (GstV4l2Object * v4l2object, GstCaps * caps,
gboolean try_only, GstV4l2Error * error)
@@ -3534,8 +3468,8 @@ gst_v4l2_object_set_format_full (GstV4l2Object * v4l2object, GstCaps * caps,
enum v4l2_field field;
guint32 pixelformat;
struct v4l2_fmtdesc *fmtdesc;
GstVideoInfo info = { 0, };
GstVideoAlignment align = { 0, };
GstVideoInfo info;
GstVideoAlignment align;
#ifndef USE_V4L2_TARGET_NV
gint width, height, fps_n, fps_d;
#else
@@ -3545,9 +3479,6 @@ gst_v4l2_object_set_format_full (GstV4l2Object * v4l2object, GstCaps * caps,
guint width, height, fps_n, fps_d;
GstV4l2VideoEnc *videoenc = NULL;
GstV4l2VideoDec *videodec = NULL;
gst_video_info_init (&info);
gst_video_alignment_reset (&align);
if (!strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVENC)
|| !strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVENC_ALT)) {
videoenc = GST_V4L2_VIDEO_ENC (v4l2object->element);
@@ -3556,7 +3487,7 @@ gst_v4l2_object_set_format_full (GstV4l2Object * v4l2object, GstCaps * caps,
videodec = GST_V4L2_VIDEO_DEC (v4l2object->element);
}
GstV4l2VideoEncClass *klass = NULL;
{
if (is_cuvid == FALSE) {
if (!strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVENC)
|| !strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVENC_ALT)) {
klass = GST_V4L2_VIDEO_ENC_GET_CLASS (v4l2object->element);
@@ -3763,8 +3694,8 @@ gst_v4l2_object_set_format_full (GstV4l2Object * v4l2object, GstCaps * caps,
gint stride = GST_VIDEO_INFO_PLANE_STRIDE (&info, i);
if (GST_VIDEO_FORMAT_INFO_IS_TILED (info.finfo))
stride = GST_VIDEO_TILE_X_TILES (stride) *
GST_VIDEO_FORMAT_INFO_TILE_STRIDE (info.finfo, i);
stride = GST_VIDEO_TILE_X_TILES (stride) <<
GST_VIDEO_FORMAT_INFO_TILE_WS (info.finfo);
format.fmt.pix_mp.plane_fmt[i].bytesperline = stride;
}
@@ -3782,8 +3713,8 @@ gst_v4l2_object_set_format_full (GstV4l2Object * v4l2object, GstCaps * caps,
format.fmt.pix.field = field;
if (GST_VIDEO_FORMAT_INFO_IS_TILED (info.finfo))
stride = GST_VIDEO_TILE_X_TILES (stride) *
GST_VIDEO_FORMAT_INFO_TILE_STRIDE (info.finfo, i);
stride = GST_VIDEO_TILE_X_TILES (stride) <<
GST_VIDEO_FORMAT_INFO_TILE_WS (info.finfo);
/* try to ask our preferred stride */
format.fmt.pix.bytesperline = stride;
@@ -3858,14 +3789,6 @@ gst_v4l2_object_set_format_full (GstV4l2Object * v4l2object, GstCaps * caps,
if (ret)
goto invalid_ctrl;
}
} else {
if ((!strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVDEC)) &&
V4L2_TYPE_IS_OUTPUT (v4l2object->type) &&
(width == 0 || height == 0)) {
GST_WARNING_OBJECT (v4l2object->dbg_obj,
"Invalid caps: %s", gst_caps_to_string(caps));
goto invalid_caps;
}
}
#endif
@@ -3897,144 +3820,47 @@ gst_v4l2_object_set_format_full (GstV4l2Object * v4l2object, GstCaps * caps,
if (ret < 0)
goto invalid_ctrl;
if (videoenc)
{
if (V4L2_TYPE_IS_OUTPUT(v4l2object->type))
{
if (strcmp(klass->codec_name, "H264") == 0 || strcmp(klass->codec_name, "H265") == 0 || strcmp(klass->codec_name, "AV1") == 0)
{
if (!klass->set_encoder_properties(&videoenc->parent))
{
g_print("set_encoder_properties failed\n");
if (is_cuvid == FALSE) {
if (videoenc) {
if (V4L2_TYPE_IS_OUTPUT (v4l2object->type)) {
if (strcmp (klass->codec_name, "H264") == 0
|| strcmp (klass->codec_name, "H265") == 0
|| strcmp (klass->codec_name, "AV1") == 0) {
if (!klass->set_encoder_properties (&videoenc->parent)) {
g_print ("set_encoder_properties failed\n");
return FALSE;
}
}
if (!klass->set_video_encoder_properties (&videoenc->parent)) {
g_print ("set_video_encoder_properties failed\n");
return FALSE;
}
}
if (!klass->set_video_encoder_properties(&videoenc->parent))
{
g_print("set_video_encoder_properties failed\n");
return FALSE;
}
if (is_cuvid)
{
/* CUDA PRESETS are overridden if HW presets are set.*/
if (!videoenc->hw_preset_level)
{
ctl.id = V4L2_CID_MPEG_VIDEOENC_CUDA_PRESET_ID;
ctl.value = videoenc->cudaenc_preset_id;
ctrls.count = 1;
ctrls.controls = &ctl;
ret = v4l2object->ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
ctl.id = V4L2_CID_MPEG_VIDEOENC_CUDA_TUNING_INFO;
ctl.value = videoenc->cudaenc_tuning_info_id;
ctrls.count = 1;
ctrls.controls = &ctl;
ret = v4l2object->ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
}
ctl.id = V4L2_CID_MPEG_VIDEO_CUDA_MEM_TYPE;
ctl.value = videoenc->cudaenc_mem_type;
ctrls.count = 1;
ctrls.controls = &ctl;
ret = v4l2object->ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
v4l2_ctrl_video_constqp constqp;
constqp.constQpI = videoenc->constQpI;
constqp.constQpP = videoenc->constQpP;
constqp.constQpB = videoenc->constQpB;
ctrls.count = 1;
ctrls.controls = &ctl;
ctl.id = V4L2_CID_MPEG_VIDEOENC_CUDA_CONSTQP;
ctl.string = (gchar *)&constqp;
ret = v4l2object->ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
v4l2_ctrl_video_init_qp init_qp;
init_qp.IInitQP = videoenc->IInitQP;
init_qp.PInitQP = videoenc->PInitQP;
init_qp.BInitQP = videoenc->BInitQP;
ctrls.count = 1;
ctrls.controls = &ctl;
ctl.id = V4L2_CID_MPEG_VIDEOENC_INIT_FRAME_QP;
ctl.string = (gchar *)&init_qp;
ret = v4l2object->ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
ctl.id = V4L2_CID_MPEG_VIDEO_MAXBITRATE;
ctl.value = videoenc->maxbitrate;
ctrls.count = 1;
ctrls.controls = &ctl;
ret = v4l2object->ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
ctl.id = V4L2_CID_MPEG_VIDEOENC_VBVBUFSIZE;
ctl.value = videoenc->vbvbufsize;
ctrls.count = 1;
ctrls.controls = &ctl;
ret = v4l2object->ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
ctl.id = V4L2_CID_MPEG_VIDEOENC_VBVINIT;
ctl.value = videoenc->vbvinit;
ctrls.count = 1;
ctrls.controls = &ctl;
ret = v4l2object->ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
ctl.id = V4L2_CID_MPEG_VIDEOENC_ENABLE_AQ;
ctl.value = videoenc->aqStrength;
ctrls.count = 1;
ctrls.controls = &ctl;
ret = v4l2object->ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
ctl.id = V4L2_CID_MPEG_VIDEOENC_ENABLE_TEMPORAL_AQ;
ctl.value = videoenc->enableTemporalAQ;
ctrls.count = 1;
ctrls.controls = &ctl;
ret = v4l2object->ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
ctl.id = V4L2_CID_MPEG_VIDEOENC_TARGET_QUALITY;
ctl.value = videoenc->targetQuality;
ctrls.count = 1;
ctrls.controls = &ctl;
ret = v4l2object->ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
}
}
}
if (videoenc) {
if (pixelformat == V4L2_PIX_FMT_AV1) {
set_v4l2_video_mpeg_class (videoenc->v4l2capture,
V4L2_CID_MPEG_VIDEOENC_AV1_HEADERS_WITH_FRAME, videoenc->v4l2capture->Enable_headers);
}
}
if (is_cuvid == FALSE) {
if (videoenc) {
if (pixelformat == V4L2_PIX_FMT_VP8 || pixelformat == V4L2_PIX_FMT_VP9) {
set_v4l2_video_mpeg_class (videoenc->v4l2capture,
V4L2_CID_MPEG_VIDEOENC_VPX_HEADERS_WITH_FRAME, videoenc->v4l2capture->Enable_headers);
}
if (pixelformat == V4L2_PIX_FMT_AV1) {
set_v4l2_video_mpeg_class (videoenc->v4l2capture,
V4L2_CID_MPEG_VIDEOENC_AV1_HEADERS_WITH_FRAME, videoenc->v4l2capture->Enable_headers);
}
}
}
else if (is_cuvid == TRUE) {
if (videoenc) {
ctl.id = V4L2_CID_MPEG_VIDEO_CUDA_GPU_ID;
ctl.value = videoenc->cudaenc_gpu_id;
ctrls.count = 1;
ctrls.controls = &ctl ;
ret = v4l2object->ioctl (fd, VIDIOC_S_EXT_CTRLS, &ctrls);
if (ret)
goto invalid_ctrl;
}
}
#endif
@@ -4371,27 +4197,6 @@ get_parm_failed:
}
set_parm_failed:
{
if ((v4l2object->is_encode == 1) && (is_cuvid == TRUE))
{
gchar err_msg[100] = "ENCODER INITIALIZATION FAILED";
switch (errno)
{
case EINVAL:
strncpy (err_msg, "INVALID / UNSUPPORTED PARAM", 100);
break;
case ENODEV:
strncpy (err_msg, "INVALID / UNSUPPORTED / NO ENCODE DEVICE", 100);
break;
case ENOSYS:
strncpy (err_msg, "FEATURE UNIMPLEMENTED", 100);
break;
case EPERM:
strncpy (err_msg, "OPERATION NOT PERMITTED", 100);
break;
}
post_error_to_bus (v4l2object->element, err_msg);
}
GST_V4L2_ERROR (error, RESOURCE, SETTINGS,
(_("Video device did not accept new frame rate setting.")),
GST_ERROR_SYSTEM);
@@ -4442,7 +4247,7 @@ gst_v4l2_object_acquire_format (GstV4l2Object * v4l2object, GstVideoInfo * info)
struct v4l2_rect *r = NULL;
GstVideoFormat format;
guint width, height;
GstVideoAlignment align = { 0, };
GstVideoAlignment align;
gst_video_info_init (info);
gst_video_alignment_reset (&align);
@@ -4494,19 +4299,6 @@ gst_v4l2_object_acquire_format (GstV4l2Object * v4l2object, GstVideoInfo * info)
gst_video_info_set_format (info, format, width, height);
#ifdef USE_V4L2_TARGET_NV
/* Currently gst plugins base doesn't have support for P012_12LE or NV12 12 bit format.
So we can only pass GST_VIDEO_FORMAT_I420_12LE to gst_video_format_get_info() method
which returns num planes as 3 and creates an assertion in gst_v4l2_object_extrapolate_info().
Once the support for P012_12LE or NV12 12 bit format are added correctly in gst plugins base,
We no longer need this check. */
if (format == GST_VIDEO_FORMAT_I420_12LE) {
memcpy (&video_info, info->finfo, sizeof(video_info));
video_info.n_planes = 2;
info->finfo = &video_info;
}
#endif
switch (fmt.fmt.pix.field) {
case V4L2_FIELD_ANY:
case V4L2_FIELD_NONE:
@@ -4602,7 +4394,7 @@ gboolean
gst_v4l2_object_caps_equal (GstV4l2Object * v4l2object, GstCaps * caps)
{
GstStructure *config;
GstCaps *oldcaps = NULL;
GstCaps *oldcaps;
gboolean ret;
if (!v4l2object->pool)
@@ -4773,7 +4565,7 @@ gst_v4l2_object_decide_allocation (GstV4l2Object * obj, GstQuery * query)
GstCaps *caps;
GstBufferPool *pool = NULL, *other_pool = NULL;
GstStructure *config;
guint size = 0, min = 0, max = 0, own_min = 0;
guint size, min, max, own_min = 0;
gboolean update;
gboolean has_video_meta;
gboolean can_share_own_pool, pushing_from_our_pool = FALSE;
@@ -5078,7 +4870,7 @@ gst_v4l2_object_propose_allocation (GstV4l2Object * obj, GstQuery * query)
GstBufferPool *pool;
/* we need at least 2 buffers to operate */
guint size, min, max;
GstCaps *caps = NULL;
GstCaps *caps;
gboolean need_pool;
/* Set defaults allocation parameters */
@@ -5099,7 +4891,7 @@ gst_v4l2_object_propose_allocation (GstV4l2Object * obj, GstQuery * query)
gst_object_ref (pool);
if (pool != NULL) {
GstCaps *pcaps = NULL;
GstCaps *pcaps;
GstStructure *config;
/* we had a pool, check caps */
@@ -5169,7 +4961,8 @@ set_v4l2_video_mpeg_class (GstV4l2Object * v4l2object, guint label,
if (control.id == V4L2_CID_MPEG_VIDEOENC_VIRTUALBUFFER_SIZE) {
control.string = (gchar *) &buffer_size;
} else if (control.id == V4L2_CID_MPEG_VIDEOENC_NUM_REFERENCE_FRAMES) {
} else if ((control.id == V4L2_CID_MPEG_VIDEOENC_SLICE_INTRAREFRESH_PARAM) ||
(control.id == V4L2_CID_MPEG_VIDEOENC_NUM_REFERENCE_FRAMES)) {
control.string = (gchar *) &params;
} else {
control.value = params;

View File

@@ -2,8 +2,7 @@
*
* Copyright (C) 2001-2002 Ronald Bultje <rbultje@ronald.bitfreak.net>
* 2006 Edgard Lima <edgard.lima@gmail.com>
* SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LGPL-2.0-only
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* gstv4l2object.h: base class for V4L2 elements
*
@@ -55,12 +54,10 @@ typedef struct _GstV4l2ObjectClassHelper GstV4l2ObjectClassHelper;
#define V4L2_DEVICE_BASENAME_NVDEC "nvdec"
#define V4L2_DEVICE_BASENAME_NVENC "msenc"
#define V4L2_DEVICE_PATH_NVDEC "/dev/nvhost-nvdec"
#define V4L2_DEVICE_PATH_NVDEC_ALT "/dev/v4l2-nvdec"
#define V4L2_DEVICE_PATH_NVDEC_ALT "/dev/dri/card0"
#define V4L2_DEVICE_PATH_NVDEC_MCCOY "/dev/nvidia0"
#define V4L2_DEVICE_PATH_NVENC "/dev/nvhost-msenc"
#define V4L2_DEVICE_PATH_NVENC_ALT "/dev/v4l2-nvenc"
#define V4L2_DEVICE_PATH_TEGRA_INFO "/sys/firmware/devicetree/base/compatible"
#define V4L2_DEVICE_INFO_SOM_EEPROM "/sys/firmware/devicetree/base/chosen/ids"
#endif
/* max frame width/height */
@@ -140,6 +137,7 @@ typedef gboolean (*GstV4l2UpdateFpsFunction) (GstV4l2Object * v4l2object);
return FALSE; \
}
struct _GstV4l2Object {
GstElement * element;
GstObject * dbg_obj;
@@ -228,7 +226,6 @@ struct _GstV4l2Object {
GMutex cplane_stopped_lock;
guint sei_payload_size;
void* sei_payload;
gchar *sei_uuid;
#endif
/* funcs */
@@ -374,8 +371,6 @@ gboolean gst_v4l2_get_attribute (GstV4l2Object * v4l2object, int attribute
gboolean gst_v4l2_set_attribute (GstV4l2Object * v4l2object, int attribute, const int value);
gboolean gst_v4l2_set_controls (GstV4l2Object * v4l2object, GstStructure * controls);
void post_error_to_bus(GstElement *element, const gchar *error_message);
#ifdef USE_V4L2_TARGET_NV
gboolean set_v4l2_video_mpeg_class (GstV4l2Object * v4l2object, guint label,
gint params);

View File

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,7 @@
/*
* Copyright (C) 2014 Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.co.uk>
* SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LGPL-2.0-only
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -49,15 +48,8 @@ G_BEGIN_DECLS
#ifdef USE_V4L2_TARGET_NV
#define GstV4l2VideoDec GstNvV4l2VideoDec
#define GstV4l2VideoDecClass GstNvV4l2VideoDecClass
#define LOOP_COUNT_TO_WAIT_FOR_DQEVENT 10
#define LOOP_COUNT_TO_WAIT_FOR_DQEVENT 6
#define WAIT_TIME_PER_LOOP_FOR_DQEVENT 100*1000
#define VP8_START_BYTE_0 0x9D
#define VP8_START_BYTE_1 0x01
#define VP9_START_BYTE_0 0x49
#define VP9_START_BYTE_1 0x83
#define VP9_START_BYTE_2 0x42
#endif
typedef struct _GstV4l2VideoDec GstV4l2VideoDec;
@@ -77,12 +69,12 @@ struct _GstV4l2VideoDec
/* State */
GstVideoCodecState *input_state;
gboolean active;
GstFlowReturn output_flow;
guint64 frame_num;
#ifdef USE_V4L2_TARGET_NV
GHashTable* hash_pts_systemtime;
gdouble buffer_in_time;
guint64 decoded_picture_cnt;
guint32 skip_frames;
gboolean idr_received;
@@ -94,22 +86,13 @@ struct _GstV4l2VideoDec
gboolean enable_frame_type_reporting;
gboolean enable_error_check;
gboolean enable_max_performance;
gboolean set_format;
gboolean is_gdr_stream;
guint32 cudadec_mem_type;
guint32 cudadec_gpu_id;
guint32 cudadec_num_surfaces;
gboolean cudadec_low_latency;
gboolean extract_sei_type5_data;
gchar *sei_uuid_string;
gdouble rate;
guint32 cap_buf_dynamic_allocation;
guint32 current_width;
guint32 current_height;
guint32 old_width;
guint32 old_height;
gboolean valid_vpx;
GMutex pts_hashtable_lock;
#endif
};
@@ -123,9 +106,6 @@ struct _GstV4l2VideoDecClass
GType gst_v4l2_video_dec_get_type (void);
gboolean gst_v4l2_is_video_dec (GstCaps * sink_caps, GstCaps * src_caps);
#ifdef USE_V4L2_TARGET_NV
gboolean set_v4l2_controls (GstV4l2VideoDec *self);
#endif
void gst_v4l2_video_dec_register (GstPlugin * plugin,
const gchar * basename,
const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps);

View File

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,7 @@
/*
* Copyright (C) 2014 SUMOMO Computer Association.
* Author: ayaka <ayaka@soulik.info>
* SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LGPL-2.0-only
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -55,9 +54,6 @@ struct _GstV4l2VideoEnc
#ifdef USE_V4L2_TARGET_NV
guint32 ratecontrol;
guint32 bitrate;
guint32 maxbitrate;
guint32 vbvbufsize;
guint32 vbvinit;
guint32 peak_bitrate;
guint32 idrinterval;
guint32 iframeinterval;
@@ -70,42 +66,20 @@ struct _GstV4l2VideoEnc
guint32 MaxQpP;
guint32 MinQpB;
guint32 MaxQpB;
guint32 constQpI;
guint32 constQpP;
guint32 constQpB;
guint32 IInitQP;
guint32 PInitQP;
guint32 BInitQP;
gboolean set_qpRange;
gboolean set_intrarefresh;
guint32 enableIntraRefresh;
guint32 intraRefreshPeriod;
guint32 intraRefreshCnt;
gboolean enableTemporalAQ;
guint32 aqStrength;
guint32 targetQuality;
guint32 hw_preset_level;
guint virtual_buffer_size;
gboolean measure_latency;
gboolean ratecontrol_enable;
gboolean force_idr;
gboolean force_intra;
gchar *sei_uuid;
gboolean maxperf_enable;
gboolean copy_timestamp;
FILE *tracing_file_enc;
GQueue *got_frame_pt;
guint32 cudaenc_mem_type;
guint32 cudaenc_gpu_id;
guint32 cudaenc_preset_id;
guint32 cudaenc_tuning_info_id;
gboolean slice_output;
GstVideoCodecFrame *best_prev;
GstClockTime buf_pts_prev;
gdouble buffer_in_time;
GHashTable* hash_pts_systemtime;
gboolean copy_meta;
gboolean enable_hwpreset;
#endif
/* < private > */
@@ -158,11 +132,5 @@ void gst_v4l2_video_enc_register (GstPlugin * plugin, GType type,
const char *codec, const gchar * basename, const gchar * device_path,
GstCaps * sink_caps, GstCaps * codec_caps, GstCaps * src_caps);
#ifdef USE_V4L2_TARGET_NV
void set_encoder_src_caps (GstVideoEncoder *encoder, GstCaps *input_caps);
gboolean is_drc (GstVideoEncoder *encoder, GstCaps *input_caps);
gboolean reconfigure_fps (GstVideoEncoder *encoder, GstCaps *input_caps, guint label);
#endif
G_END_DECLS
#endif /* __GST_V4L2_VIDEO_ENC_H__ */

View File

@@ -1,296 +0,0 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "nalutils.h"
/* Compute Ceil(Log2(v)) */
/* Derived from branchless code for integer log2(v) from:
<http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog> */
/* Returns Ceil(Log2(v)) computed branchlessly: each step tests whether any
 * bit is set in the upper half of the remaining value, folds that half away,
 * and ORs the shift amounts into the result.
 * NOTE(review): v == 0 underflows (v--) and yields 32; v == 1 yields 1 rather
 * than the mathematical 0 -- this matches the upstream GStreamer helper, so
 * confirm no caller depends on it before "fixing". */
guint
ceil_log2 (guint32 v)
{
guint r, shift;
/* work on v - 1 so exact powers of two do not round up */
v--;
/* does the upper 16-bit half contain a set bit? */
r = (v > 0xFFFF) << 4;
v >>= r;
/* upper 8-bit half of what remains */
shift = (v > 0xFF) << 3;
v >>= shift;
r |= shift;
shift = (v > 0xF) << 2;
v >>= shift;
r |= shift;
shift = (v > 0x3) << 1;
v >>= shift;
r |= shift;
/* last remaining bit */
r |= (v >> 1);
/* r is floor(log2(v - 1)); +1 gives the ceiling for the original v */
return r + 1;
}
/****** Nal parser ******/
/* Prepares @nr to parse @size bytes of NAL payload starting at @data.
 * The reader does not take ownership of @data. */
void
init_nal (NalReader * nr, const guint8 * data, guint size)
{
  /* Seed the cached bytes with a non-zero value so the
   * emulation-prevention-byte check in _read() (which looks for a
   * 0x00 0x00 prefix) cannot trigger on the pristine state. */
  nr->first_byte = 0xff;
  nr->cache = 0xff;

  nr->data = data;
  nr->size = size;
  nr->byte = 0;
  nr->bits_in_cache = 0;
  nr->n_epb = 0;
}
/* Fills the bit cache so that at least @nbits bits are available,
 * transparently stripping emulation-prevention bytes (the 0x03 inserted
 * after any 0x00 0x00 pair in an RBSP). Returns FALSE when the request
 * exceeds the remaining data. */
gboolean
_read (NalReader * nr, guint nbits)
{
if (G_UNLIKELY (nr->byte * 8 + (nbits - nr->bits_in_cache) > nr->size * 8)) {
GST_DEBUG ("Can not read %u bits, bits in cache %u, Byte * 8 %u, size in "
"bits %u", nbits, nr->bits_in_cache, nr->byte * 8, nr->size * 8);
return FALSE;
}
while (nr->bits_in_cache < nbits) {
guint8 byte;
gboolean check_three_byte;
check_three_byte = TRUE;
next_byte:
if (G_UNLIKELY (nr->byte >= nr->size))
return FALSE;
byte = nr->data[nr->byte++];
/* check if the byte is a emulation_prevention_three_byte: 0x03 preceded
   by two 0x00 bytes (first_byte and the low byte of the cache) */
if (check_three_byte && byte == 0x03 && nr->first_byte == 0x00 &&
((nr->cache & 0xff) == 0)) {
/* next byte goes unconditionally to the cache, even if it's 0x03 */
check_three_byte = FALSE;
nr->n_epb++;
goto next_byte;
}
nr->cache = (nr->cache << 8) | nr->first_byte;
nr->first_byte = byte;
nr->bits_in_cache += 8;
}
return TRUE;
}
/* Skips @nbits bits; @nbits must fit in the cache (use _skip_long() for
 * arbitrary amounts). Returns FALSE on bitstream exhaustion. */
gboolean
_skip (NalReader * nr, guint nbits)
{
  g_assert (nbits <= 8 * sizeof (nr->cache));

  /* Pull the bits into the cache, then simply drop them. */
  if (!_read (nr, nbits))
    return FALSE;

  nr->bits_in_cache -= nbits;
  return TRUE;
}
/* Generic version to skip any number of bits, not limited by the cache
 * size as _skip() is. */
gboolean
_skip_long (NalReader * nr, guint nbits)
{
/* Leave out enough bits in the cache once we are finished: chunks are
   limited to half the cache size in bits so each _skip() stays cacheable */
const guint skip_size = 4 * sizeof (nr->cache);
guint remaining = nbits;
/* first skip the remainder, then whole skip_size chunks */
nbits %= skip_size;
while (remaining > 0) {
if (!_skip (nr, nbits))
return FALSE;
remaining -= nbits;
nbits = skip_size;
}
return TRUE;
}
guint
_get_pos (const NalReader * nr)
{
return nr->byte * 8 - nr->bits_in_cache;
}
guint
_get_remaining (const NalReader * nr)
{
return (nr->size - nr->byte) * 8 + nr->bits_in_cache;
}
/* Returns how many emulation-prevention bytes (0x03) _read() has stripped
 * from the stream so far. */
guint
_get_epb_count (const NalReader * nr)
{
return nr->n_epb;
}
#define _READ_BITS(bits) \
gboolean \
_get_bits_uint##bits (NalReader *nr, guint##bits *val, guint nbits) \
{ \
guint shift; \
\
if (!_read (nr, nbits)) \
return FALSE; \
\
/* bring the required bits down and truncate */ \
shift = nr->bits_in_cache - nbits; \
*val = nr->first_byte >> shift; \
\
*val |= nr->cache << (8 - shift); \
/* mask out required bits */ \
if (nbits < bits) \
*val &= ((guint##bits)1 << nbits) - 1; \
\
nr->bits_in_cache = shift; \
\
return TRUE; \
} \
_READ_BITS (8);
_READ_BITS (16);
_READ_BITS (32);
/* Expands to _peek_bits_uint##bits(): same contract as the matching
 * _get_bits_uint##bits(), but operates on a local copy of the reader so
 * the caller's stream position is left untouched. */
#define _PEEK_BITS(bits) \
gboolean \
_peek_bits_uint##bits (const NalReader *nr, guint##bits *val, guint nbits) \
{ \
NalReader tmp; \
\
tmp = *nr; \
return _get_bits_uint##bits (&tmp, val, nbits); \
}
_PEEK_BITS (8);
/* Reads an unsigned Exp-Golomb value (ue(v)): counts the leading zero
 * bits, then reads that many suffix bits; the result is
 * 2^i - 1 + suffix. Returns FALSE on bitstream exhaustion or a prefix
 * longer than 31 zeros (result would not fit a guint32).
 * NOTE(review): with i == 31, `1 << i` shifts into the sign bit of a
 * signed int -- same as the upstream helper, but technically UB; confirm
 * whether a ((guint32) 1 << i) cast is wanted. */
gboolean
_get_ue (NalReader * nr, guint32 * val)
{
guint i = 0;
guint8 bit;
guint32 value;
if (G_UNLIKELY (!_get_bits_uint8 (nr, &bit, 1)))
return FALSE;
/* count the leading zero bits of the Exp-Golomb prefix */
while (bit == 0) {
i++;
if (G_UNLIKELY (!_get_bits_uint8 (nr, &bit, 1)))
return FALSE;
}
if (G_UNLIKELY (i > 31))
return FALSE;
/* read the i-bit suffix that follows the terminating 1 bit */
if (G_UNLIKELY (!_get_bits_uint32 (nr, &value, i)))
return FALSE;
*val = (1 << i) - 1 + value;
return TRUE;
}
/* Reads a signed Exp-Golomb value (se(v)) by mapping the unsigned code
 * word k onto the sequence 0, 1, -1, 2, -2, ... */
gboolean
_get_se (NalReader * nr, gint32 * val)
{
  guint32 code;

  if (G_UNLIKELY (!_get_ue (nr, &code)))
    return FALSE;

  /* odd code words map to positive values, even ones to negative */
  if (code & 1)
    *val = (gint32) ((code >> 1) + 1);
  else
    *val = -(gint32) (code >> 1);

  return TRUE;
}
/* TRUE when the reader sits exactly on a byte boundary, i.e. no bits are
 * pending in the cache. */
gboolean
_is_byte_aligned (NalReader * nr)
{
  return nr->bits_in_cache == 0;
}
/* Implements the spec's more_rbsp_data(): returns TRUE if meaningful RBSP
 * payload bits remain before the rbsp_stop_one_bit. Works on a local copy
 * of the reader, so the caller's position is preserved. */
gboolean
_has_more_data (NalReader * nr)
{
NalReader nr_tmp;
guint remaining, nbits;
guint8 rbsp_stop_one_bit, zero_bits;
remaining = _get_remaining (nr);
if (remaining == 0)
return FALSE;
/* peek ahead on a copy so the caller's reader state is untouched */
nr_tmp = *nr;
nr = &nr_tmp;
/* The spec defines that more_rbsp_data() searches for the last bit
equal to 1, and that it is the rbsp_stop_one_bit. Subsequent bits
until byte boundary is reached shall be zero.
This means that more_rbsp_data() is FALSE if the next bit is 1
and the remaining bits until byte boundary are zero. One way to
be sure that this bit was the very last one, is that every other
bit after we reached byte boundary are also set to zero.
Otherwise, if the next bit is 0 or if there are non-zero bits
afterwards, then we have more_rbsp_data() */
if (!_get_bits_uint8 (nr, &rbsp_stop_one_bit, 1))
return FALSE;
if (!rbsp_stop_one_bit)
return TRUE;
/* check the bits up to the next byte boundary first, then whole bytes */
nbits = --remaining % 8;
while (remaining > 0) {
if (!_get_bits_uint8 (nr, &zero_bits, nbits))
return FALSE;
if (zero_bits != 0)
return TRUE;
remaining -= nbits;
nbits = 8;
}
return FALSE;
}
/*********** end of nal parser ***************/
/* Scans @data for an Annex-B start code (0x00 0x00 0x01) with a masked
 * 32-bit scan. Returns the byte offset of the first match, or -1 when no
 * start code is found (gst_byte_reader_masked_scan_uint32() semantics). */
gint
scan_for_start_codes (const guint8 * data, guint size)
{
GstByteReader br;
gst_byte_reader_init (&br, data, size);
/* NALU not empty, so we can at least expect 1 (even 2) bytes following sc */
return gst_byte_reader_masked_scan_uint32 (&br, 0xffffff00, 0x00000100,
0, size);
}

View File

@@ -1,170 +0,0 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <gst/base/gstbytereader.h>
#include <gst/base/gstbitreader.h>
#include <string.h>
guint ceil_log2 (guint32 v);
/* Bit reader over NAL RBSP data that strips H.264/H.265 emulation
 * prevention bytes on the fly (see _read() in nalutils.c). */
typedef struct
{
const guint8 *data; /* raw NAL payload (not owned by the reader) */
guint size; /* payload size in bytes */
guint n_epb; /* Number of emulation prevention bytes */
guint byte; /* Byte position */
guint bits_in_cache; /* bitpos in the cache of next bit */
guint8 first_byte; /* most recently fetched byte, not yet in cache */
guint64 cache; /* cached bytes */
} NalReader;
G_GNUC_INTERNAL
void init_nal (NalReader * nr, const guint8 * data, guint size);
G_GNUC_INTERNAL
gboolean _read (NalReader * nr, guint nbits);
G_GNUC_INTERNAL
gboolean _skip (NalReader * nr, guint nbits);
G_GNUC_INTERNAL
gboolean _skip_long (NalReader * nr, guint nbits);
G_GNUC_INTERNAL
guint _get_pos (const NalReader * nr);
G_GNUC_INTERNAL
guint _get_remaining (const NalReader * nr);
G_GNUC_INTERNAL
guint _get_epb_count (const NalReader * nr);
G_GNUC_INTERNAL
gboolean _is_byte_aligned (NalReader * nr);
G_GNUC_INTERNAL
gboolean _has_more_data (NalReader * nr);
#define _READ_BITS_H(bits) \
G_GNUC_INTERNAL \
gboolean _get_bits_uint##bits (NalReader *nr, guint##bits *val, guint nbits)
_READ_BITS_H (8);
_READ_BITS_H (16);
_READ_BITS_H (32);
#define _PEEK_BITS_H(bits) \
G_GNUC_INTERNAL \
gboolean _peek_bits_uint##bits (const NalReader *nr, guint##bits *val, guint nbits)
_PEEK_BITS_H (8);
G_GNUC_INTERNAL
gboolean _get_ue (NalReader * nr, guint32 * val);
G_GNUC_INTERNAL
gboolean _get_se (NalReader * nr, gint32 * val);
#define CHECK_ALLOWED_MAX(val, max) { \
if (val > max) { \
GST_WARNING ("value greater than max. value: %d, max %d", \
val, max); \
goto error; \
} \
}
#define CHECK_ALLOWED(val, min, max) { \
if (val < min || val > max) { \
GST_WARNING ("value not in allowed range. value: %d, range %d-%d", \
val, min, max); \
goto error; \
} \
}
#define READ_UINT8(nr, val, nbits) { \
if (!_get_bits_uint8 (nr, &val, nbits)) { \
GST_WARNING ("failed to read uint8, nbits: %d", nbits); \
goto error; \
} \
}
#define READ_UINT16(nr, val, nbits) { \
if (!_get_bits_uint16 (nr, &val, nbits)) { \
GST_WARNING ("failed to read uint16, nbits: %d", nbits); \
goto error; \
} \
}
#define READ_UINT32(nr, val, nbits) { \
if (!_get_bits_uint32 (nr, &val, nbits)) { \
GST_WARNING ("failed to read uint32, nbits: %d", nbits); \
goto error; \
} \
}
/* Reads @nbits bits into the 64-bit @val; jumps to `error` on failure.
 * Bug fix: the warning said "uint32" (copy-paste from READ_UINT32).
 * NOTE(review): only the 8/16/32-bit readers are declared in this header;
 * confirm a _get_bits_uint64() is in scope wherever this macro is used. */
#define READ_UINT64(nr, val, nbits) { \
if (!_get_bits_uint64 (nr, &val, nbits)) { \
GST_WARNING ("failed to read uint64, nbits: %d", nbits); \
goto error; \
} \
}
#define READ_UE(nr, val) { \
if (!_get_ue (nr, &val)) { \
GST_WARNING ("failed to read UE"); \
goto error; \
} \
}
#define READ_UE_ALLOWED(nr, val, min, max) { \
guint32 tmp; \
READ_UE (nr, tmp); \
CHECK_ALLOWED (tmp, min, max); \
val = tmp; \
}
#define READ_UE_MAX(nr, val, max) { \
guint32 tmp; \
READ_UE (nr, tmp); \
CHECK_ALLOWED_MAX (tmp, max); \
val = tmp; \
}
#define READ_SE(nr, val) { \
if (!_get_se (nr, &val)) { \
GST_WARNING ("failed to read SE"); \
goto error; \
} \
}
#define READ_SE_ALLOWED(nr, val, min, max) { \
gint32 tmp; \
READ_SE (nr, tmp); \
CHECK_ALLOWED (tmp, min, max); \
val = tmp; \
}
G_GNUC_INTERNAL
gint scan_for_start_codes (const guint8 * data, guint size);

View File

@@ -26,23 +26,21 @@
#include <stdio.h>
#include <string.h>
#include <glib.h>
#include "gstv4l2object.h"
#define UUID_SIZE 16
#define USER_DATA_UNREGISTERED_TYPE 5
gboolean check_uuid(uint8_t *stream, char *sei_uuid_string);
uint8_t* parse_sei_unit(uint8_t * bs_ptr, guint *size, char *sei_uuid_string);
uint8_t *parse_sei_data (uint8_t *bs, uint32_t size, uint32_t *payload_size,
char *sei_uuid_string, guint32 pixelformat);
gboolean check_uuid(uint8_t *stream);
uint8_t* parse_sei_unit(uint8_t * bs_ptr, guint *size);
uint8_t *parse_sei_data (uint8_t *bs, uint32_t size, uint32_t *payload_size);
gboolean check_uuid(uint8_t *stream, char *sei_uuid_string)
gboolean check_uuid(uint8_t *stream)
{
char uuid_string[UUID_SIZE] = {0};
uint32_t size = snprintf (uuid_string, UUID_SIZE, "%s", stream);
if (size == (UUID_SIZE-1))
{
if (!strncmp (uuid_string, sei_uuid_string, (UUID_SIZE-1)))
if (!strncmp (uuid_string, "NVDS_CUSTOMMETA", (UUID_SIZE-1)))
return TRUE;
else
return FALSE;
@@ -51,27 +49,26 @@ gboolean check_uuid(uint8_t *stream, char *sei_uuid_string)
return FALSE;
}
uint8_t* parse_sei_unit(uint8_t * bs_ptr, guint *size, char *sei_uuid_string)
uint8_t* parse_sei_unit(uint8_t * bs_ptr, guint *size)
{
int payload_type = 0;
int payload_size = 0;
uint8_t* payload = NULL;
int i;
int i, emu_count;
/* printf("found a SEI NAL unit!\n"); */
payload_type = *bs_ptr++;
payload_type += *bs_ptr++;
while (payload_size % 0xFF == 0)
{
payload_size += *bs_ptr++;
}
//printf("payload_type = %i payload_size = %i\n", payload_type, payload_size);
/* printf("payload_type = %i payload_size = %i\n", payload_type, payload_size); */
if (!check_uuid (bs_ptr, sei_uuid_string))
if (!check_uuid (bs_ptr))
{
//printf ("Expected UUID not found\n");
bs_ptr += (payload_size - UUID_SIZE);
/* printf ("Expected UUID not found\n"); */
return NULL;
}
else
@@ -85,21 +82,19 @@ uint8_t* parse_sei_unit(uint8_t * bs_ptr, guint *size, char *sei_uuid_string)
{
payload = (uint8_t*)malloc((payload_size - UUID_SIZE)*sizeof(uint8_t));
for (i = 0; i < (payload_size - UUID_SIZE); i++)
for (i = 0, emu_count = 0; i < (payload_size - UUID_SIZE);
i++, emu_count++)
{
payload[i] = *bs_ptr;
if (strncmp (sei_uuid_string, "VST_CUSTOM_META", (UUID_SIZE-1)) != 0)
payload[i] = *bs_ptr++;
// drop emulation prevention bytes
if ((emu_count >= 2)
&& (payload[i] == 0x03)
&& (payload[i-1] == 0x00)
&& (payload[i-2] == 0x00))
{
// drop emulation prevention bytes
if ((*(bs_ptr) == 0x03)
&& (*(bs_ptr - 1) == 0x00)
&& (*(bs_ptr - 2) == 0x00))
{
i--;
}
i--;
emu_count = 0;
}
bs_ptr++;
}
return payload;
}
@@ -109,34 +104,13 @@ uint8_t* parse_sei_unit(uint8_t * bs_ptr, guint *size, char *sei_uuid_string)
}
}
/*************************************************************
+------H264-----+
|0|1|2|3|4|5|6|7|
+-+-+-+-+-+-+-+-+
|F|NRI| Type |
+---------------+
+------------H265---------------+
|0|1|2|3|4|5|6|7|0|1|2|3|4|5|6|7|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|F| Type | LayerId | TID |
+-------------+-----------------+
*************************************************************/
uint8_t *parse_sei_data (uint8_t *bs, uint32_t size, uint32_t *payload_size,
char *sei_uuid_string, guint32 pixelformat)
uint8_t *parse_sei_data (uint8_t *bs, uint32_t size, uint32_t *payload_size)
{
if (sei_uuid_string == NULL)
return NULL;
int checklen = 0;
unsigned int sei_payload_size = 0;
uint8_t *bs_ptr = bs;
uint8_t *bs_ptr_end = bs + size;
uint8_t *payload = NULL;
while (bs_ptr_end > bs_ptr)
while (size > 0)
{
if (checklen < 2 && *bs_ptr++ == 0x00)
checklen++;
@@ -144,12 +118,9 @@ uint8_t *parse_sei_data (uint8_t *bs, uint32_t size, uint32_t *payload_size,
checklen++;
else if (checklen == 3 && *bs_ptr++ == 0x01)
checklen++;
else if (checklen == 4 && ((pixelformat == V4L2_PIX_FMT_H264) ? *bs_ptr == 0x06 : (((*bs_ptr >> 1) & 0x3f) == 0x27)))
else if (checklen == 4 && *bs_ptr++ == 0x06)
{
bs_ptr++;
if (pixelformat == V4L2_PIX_FMT_H265)
bs_ptr++;
payload = parse_sei_unit(bs_ptr, &sei_payload_size, sei_uuid_string);
payload = parse_sei_unit(bs_ptr, &sei_payload_size);
checklen = 0;
if (payload != NULL)
{
@@ -157,12 +128,12 @@ uint8_t *parse_sei_data (uint8_t *bs, uint32_t size, uint32_t *payload_size,
return payload;
}
else
{
continue;
}
return NULL;
}
else
checklen = 0;
size--;
}
return NULL;
}

View File

@@ -2,7 +2,7 @@
*
* Copyright (C) 2002 Ronald Bultje <rbultje@ronald.bitfreak.net>
* 2006 Edgard Lima <edgard.lima@gmail.com>
* Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* v4l2_calls.c - generic V4L2 calls handling
*
@@ -51,22 +51,9 @@
#include "gstv4l2videodec.h"
#include "gst/gst-i18n-plugin.h"
#include <ctype.h>
#include "wsl_utils.h"
GST_DEBUG_CATEGORY_EXTERN (v4l2_debug);
#define GST_CAT_DEFAULT v4l2_debug
#ifdef USE_V4L2_TARGET_NV
void __attribute__((constructor)) gstv4l2_constructor_init(void);
static bool is_wsl_system = 0;
void __attribute__((constructor)) gstv4l2_constructor_init(void)
{
is_wsl_system = is_running_in_WSL();
}
#endif
/******************************************************
* gst_v4l2_get_capabilities():
* get the device's capturing capabilities
@@ -553,14 +540,7 @@ gst_v4l2_open (GstV4l2Object * v4l2object)
if (is_cuvid == TRUE) {
for (i = 0; i < 16; i++)
{
if (is_wsl_system) {
/* WSL system doesn't have /dev/nvidia0 node. Use /dev/null instead.
We can use a dummy node since the ioctl calls we use are not true ioctls */
GST_INFO_OBJECT(v4l2object->dbg_obj, "Running inside WSL");
g_snprintf(buf, sizeof(buf), "/dev/null");
} else {
g_snprintf(buf, sizeof(buf), "/dev/nvidia%d", i);
}
g_snprintf(buf, sizeof(buf), "/dev/nvidia%d", i);
v4l2object->video_fd =
open (buf, O_RDWR /* | O_NONBLOCK */ );
if (v4l2object->video_fd != -1)

View File

@@ -1,41 +0,0 @@
/**
* SPDX-FileCopyrightText: Copyright (c) 2024-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LicenseRef-NvidiaProprietary
*
* NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
* property and proprietary rights in and to this material, related
* documentation and any modifications thereto. Any use, reproduction,
* disclosure or distribution of this material and related documentation
* without an express license agreement from NVIDIA CORPORATION or
* its affiliates is strictly prohibited.
*/
#include "wsl_utils.h"
/* Returns true when the process runs inside Windows Subsystem for Linux,
 * detected by a case-insensitive "microsoft" marker in /proc/version.
 * The answer is computed once and cached in function-static state; if
 * /proc/version cannot be read, the function reports the error and
 * returns false.
 *
 * NOTE(review): `volatile` does not provide thread synchronization; this
 * cache is only safe because the sole visible caller is a library
 * constructor that runs before threads exist -- confirm if that changes. */
bool is_running_in_WSL(void)
{
    static volatile bool verified = false;
    static volatile bool ret = false;

    if (!verified) {
        verified = true;
        FILE *versionFile = fopen("/proc/version", "r");
        if (versionFile != NULL) {
            char versionInfo[512];
            if (fgets(versionInfo, sizeof(versionInfo), versionFile) != NULL) {
                /* lower-case the line for a case-insensitive substring match */
                for (int i = 0; versionInfo[i] != '\0'; i++) {
                    versionInfo[i] = tolower((unsigned char)versionInfo[i]);
                }
                if (strstr(versionInfo, "microsoft") != NULL) {
                    /* Yes, running inside WSL */
                    ret = true;
                }
            }
            fclose(versionFile);
        } else {
            /* Bug fix: diagnostics go to stderr, not stdout. */
            fprintf(stderr, "ERROR: opening /proc/version failed\n");
        }
    }
    return ret;
}

View File

@@ -1,24 +0,0 @@
/**
* SPDX-FileCopyrightText: Copyright (c) 2024-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LicenseRef-NvidiaProprietary
*
* NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
* property and proprietary rights in and to this material, related
* documentation and any modifications thereto. Any use, reproduction,
* disclosure or distribution of this material and related documentation
* without an express license agreement from NVIDIA CORPORATION or
* its affiliates is strictly prohibited.
*/
#ifndef _WSL_UTILS_
#define _WSL_UTILS_
#include <stdio.h>
#include <stdbool.h>
#include <ctype.h>
#include <string.h>
/* Function to check if running inside Windows Subsystem For Linux (WSL) */
bool is_running_in_WSL(void);
#endif //_WSL_UTILS_

897
nvbuf_utils.h Normal file
View File

@@ -0,0 +1,897 @@
/*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA Multimedia Utilities: Buffering and Transform/Composition/Blending</b>
*
*/
/**
* @defgroup ee_nvbuffering_group NvBufUtils API (Deprecated)
* @ingroup ds_nvbuf_api
* NVIDIA buffering utility library for use by applications.
* The utility also transforms, composites, and blends.
* @{
*/
#ifndef _NVBUF_UTILS_H_
#define _NVBUF_UTILS_H_
#ifdef __cplusplus
extern "C"
{
#endif
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <errno.h>
#include <stdbool.h>
/**
* Defines the maximum number of planes for a video frame.
*/
#define MAX_NUM_PLANES 4
/**
* Defines the maximum number of input video frames that can be used for composition.
*/
#define MAX_COMPOSITE_FRAME 16
/**
* Defines the default values for chroma subsampling.
* The default value matches JPEG/MPEG use cases.
*/
#define NVBUF_CHROMA_SUBSAMPLING_HORIZ_DEFAULT 0
#define NVBUF_CHROMA_SUBSAMPLING_VERT_DEFAULT 1
/**
* Defines the maximum number of sync object parameters.
*/
#define NVBUF_MAX_SYNCOBJ_PARAMS 5
/**
* Use this value to represent an infinite wait interval.
* A value of zero should not be interpreted as infinite,
* it should be interpreted as "time out immediately" and
* simply check whether the event has already happened.
*/
#define NVBUFFER_SYNCPOINT_WAIT_INFINITE 0xFFFFFFFF
/**
* Defines Payload types for NvBuffer.
*/
typedef enum
{
/** buffer payload with hardware memory handle for set of planes. */
NvBufferPayload_SurfArray,
/** buffer payload with hardware memory handle for specific memory size. */
NvBufferPayload_MemHandle,
} NvBufferPayloadType;
/**
* Defines display scan formats for NvBuffer video planes.
*/
typedef enum
{
/** Progressive scan formats. */
NvBufferDisplayScanFormat_Progressive = 0,
/** Interlaced scan formats. */
NvBufferDisplayScanFormat_Interlaced,
} NvBufferDisplayScanFormat;
/**
* Defines Layout formats for NvBuffer video planes.
*/
typedef enum
{
/** Pitch Layout. */
NvBufferLayout_Pitch,
/** BlockLinear Layout. */
NvBufferLayout_BlockLinear,
} NvBufferLayout;
/**
* Defines memory access flags for NvBuffer.
*/
typedef enum
{
/** Memory read. */
NvBufferMem_Read,
/** Memory write. */
NvBufferMem_Write,
/** Memory read & write. */
NvBufferMem_Read_Write,
} NvBufferMemFlags;
/**
* Defines tags that identify the components requesting a memory allocation.
* The tags can be used later to identify the total memory allocated to
* particular types of components.
*/
typedef enum
{
/** tag None. */
NvBufferTag_NONE = 0x0,
/** tag for Camera. */
NvBufferTag_CAMERA = 0x200,
/** tag for Jpeg Encoder/Decoder. */
NvBufferTag_JPEG = 0x1500,
/** tag for VPR Buffers. */
NvBufferTag_PROTECTED = 0x1504,
/** tag for H264/H265 Video Encoder. */
NvBufferTag_VIDEO_ENC = 0x1200,
/** tag for H264/H265/VP9 Video Decoder. */
NvBufferTag_VIDEO_DEC = 0x1400,
/** tag for Video Transform/Composite. */
NvBufferTag_VIDEO_CONVERT = 0xf01,
} NvBufferTag;
/**
* Defines color formats for NvBuffer.
*/
typedef enum
{
/** BT.601 colorspace - YUV420 multi-planar. */
NvBufferColorFormat_YUV420,
/** BT.601 colorspace - YUV420 multi-planar. */
NvBufferColorFormat_YVU420,
/** BT.601 colorspace - YUV422 multi-planar. */
NvBufferColorFormat_YUV422,
/** BT.601 colorspace - YUV420 ER multi-planar. */
NvBufferColorFormat_YUV420_ER,
/** BT.601 colorspace - YVU420 ER multi-planar. */
NvBufferColorFormat_YVU420_ER,
/** BT.601 colorspace - Y/CbCr 4:2:0 multi-planar. */
NvBufferColorFormat_NV12,
/** BT.601 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NvBufferColorFormat_NV12_ER,
/** BT.601 colorspace - Y/CbCr 4:2:0 multi-planar. */
NvBufferColorFormat_NV21,
/** BT.601 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NvBufferColorFormat_NV21_ER,
/** BT.601 colorspace - YUV 4:2:2 planar. */
NvBufferColorFormat_UYVY,
/** BT.601 colorspace - YUV ER 4:2:2 planar. */
NvBufferColorFormat_UYVY_ER,
/** BT.601 colorspace - YUV 4:2:2 planar. */
NvBufferColorFormat_VYUY,
/** BT.601 colorspace - YUV ER 4:2:2 planar. */
NvBufferColorFormat_VYUY_ER,
/** BT.601 colorspace - YUV 4:2:2 planar. */
NvBufferColorFormat_YUYV,
/** BT.601 colorspace - YUV ER 4:2:2 planar. */
NvBufferColorFormat_YUYV_ER,
/** BT.601 colorspace - YUV 4:2:2 planar. */
NvBufferColorFormat_YVYU,
/** BT.601 colorspace - YUV ER 4:2:2 planar. */
NvBufferColorFormat_YVYU_ER,
/** LegacyRGBA colorspace - BGRA-8-8-8-8 planar. */
NvBufferColorFormat_ABGR32,
/** LegacyRGBA colorspace - XRGB-8-8-8-8 planar. */
NvBufferColorFormat_XRGB32,
/** LegacyRGBA colorspace - ARGB-8-8-8-8 planar. */
NvBufferColorFormat_ARGB32,
/** BT.601 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV12_10LE,
/** BT.709 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV12_10LE_709,
/** BT.709_ER colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV12_10LE_709_ER,
/** BT.2020 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV12_10LE_2020,
/** BT.601 colorspace - Y/CrCb 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV21_10LE,
/** BT.601 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
NvBufferColorFormat_NV12_12LE,
/** BT.2020 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
NvBufferColorFormat_NV12_12LE_2020,
/** BT.601 colorspace - Y/CrCb 4:2:0 12-bit multi-planar. */
NvBufferColorFormat_NV21_12LE,
/** BT.709 colorspace - YUV420 multi-planar. */
NvBufferColorFormat_YUV420_709,
/** BT.709 colorspace - YUV420 ER multi-planar. */
NvBufferColorFormat_YUV420_709_ER,
/** BT.709 colorspace - Y/CbCr 4:2:0 multi-planar. */
NvBufferColorFormat_NV12_709,
/** BT.709 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NvBufferColorFormat_NV12_709_ER,
/** BT.2020 colorspace - YUV420 multi-planar. */
NvBufferColorFormat_YUV420_2020,
/** BT.2020 colorspace - Y/CbCr 4:2:0 multi-planar. */
NvBufferColorFormat_NV12_2020,
/** BT.601 colorspace - YUV444 multi-planar. */
NvBufferColorFormat_YUV444,
/** Optical flow */
NvBufferColorFormat_SignedR16G16,
/** Optical flow SAD calculation Buffer format */
NvBufferColorFormat_A32,
/** 8-bit grayscale. */
NvBufferColorFormat_GRAY8,
/** BT.601 colorspace - Y/CbCr 4:2:2 multi-planar. */
NvBufferColorFormat_NV16,
/** BT.601 colorspace - Y/CbCr 4:2:2 10-bit semi-planar. */
NvBufferColorFormat_NV16_10LE,
/** BT.601 colorspace - Y/CbCr 4:4:4 multi-planar. */
NvBufferColorFormat_NV24,
/** BT.601 colorspace - Y/CrCb 4:4:4 10-bit multi-planar. */
NvBufferColorFormat_NV24_10LE,
/** BT.601_ER colorspace - Y/CbCr 4:2:2 multi-planar. */
NvBufferColorFormat_NV16_ER,
/** BT.601_ER colorspace - Y/CbCr 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_ER,
/** BT.709 colorspace - Y/CbCr 4:2:2 multi-planar. */
NvBufferColorFormat_NV16_709,
/** BT.709 colorspace - Y/CbCr 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_709,
/** BT.709_ER colorspace - Y/CbCr 4:2:2 multi-planar. */
NvBufferColorFormat_NV16_709_ER,
/** BT.709_ER colorspace - Y/CbCr 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_709_ER,
/** BT.709 colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_10LE_709,
/** BT.709 ER colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_10LE_709_ER,
/** BT.2020 colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_10LE_2020,
/** BT.2020 colorspace - Y/CbCr 12 bit 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_12LE_2020,
/** Non-linear RGB BT.709 colorspace - RGBA-10-10-10-2 planar. */
NvBufferColorFormat_RGBA_10_10_10_2_709,
/** Non-linear RGB BT.2020 colorspace - RGBA-10-10-10-2 planar. */
NvBufferColorFormat_RGBA_10_10_10_2_2020,
/** Non-linear RGB BT.709 colorspace - BGRA-10-10-10-2 planar. */
NvBufferColorFormat_BGRA_10_10_10_2_709,
/** Non-linear RGB BT.2020 colorspace - BGRA-10-10-10-2 planar. */
NvBufferColorFormat_BGRA_10_10_10_2_2020,
/** Invalid color format. */
NvBufferColorFormat_Invalid,
} NvBufferColorFormat;
/**
* Defines video flip methods.
*/
typedef enum
{
/** Video flip none. */
NvBufferTransform_None,
/** Video flip rotate 90 degree counter-clockwise. */
NvBufferTransform_Rotate90,
/** Video flip rotate 180 degree. */
NvBufferTransform_Rotate180,
/** Video flip rotate 270 degree counter-clockwise. */
NvBufferTransform_Rotate270,
/** Video flip with respect to X-axis. */
NvBufferTransform_FlipX,
/** Video flip with respect to Y-axis. */
NvBufferTransform_FlipY,
/** Video flip transpose. */
NvBufferTransform_Transpose,
/** Video flip inverse transpose. */
NvBufferTransform_InvTranspose,
} NvBufferTransform_Flip;
/**
* Defines transform video filter types.
*/
typedef enum
{
/** transform filter nearest. */
NvBufferTransform_Filter_Nearest,
/** transform filter bilinear. */
NvBufferTransform_Filter_Bilinear,
/** transform filter 5 tap. */
NvBufferTransform_Filter_5_Tap,
/** transform filter 10 tap. */
NvBufferTransform_Filter_10_Tap,
/** transform filter smart. */
NvBufferTransform_Filter_Smart,
/** transform filter nicest. */
NvBufferTransform_Filter_Nicest,
} NvBufferTransform_Filter;
/**
* Defines flags to indicate for valid transform.
*/
typedef enum {
/** transform flag to crop source rectangle. */
NVBUFFER_TRANSFORM_CROP_SRC = 1,
/** transform flag to crop destination rectangle. */
NVBUFFER_TRANSFORM_CROP_DST = 1 << 1,
/** transform flag to set filter type. */
NVBUFFER_TRANSFORM_FILTER = 1 << 2,
/** transform flag to set flip method. */
NVBUFFER_TRANSFORM_FLIP = 1 << 3,
} NvBufferTransform_Flag;
/**
* Defines flags that specify valid composition/blending operations.
*/
typedef enum {
/** flag to set for composition. */
NVBUFFER_COMPOSITE = 1,
/** flag to set for blending. */
NVBUFFER_BLEND = 1 << 1,
/** composition flag to set filter type. */
NVBUFFER_COMPOSITE_FILTER = 1 << 2,
} NvBufferComposite_Flag;
/**
* Holds parameters for buffer sync point object.
* sync object params is simply a data structure containing [sync point ID,value] pair.
* This can be used by clients to describe an event that might want to wait for.
*/
typedef struct _NvBufferSyncObjParams
{
uint32_t syncpointID;
uint32_t value;
}NvBufferSyncObjParams;
/**
* buffer sync point object.
*/
typedef struct _NvBufferSyncObjRec
{
NvBufferSyncObjParams insyncobj[NVBUF_MAX_SYNCOBJ_PARAMS];
uint32_t num_insyncobj;
NvBufferSyncObjParams outsyncobj;
uint32_t use_outsyncobj;
}NvBufferSyncObj;
/**
* Holds composition background r,g,b colors.
*/
typedef struct
{
/** background color value for r. */
float r;
/** background color value for g. */
float g;
/** background color value for b. */
float b;
}NvBufferCompositeBackground;
/**
* Holds coordinates for a rectangle.
*/
typedef struct
{
/** rectangle top. */
uint32_t top;
/** rectangle left. */
uint32_t left;
/** rectangle width. */
uint32_t width;
/** rectangle height. */
uint32_t height;
}NvBufferRect;
/**
* Holds an opaque NvBuffer session type required for parallel buffer
* transformations and compositions. Operations using a single session are
* scheduled sequentially, after the previous operation finishes. Operations for
* multiple sessions are scheduled in parallel.
*/
typedef struct _NvBufferSession * NvBufferSession;
/**
* Holds Chroma Subsampling parameters.
*/
typedef struct _NvBufferChromaSubSamplingParams
{
/** location settings */
uint8_t chromaLocHoriz;
uint8_t chromaLocVert;
}NvBufferChromaSubsamplingParams;
#define NVBUF_CHROMA_SUBSAMPLING_PARAMS_DEFAULT \
{ \
NVBUF_CHROMA_SUBSAMPLING_HORIZ_DEFAULT, \
NVBUF_CHROMA_SUBSAMPLING_VERT_DEFAULT \
}
/**
* Holds the input parameters for hardware buffer creation.
*/
typedef struct _NvBufferCreateParams
{
/** width of the buffer. */
int32_t width;
/** height of the buffer. */
int32_t height;
/** payload type of the buffer. */
NvBufferPayloadType payloadType;
/** size of the memory. (Applicable for NvBufferPayload_MemHandle) */
int32_t memsize;
/** layout of the buffer. */
NvBufferLayout layout;
/** colorformat of the buffer. */
NvBufferColorFormat colorFormat;
/** tag to associate with the buffer. */
NvBufferTag nvbuf_tag;
}NvBufferCreateParams;
/**
 * Holds parameters for a hardware buffer.
 * Filled in by NvBufferGetParams(); per-plane arrays are valid for the
 * first num_planes entries.
 */
typedef struct _NvBufferParams
{
/** Holds the DMABUF FD of the hardware buffer. */
uint32_t dmabuf_fd;
/** Pointer to the hardware buffer memory. */
void *nv_buffer;
/** Payload type of the buffer. */
NvBufferPayloadType payloadType;
/** Size of the memory. (Applicable for NvBufferPayload_MemHandle.) */
int32_t memsize;
/** Size of the hardware buffer. */
uint32_t nv_buffer_size;
/** Video format type of the hardware buffer. */
NvBufferColorFormat pixel_format;
/** Number of planes of the hardware buffer. */
uint32_t num_planes;
/** Width of each plane of the hardware buffer. */
uint32_t width[MAX_NUM_PLANES];
/** Height of each plane of the hardware buffer. */
uint32_t height[MAX_NUM_PLANES];
/** Pitch of each plane of the hardware buffer. */
uint32_t pitch[MAX_NUM_PLANES];
/** Memory offset values of each video plane of the hardware buffer. */
uint32_t offset[MAX_NUM_PLANES];
/** Size of each video plane of the hardware buffer. */
uint32_t psize[MAX_NUM_PLANES];
/** Layout type of each plane of the hardware buffer. */
uint32_t layout[MAX_NUM_PLANES];
}NvBufferParams;
/**
 * Holds extended parameters for a hardware buffer.
 * Filled in by NvBufferGetParamsEx(); embeds the basic NvBufferParams.
 */
typedef struct _NvBufferParamsEx
{
/** NvBuffer basic parameters. */
NvBufferParams params;
/** Offset in bytes from the start of the buffer to the first valid byte.
(Applicable for NvBufferPayload_MemHandle.) */
int32_t startofvaliddata;
/** Size of the valid data from the first to the last valid byte.
(Applicable for NvBufferPayload_MemHandle.) */
int32_t sizeofvaliddatainbytes;
/** Display scan format - progressive/interlaced. */
NvBufferDisplayScanFormat scanformat[MAX_NUM_PLANES];
/** Offset of the second field for an interlaced buffer. */
uint32_t secondfieldoffset[MAX_NUM_PLANES];
/** Block height (log2) of the planes for blockLinear-layout hardware buffers. */
uint32_t blockheightlog2[MAX_NUM_PLANES];
/** Physical address of the allocated planes. */
uint32_t physicaladdress[MAX_NUM_PLANES];
/** Flags associated with the planes. */
uint64_t flags[MAX_NUM_PLANES];
/** Metadata associated with the hardware buffer. */
void *payloadmetaInfo;
/** Chroma subsampling parameters. */
NvBufferChromaSubsamplingParams chromaSubsampling;
/** Buffer VPR information (presumably "video protected region" — confirm). */
bool is_protected;
/** Buffer sync point object parameters. */
NvBufferSyncObj syncobj;
/** Reserved field. */
void *reserved;
}NvBufferParamsEx;
/**
 * Holds parameters related to compositing/blending.
 * Used by NvBufferComposite(); per-frame arrays are valid for the first
 * input_buf_count entries.
 */
typedef struct _NvBufferCompositeParams
{
/** Flags indicating which of the composition/blending parameters are valid. */
uint32_t composite_flag;
/** Number of input buffers to be composited. */
uint32_t input_buf_count;
/** Filters to use for composition, one per input frame. */
NvBufferTransform_Filter composite_filter[MAX_COMPOSITE_FRAME];
/** Alpha values of the input buffers for blending, one per input frame. */
float dst_comp_rect_alpha[MAX_COMPOSITE_FRAME];
/** Source rectangle coordinates of the input buffers for composition. */
NvBufferRect src_comp_rect[MAX_COMPOSITE_FRAME];
/** Destination rectangle coordinates of the input buffers for composition. */
NvBufferRect dst_comp_rect[MAX_COMPOSITE_FRAME];
/** Background color values for composition. */
NvBufferCompositeBackground composite_bgcolor;
/** NvBufferSession to be used for composition. If NULL, the default session
 * is used. */
NvBufferSession session;
}NvBufferCompositeParams;
/**
 * Holds parameters for buffer transform functions.
 * Used by NvBufferTransform(), NvBufferTransformEx(), and
 * NvBufferTransformAsync().
 */
typedef struct _NvBufferTransformParams
{
/** Flags indicating which of the transform parameters are valid. */
uint32_t transform_flag;
/** Flip method. */
NvBufferTransform_Flip transform_flip;
/** Transform filter. */
NvBufferTransform_Filter transform_filter;
/** Source rectangle coordinates for the crop operation. */
NvBufferRect src_rect;
/** Destination rectangle coordinates for the crop operation. */
NvBufferRect dst_rect;
/** NvBufferSession to be used for transform. If NULL, the default session
 * is used. */
NvBufferSession session;
}NvBufferTransformParams;
/**
* This method can be used to wait on sync point ID.
*
* @param[in] syncobj_params sync point object parameters.
* @param[in] timeout sync point wait timeout value.
*
* @returns 0 for success, -1 for failure
*/
int NvBufferSyncObjWait (NvBufferSyncObjParams *syncobj_params, unsigned int timeout);
/**
* This method can be used to get hardware Buffer struct size.
*
* @returns hardware Buffer struct size.
*/
int NvBufferGetSize (void);
/**
* Creates an instance of EGLImage from a DMABUF FD.
*
* @param[in] display An EGLDisplay object used during the creation
* of the EGLImage. If NULL, nvbuf_utils() uses
* its own instance of EGLDisplay.
* @param[in] dmabuf_fd DMABUF FD of the buffer from which the EGLImage
* is to be created.
*
* @returns `EGLImageKHR` for success, `NULL` for failure
*/
EGLImageKHR NvEGLImageFromFd (EGLDisplay display, int dmabuf_fd);
/**
* Destroys an EGLImage object.
*
* @param[in] display An EGLDisplay object used to destroy the EGLImage.
* If NULL, nvbuf_utils() uses its own instance of
* EGLDisplay.
* @param[in] eglImage The EGLImageKHR object to be destroyed.
*
* @returns 0 for success, -1 for failure
*/
int NvDestroyEGLImage (EGLDisplay display, EGLImageKHR eglImage);
/**
* Allocates a hardware buffer (deprecated).
*
* @deprecated Use NvBufferCreateEx() instead.
* @param[out] dmabuf_fd Returns the DMABUF FD of the hardware buffer.
* @param[in] width Buffer width, in bytes.
* @param[in] height Buffer height, in bytes.
* @param[in] layout Layout of the buffer.
* @param[in] colorFormat Color format of the buffer.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufferCreate (int *dmabuf_fd, int width, int height,
NvBufferLayout layout, NvBufferColorFormat colorFormat);
/**
* Allocates a hardware buffer.
*
* @param[out] dmabuf_fd Returns the DMABUF FD of the hardware buffer.
* @param[in] input_params Input parameters for hardware buffer creation.
*
* @returns 0 for success, -1 for failure
*/
int NvBufferCreateEx (int *dmabuf_fd, NvBufferCreateParams *input_params);
/**
* Allocates a hardware buffer for interlace scan format.
*
* @param[out] dmabuf_fd Returns the DMABUF FD of the hardware buffer.
* @param[in] input_params Input parameters for hardware buffer creation.
*
* @returns 0 for success, -1 for failure
*/
int NvBufferCreateInterlace (int *dmabuf_fd, NvBufferCreateParams *input_params);
/**
* Allocates a hardware buffer with a given chroma subsampling location.
*
* @param[out] dmabuf_fd Returns the DMABUF FD of the hardware buffer.
* @param[in] input_params Input parameters for hardware buffer creation.
* @param[in] chromaSubsampling Chroma location parameters.
*
* @returns 0 for success, -1 for failure
*/
int NvBufferCreateWithChromaLoc (int *dmabuf_fd, NvBufferCreateParams *input_params, NvBufferChromaSubsamplingParams *chromaSubsampling);
/**
* Gets buffer parameters.
* @param[in] dmabuf_fd `DMABUF FD` of buffer.
* @param[out] params A pointer to the structure to fill with parameters.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferGetParams (int dmabuf_fd, NvBufferParams *params);
/**
* Gets buffer extended parameters.
* @param[in] dmabuf_fd `DMABUF FD` of buffer.
* @param[out] exparams A pointer to the structure to fill with extended parameters.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferGetParamsEx (int dmabuf_fd, NvBufferParamsEx *exparams);
/**
* Destroys a hardware buffer.
* @param[in] dmabuf_fd Specifies the `dmabuf_fd` `hw_buffer` to destroy.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferDestroy (int dmabuf_fd);
/**
* Extracts the `dmabuf_fd` from the hardware buffer.
* @param[in] nvbuf Specifies the `hw_buffer`.
* @param[out] dmabuf_fd Returns DMABUF FD of `hw_buffer`.
*
* @returns 0 for success, -1 for failure.
*/
int ExtractFdFromNvBuffer (void *nvbuf, int *dmabuf_fd);
/**
* Releases the `dmabuf_fd` buffer.
* @see ExtractFdFromNvBuffer()
* @param[in] dmabuf_fd Specifies the `dmabuf_fd` to release.
*
* @returns 0 for success, -1 for failure.
*/
int NvReleaseFd (int dmabuf_fd);
/**
* Syncs the hardware memory cache for the CPU.
*
* \sa NvBufferMemMap for the purpose of the function
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] plane video frame plane.
* @param[in] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemSyncForCpu (int dmabuf_fd, unsigned int plane, void **pVirtAddr);
/**
* Syncs the hardware memory cache for the CPU, API to be used for another process.
*
* \sa NvBufferMemMapEx for the purpose of the function
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] exparams extended parameters for a hardware buffer.
* @param[in] plane video frame plane.
* @param[in] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemSyncForCpuEx (int dmabuf_fd, NvBufferParamsEx *exparams, unsigned int plane, void **pVirtAddr);
/**
* Syncs the hardware memory cache for the device.
*
* \sa NvBufferMemMap for the purpose of the function
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] plane video frame plane.
* @param[in] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemSyncForDevice (int dmabuf_fd, unsigned int plane, void **pVirtAddr);
/**
* Syncs the hardware memory cache for the device, API to be used for another process.
*
* \sa NvBufferMemMapEx for the purpose of the function
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] exparams extended parameters for a hardware buffer.
* @param[in] plane video frame plane.
* @param[in] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemSyncForDeviceEx (int dmabuf_fd, NvBufferParamsEx *exparams, unsigned int plane, void **pVirtAddr);
/**
* Gets the memory-mapped virtual address of the plane.
*
* The client must call NvBufferMemSyncForCpu() with the virtual address returned
* by this function before accessing the mapped memory in CPU.
*
* After memory mapping is complete, mapped memory modification
* must be coordinated between the CPU and hardware device as
* follows:
* - CPU: If the CPU modifies any mapped memory, the client must call
* NvBufferMemSyncForDevice() before any hardware device accesses the memory.
* - Hardware device: If the mapped memory is modified by any hardware device,
* the client must call NvBufferMemSyncForCpu() before CPU accesses the memory.
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] plane video frame plane.(Applies to @ref NvBufferPayload_SurfArray.)
* @param[in] memflag NvBuffer memory flag.
* @param[out] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemMap (int dmabuf_fd, unsigned int plane, NvBufferMemFlags memflag, void **pVirtAddr);
/**
* Gets the memory-mapped virtual address of the plane, API to be used for another process.
*
* The client must call NvBufferMemSyncForCpuEx() with the virtual address returned
* by this function before accessing the mapped memory in CPU in another process.
*
* After memory mapping is complete, mapped memory modification
* must be coordinated between the CPU and hardware device as
* follows:
* - CPU: If the CPU modifies any mapped memory, the client must call
* NvBufferMemSyncForDeviceEx() before any hardware device accesses the memory.
* - Hardware device: If the mapped memory is modified by any hardware device,
* the client must call NvBufferMemSyncForCpuEx() before CPU accesses the memory.
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] exparams extended parameters for a hardware buffer.
* @param[in] plane video frame plane.(Applies to @ref NvBufferPayload_SurfArray.)
* @param[in] memflag NvBuffer memory flag.
* @param[out] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemMapEx (int dmabuf_fd, NvBufferParamsEx *exparams, unsigned int plane, NvBufferMemFlags memflag, void **pVirtAddr);
/**
* Unmaps the mapped virtual address of the plane.
*
* If the following conditions are both true, the client must call
* NvBufferMemSyncForDevice() before unmapping the memory:
* - Mapped memory was modified by the CPU.
* - Mapped memory will be accessed by a hardware device.
*
* @param[in] dmabuf_fd DMABUF FD of the buffer.
* @param[in] plane Video frame plane. Applies to
* @ref NvBufferPayload_SurfArray.
* @param[in] pVirtAddr Virtual address pointer to the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemUnMap (int dmabuf_fd, unsigned int plane, void **pVirtAddr);
/**
* Unmaps the mapped virtual address of the plane, API to be used for another process.
*
* If the following conditions are both true, the client must call
* NvBufferMemSyncForDeviceEx() before unmapping the memory in another process:
* - Mapped memory was modified by the CPU.
* - Mapped memory will be accessed by a hardware device.
*
* @param[in] dmabuf_fd DMABUF FD of the buffer.
* @param[in] exparams extended parameters for a hardware buffer.
* @param[in] plane Video frame plane. Applies to
* @ref NvBufferPayload_SurfArray.
* @param[in] pVirtAddr Virtual address pointer to the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemUnMapEx (int dmabuf_fd, NvBufferParamsEx *exparams, unsigned int plane, void **pVirtAddr);
/**
* Copies the NvBuffer plane contents to a raw buffer plane.
* @param[in] dmabuf_fd DMABUF FD of NvBuffer.
* @param[in] plane video frame plane.
* @param[in] out_width aligned width of the raw data plane.
* @param[in] out_height aligned height of the raw data plane.
* @param[in] ptr pointer to the output raw plane data.
*
* @returns 0 for success, -1 for failure.
*/
int NvBuffer2Raw (int dmabuf_fd, unsigned int plane, unsigned int out_width, unsigned int out_height, unsigned char *ptr);
/**
* Copies raw buffer plane contents to an NvBuffer plane.
* @param[in] ptr pointer to the input raw plane data.
* @param[in] plane video frame plane.
* @param[in] in_width aligned width of the raw data plane.
* @param[in] in_height aligned height of the raw data plane.
* @param[in] dmabuf_fd DMABUF FD of NvBuffer.
*
* @returns 0 for success, -1 for failure.
*/
int Raw2NvBuffer (unsigned char *ptr, unsigned int plane, unsigned int in_width, unsigned int in_height, int dmabuf_fd);
/**
* Creates a new NvBufferSession for parallel scheduling of
* buffer transformations and compositions.
*
* @returns A session pointer, NULL for failure.
*/
NvBufferSession NvBufferSessionCreate(void);
/**
* Destroys an existing \ref NvBufferSession.
* @param[in] session An existing NvBufferSession.
*/
void NvBufferSessionDestroy(NvBufferSession session);
/**
* Transforms one DMA buffer to another DMA buffer.
* This function can support transforms for copying, scaling, flipping, rotating, and cropping.
* @param[in] src_dmabuf_fd DMABUF FD of source buffer
* @param[in] dst_dmabuf_fd DMABUF FD of destination buffer
* @param[in] transform_params transform parameters
*
* @return 0 for success, -1 for failure.
*/
int NvBufferTransform (int src_dmabuf_fd, int dst_dmabuf_fd, NvBufferTransformParams *transform_params);
/**
* Transforms one DMA buffer to another DMA buffer, API to be used for another process.
* This function can support transforms for copying, scaling, flipping, rotating, and cropping.
* @param[in] src_dmabuf_fd DMABUF FD of source buffer
* @param[in] input_params extended input parameters for a hardware buffer.
* @param[in] dst_dmabuf_fd DMABUF FD of destination buffer
* @param[in] output_params extended output parameters for a hardware buffer.
* @param[in] transform_params transform parameters
*
* @return 0 for success, -1 for failure.
*/
int NvBufferTransformEx (int src_dmabuf_fd, NvBufferParamsEx *input_params, int dst_dmabuf_fd, NvBufferParamsEx *output_params, NvBufferTransformParams *transform_params);
/**
* Transforms one DMA buffer to another DMA buffer asynchronously (non-blocking).
* This function can support transforms for copying, scaling, flipping, rotating, and cropping.
* @param[in] src_dmabuf_fd DMABUF FD of source buffer
* @param[in] dst_dmabuf_fd DMABUF FD of destination buffer
* @param[in] transform_params transform parameters
* @param[in] syncobj nvbuffer sync point object
*
* @return 0 for success, -1 for failure.
*/
int NvBufferTransformAsync (int src_dmabuf_fd, int dst_dmabuf_fd, NvBufferTransformParams *transform_params, NvBufferSyncObj *syncobj);
/**
* \brief Composites multiple input DMA buffers to one output DMA buffer.
*
* This function can composite multiple input frames to one output.
*
* @param[in] src_dmabuf_fds An array of DMABUF FDs of source buffers.
* These buffers are composited together. Output
* is copied to the output buffer referenced by
* @a dst_dmabuf_fd.
* @param[in] dst_dmabuf_fd DMABUF FD of the compositing destination buffer.
* @param[in] composite_params Compositing parameters.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufferComposite (int *src_dmabuf_fds, int dst_dmabuf_fd, NvBufferCompositeParams *composite_params);
#ifdef __cplusplus
}
#endif
/** @} */
#endif

View File

@@ -1,13 +1,11 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: LicenseRef-NvidiaProprietary
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
* property and proprietary rights in and to this material, related
* documentation and any modifications thereto. Any use, reproduction,
* disclosure or distribution of this material and related documentation
* without an express license agreement from NVIDIA CORPORATION or
* its affiliates is strictly prohibited.
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
@@ -246,72 +244,7 @@ typedef enum
NVBUF_COLOR_FORMAT_UYVP,
/** Specifies BT.601 colorspace - 10 bit YUV ER 4:2:2 interleaved. */
NVBUF_COLOR_FORMAT_UYVP_ER,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_709_ER,
/** Specifies BT.2020 colorspace - Y/CbCr 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_2020,
/** Specifies BT.601 colorspace - Y/CbCr 4:4:4 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_10LE,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:4:4 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_10LE_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:4:4 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_10LE_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:4:4 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_10LE_709_ER,
/** Specifies BT.2020 colorspace - Y/CbCr 4:4:4 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_10LE_2020,
/** Specifies BT.601 colorspace - Y/CbCr 4:4:4 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_12LE,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:4:4 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_12LE_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:4:4 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_12LE_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:4:4 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_12LE_709_ER,
/** Specifies BT.2020 colorspace - Y/CbCr 4:4:4 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_YUV444_12LE_2020,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_12LE_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_12LE_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:2:0 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_12LE_709_ER,
/** Specifies 8 bit GRAY scale ER - single plane */
NVBUF_COLOR_FORMAT_GRAY8_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:2:2 planar */
NVBUF_COLOR_FORMAT_UYVY_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:2:2 planar */
NVBUF_COLOR_FORMAT_UYVY_709_ER,
/** Specifies BT.2020 colorspace - Y/CbCr 4:2:2 planar */
NVBUF_COLOR_FORMAT_UYVY_2020,
/** Specifies 16 bit GRAY scale - single plane */
NVBUF_COLOR_FORMAT_GRAY16_LE,
/** Specifies 64 bit BGRA (B16 G16 R16 A16) interleaved */
NVBUF_COLOR_FORMAT_BGRA64_LE,
/** Specifies BT.2020 colorspace - Y/CbCr 4:2:2 multi-planar. */
NVBUF_COLOR_FORMAT_NV16_2020,
/** Specifies BT.601_ER colorspace - Y/CbCr 4:2:2 10-bit semi-planar. */
NVBUF_COLOR_FORMAT_NV16_10LE_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:2:2 10-bit semi-planar. */
NVBUF_COLOR_FORMAT_NV16_10LE_709,
/** Specifies BT.709_ER colorspace - Y/CbCr 4:2:2 10-bit semi-planar. */
NVBUF_COLOR_FORMAT_NV16_10LE_709_ER,
/** Specifies BT.2020 colorspace - Y/CbCr 4:2:2 10-bit semi-planar. */
NVBUF_COLOR_FORMAT_NV16_10LE_2020,
/** Specifies BT.601 colorspace - Y/CbCr 4:2:2 12-bit semi-planar. */
NVBUF_COLOR_FORMAT_NV16_12LE,
/** Specifies BT.601_ER colorspace - Y/CbCr 4:2:2 12-bit semi-planar. */
NVBUF_COLOR_FORMAT_NV16_12LE_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:2:2 12-bit semi-planar. */
NVBUF_COLOR_FORMAT_NV16_12LE_709,
/** Specifies BT.709_ER colorspace - Y/CbCr 4:2:2 12-bit semi-planar. */
NVBUF_COLOR_FORMAT_NV16_12LE_709_ER,
/** Specifies BT.2020 colorspace - Y/CbCr 4:2:2 12-bit semi-planar. */
NVBUF_COLOR_FORMAT_NV16_12LE_2020,
NVBUF_COLOR_FORMAT_LAST
} NvBufSurfaceColorFormat;
@@ -375,9 +308,7 @@ typedef struct NvBufSurfacePlaneParamsEx
uint32_t physicaladdress[NVBUF_MAX_PLANES];
/** flags associated with planes */
uint64_t flags[NVBUF_MAX_PLANES];
/** DRM modifier for plane */
uint64_t drmModifier[NVBUF_MAX_PLANES];
/** Holds the reserved space for future use. */
void * _reserved[STRUCTURE_PADDING * NVBUF_MAX_PLANES];
} NvBufSurfacePlaneParamsEx;
@@ -400,25 +331,19 @@ typedef struct NvBufSurfacePlaneParams
uint32_t psize[NVBUF_MAX_PLANES];
/** Holds the number of bytes occupied by a pixel in each plane. */
uint32_t bytesPerPix[NVBUF_MAX_PLANES];
/** Holds the reserved space for future use. */
void * _reserved[STRUCTURE_PADDING * NVBUF_MAX_PLANES];
} NvBufSurfacePlaneParams;
/**
* Holds Chroma Subsampling parameters for NvBufSurface allocation.
* The members chromaLocHoriz and chromaLocVert accept these values:
* 0: Left horizontal or top vertical position
* 1: Center horizontal or center vertical position
* 2: Right horizontal or bottom vertical position
*/
typedef struct NvBufSurfaceChromaSubsamplingParams
{
/** location settings */
uint8_t chromaLocHoriz;
uint8_t chromaLocVert;
/** Reserved for alignment */
uint8_t _reserved[6];
} NvBufSurfaceChromaSubsamplingParams;
/**
@@ -444,8 +369,6 @@ typedef struct NvBufSurfaceCreateParams {
NvBufSurfaceLayout layout;
/** Holds the type of memory to be allocated. */
NvBufSurfaceMemType memType;
/** Holds the reserved space for future use. */
void * _reserved[STRUCTURE_PADDING];
} NvBufSurfaceCreateParams;
/**
@@ -453,23 +376,15 @@ typedef struct NvBufSurfaceCreateParams {
* (Applicable for NvBufSurfaceAllocate API)
*/
typedef struct NvBufSurfaceAllocateParams {
/** Hold legacy NvBufSurface creation parameters */
/** Hold legacy NvBufSurface creation parameters */
NvBufSurfaceCreateParams params;
/** Display scan format */
/** Display scan format */
NvBufSurfaceDisplayScanFormat displayscanformat;
/** Chroma Subsampling parameters */
/** Chroma Subsampling parameters */
NvBufSurfaceChromaSubsamplingParams chromaSubsampling;
/** components tag to be used for memory allocation */
/** components tag to be used for memory allocation */
NvBufSurfaceTag memtag;
/** disable pitch padding allocation only applicable for cuda and system memory allocation
pitch would be width times bytes per pixel for the plane, for odd width it would be
multiple of 2, also note for some non standard video resolution cuda kernels may fail
due to unaligned pitch
*/
bool disablePitchPadding;
/** Used void* from custom param for 64 bit machine, using other uint32_t param */
uint32_t _reservedParam;
/** Holds the reserved space for future use. */
void * _reserved[STRUCTURE_PADDING];
} NvBufSurfaceAllocateParams;
@@ -481,11 +396,7 @@ typedef struct NvBufSurfaceMappedAddr {
void * addr[NVBUF_MAX_PLANES];
/** Holds a pointer to a mapped EGLImage. */
void *eglImage;
/** Holds a pointer to a mapped NVRM memory */
void *nvmmPtr;
/** Holds a pointer to a mapped CUDA memory */
void *cudaPtr;
/** Holds the reserved space for future use. */
void * _reserved[STRUCTURE_PADDING];
} NvBufSurfaceMappedAddr;
@@ -510,32 +421,6 @@ typedef struct NvBufSurfaceParamsEx {
void * _reserved[STRUCTURE_PADDING];
} NvBufSurfaceParamsEx;
/**
* Holds information of CUDA buffer.
* Applicable for tegra OpenRM only.
*/
typedef struct NvBufSurfaceCudaBuffer {
/**
* Holds a base pointer to allocated CUDA memory.
* It is different from dataPtr when CUDA allocated
* address is not page aligned for image buffers.
* It is same as dataPtr for other buffers.
*/
void *basePtr;
/**
* Holds a page aligned data pointer to CUDA memory for image buffers
* if CUDA allocated address is not page aligned.
* It is same as basePtr for other buffers.
*/
void *dataPtr;
/** Holds a pointer to external CUDA memory for imported CUDA buffers */
void *extMem;
/** Holds a pointer to external CUDA mipmaped array for imported CUDA buffers */
void *mipmap;
/** Reserved */
uint8_t reserved[64];
} NvBufSurfaceCudaBuffer;
/**
* Hold the information of single buffer in the batch.
*/
@@ -565,10 +450,8 @@ typedef struct NvBufSurfaceParams {
NvBufSurfaceMappedAddr mappedAddr;
/** pointers of extended parameters of single buffer in the batch.*/
NvBufSurfaceParamsEx *paramex;
/** Holds a pointer to CUDA buffer. Applicable for only CUDA Device and CUDA Host memory on tegra OpenRM.*/
NvBufSurfaceCudaBuffer *cudaBuffer;
void * _reserved[STRUCTURE_PADDING];
void * _reserved[STRUCTURE_PADDING - 1];
} NvBufSurfaceParams;
/**
@@ -589,117 +472,10 @@ typedef struct NvBufSurface {
NvBufSurfaceMemType memType;
/** Holds a pointer to an array of batched buffers. */
NvBufSurfaceParams *surfaceList;
/** Holds a flag for Imported buffer. */
bool isImportedBuf;
void * _reserved[STRUCTURE_PADDING];
} NvBufSurface;
/**
* Holds plane parameters to map the buffer received from another process.
*/
typedef struct NvBufSurfaceMapPlaneParams
{
/** Holds the widths of planes */
uint32_t width;
/** Holds the heights of planes */
uint32_t height;
/** Holds the pitches of planes in bytes */
uint32_t pitch;
/** Holds the offsets of planes in bytes */
uint32_t offset;
/** Holds the sizes of planes in bytes */
uint32_t psize;
/** Holds offset of the second field for interlaced buffer */
uint32_t secondfieldoffset;
/** Holds block height of the planes for blockLinear layout buffer */
uint32_t blockheightlog2;
/** Holds flags associated with the planes */
uint64_t flags;
/** Reserved */
uint8_t reserved[64];
} NvBufSurfaceMapPlaneParams;
/**
* CUDA IPC memory handle for NvBufSurface
*/
typedef struct NvBufSurfaceCudaIpcMemHandle_t
{
char reserved[64];
} NvBufSurfaceCudaIpcMemHandle;
/**
* The extended map parameters NvBufSurface
*/
typedef struct NvBufSurfaceExtendedMapParams_t
{
NvBufSurfaceCudaIpcMemHandle memHandle;
void *reserved[64];
} NvBufSurfaceExtendedMapParams;
/**
* Holds buffer parameters to map the buffer received from another process.
*/
typedef struct NvBufSurfaceMapParams {
/** Holds the number of planes. */
uint32_t num_planes;
/** Holds a GPU ID */
uint32_t gpuId;
/** Holds a DMABUF FD */
uint64_t fd;
/** Holds the total size of allocated memory */
uint32_t totalSize;
/** Holds type of memory */
NvBufSurfaceMemType memType;
/** Holds BL or PL layout */
NvBufSurfaceLayout layout;
/** Holds display scan format */
NvBufSurfaceDisplayScanFormat scanformat;
/** Holds the color format */
NvBufSurfaceColorFormat colorFormat;
/** Holds chroma subsampling parameters */
NvBufSurfaceChromaSubsamplingParams chromaSubsampling;
/** Holds plane parameters */
NvBufSurfaceMapPlaneParams planes[NVBUF_MAX_PLANES];
/** Holds the extended Map parameters */
void *extendedMapParams;
/** Holds the reserved space for future use. */
void *_reserved[STRUCTURE_PADDING];
} NvBufSurfaceMapParams;
/**
* Holds information about mapped CUDA buffer
*/
typedef struct NvBufSurfaceNvmmBuffer {
/** Holds a pointer to mapped nvmm memory */
void *dataPtr;
/** Holds a DMABUF FD */
uint64_t bufferDesc;
/** Reserved */
uint8_t reserved[64];
} NvBufSurfaceNvmmBuffer;
/**
* Defines the type of underlying kernel driver detected for GPU access.
*/
typedef enum {
NVBUF_DRIVER_TYPE_UNKNOWN = 0,
NVBUF_DRIVER_TYPE_NVGPU,
NVBUF_DRIVER_TYPE_RM
} NvBufSurfaceDriverType;
/**
* Holds information about the underlying device.
*/
typedef struct NvBufSurfaceDeviceInfo {
/** The detected device type (nvgpu, OpenRM, etc.). */
NvBufSurfaceDriverType driverType;
/** Indicates if VIC is present on the platform. */
bool isVicPresent;
/** Reserved for future use. */
uint8_t reserved[64];
} NvBufSurfaceDeviceInfo;
/**
* \brief Allocates a batch of buffers.
*
@@ -956,118 +732,7 @@ int NvBufSurfaceMapEglImage (NvBufSurface *surf, int index);
*/
int NvBufSurfaceUnMapEglImage (NvBufSurface *surf, int index);
/**
* \brief Import parameters received from another process and create hardware buffer.
*
* Calling process must need to call NvBufferDestroy() to remove reference count for
* hardware buffer handle of the imported DMA buffer.
*
* @param[out] out_nvbuf_surf Pointer to hardware buffer.
* @param[in] in_params Parameters to create hardware buffer.
*
* @return 0 for success, -1 for failure.
*/
int NvBufSurfaceImport (NvBufSurface **out_nvbuf_surf, const NvBufSurfaceMapParams *in_params);
/**
* \brief Get buffer information to map the buffer in another process.
*
* @param[in] surf Pointer to NvBufSurface structure.
* @param[in] index Index of a buffer in the batch.
* @param[out] params Pointer to NvBufSurfaceMapParams information of the buffer.
*
* @return 0 for success, -1 for failure.
*/
int NvBufSurfaceGetMapParams (const NvBufSurface *surf, int index, NvBufSurfaceMapParams *params);
/**
* \brief Creates an CUDA buffer from the memory of one or more
* \ref NvBufSurface buffers.
*
* Only memory type \ref NVBUF_MEM_SURFACE_ARRAY is supported.
*
* This function returns the created CUDA buffer by storing its address at
* \a surf->surfaceList->mappedAddr->cudaPtr. (\a surf is a pointer to
* an NvBufSurface. \a surfaceList is a pointer to an \ref NvBufSurfaceParams.
* \a mappedAddr is a pointer to an \ref NvBufSurfaceMappedAddr.
* \a cudaPtr is a pointer to an \ref NvBufSurfaceCudaBuffer.
*
* You can use this function in scenarios where a CUDA operation on Jetson
* hardware memory (identified by \ref NVBUF_MEM_SURFACE_ARRAY) is required.
* The NvBufSurfaceCudaBuffer struct provided by this function can be used
* to get dataPtr of CUDA memory.
*
* @param[in,out] surf A pointer to an NvBufSurface structure. The function
* stores a pointer to the created CUDA buffer in
* a descendant of this structure; see the notes above.
* @param[in] index Index of a buffer in the batch. -1 specifies all buffers
* in the batch.
*
* @return 0 for success, or -1 otherwise.
*/
int NvBufSurfaceMapCudaBuffer (NvBufSurface *surf, int index);
/**
 * \brief Destroys a CUDA buffer previously created by
 * \ref NvBufSurfaceMapCudaBuffer.
 *
 * @param[in] surf A pointer to an \ref NvBufSurface structure.
 * @param[in] index The index of a buffer in the batch. -1 specifies all
 * buffers in the batch.
 *
 * @return 0 if successful, or -1 otherwise.
 */
int NvBufSurfaceUnMapCudaBuffer (NvBufSurface *surf, int index);
/**
 * \brief Creates an NVMM buffer from the memory of one or more
 * \ref NvBufSurface buffers.
 *
 * Only memory types \ref NVBUF_MEM_CUDA_DEVICE and \ref NVBUF_MEM_CUDA_PINNED
 * are supported.
 *
 * This function returns the created NVMM buffer by storing its address at
 * \a surf->surfaceList->mappedAddr->nvmmPtr. (\a surf is a pointer to
 * an NvBufSurface. \a surfaceList is a pointer to an \ref NvBufSurfaceParams.
 * \a mappedAddr is a pointer to an \ref NvBufSurfaceMappedAddr.
 * \a nvmmPtr is a pointer to an NVMM buffer of memory type
 * \ref NVBUF_MEM_SURFACE_ARRAY.)
 *
 * You can use this function in scenarios where an NVBUF_MEM_SURFACE_ARRAY
 * operation on Jetson hardware memory identified by \ref NVBUF_MEM_CUDA_DEVICE
 * or \ref NVBUF_MEM_CUDA_PINNED is required.
 *
 * @param[in,out] surf A pointer to an NvBufSurface structure. The function
 * stores a pointer to the created NVMM buffer in
 * a descendant of this structure; see the notes above.
 * @param[in] index Index of a buffer in the batch. -1 specifies all buffers
 * in the batch.
 *
 * @return 0 for success, or -1 otherwise.
 */
int NvBufSurfaceMapNvmmBuffer (NvBufSurface *surf, int index);
/**
 * \brief Destroys an NVMM buffer previously created by
 * \ref NvBufSurfaceMapNvmmBuffer.
 *
 * @param[in] surf A pointer to an \ref NvBufSurface structure.
 * @param[in] index The index of a buffer in the batch. -1 specifies all
 * buffers in the batch.
 *
 * @return 0 if successful, or -1 otherwise.
 */
int NvBufSurfaceUnMapNvmmBuffer (NvBufSurface *surf, int index);
/**
 * \brief Retrieves information about the underlying GPU device driver.
 *
 * This function attempts to determine whether the system is using 'nvgpu' or
 * an OpenRM-based driver by checking the loaded kernel modules. It also checks
 * whether VIC is present on the platform.
 *
 * @param[out] info Pointer to NvBufSurfaceDeviceInfo structure.
 *
 * @return 0 if successful, or -1 otherwise.
 */
int NvBufSurfaceGetDeviceInfo (NvBufSurfaceDeviceInfo *info);
/** @} */
#ifdef __cplusplus
}

View File

@@ -1 +1 @@
jetson_38.2
jetson_35.1

View File

File diff suppressed because it is too large Load Diff