diff --git a/commitFile.txt b/commitFile.txt
new file mode 100644
index 0000000..6c73ffa
--- /dev/null
+++ b/commitFile.txt
@@ -0,0 +1,43 @@
+Updating prebuilts and/or headers
+
+ed8273ff6102bb0b4fa7975a401b12b3e95a7187 - nvbufsurface.h
+7af73b80b2f930ab91431d66cd84ec794da9e117 - v4l2_nv_extensions.h
+d27a433ddeaefb9f42d0312c23472514b0cd6a45 - gst-nvcustomevent.h
+21a860247c06670e4619b8eaae1d92db31bdd3e8 - gst-v4l2/gstv4l2.c
+e8e973c103725b65232d32817e0305d12d6ff309 - gst-v4l2/gstv4l2h264enc.c
+49a66f0ce02abc71f33e096a65645ddedf5c7f46 - gst-v4l2/gstv4l2bufferpool.c
+9f726e4439379bb399f29c68736242f21dab3dd0 - gst-v4l2/gstv4l2allocator.c
+65de802e5f162aa04518b7ade5841cc3ced01111 - gst-v4l2/Makefile
+02d142337f4b96fcb0c9f2405a3cbe90c5917cca - gst-v4l2/gstv4l2vp9enc.c
+dc1a3f7292873f1f71dc27300f97f3ab918ed79f - gst-v4l2/gstv4l2h265enc.c
+d29e3a719400c3cb27314366d48ec792a3c12363 - gst-v4l2/gstv4l2h265enc.h
+c81eacb7d88c4fb839506dd70055e30d7a9feeec - gst-v4l2/v4l2-utils.h
+b1cd923335aa60985ff9866fba91a2068e8671c7 - gst-v4l2/LICENSE.gst-nvvideo4linux2
+73b03969d7ae0a8adb374c93999c43af88ea93b2 - gst-v4l2/v4l2_calls.c
+d89a680415f6ff5acec2571cde0fce9054d8e81f - gst-v4l2/gstv4l2vp9enc.h
+b52a5ee4c739818736b9a3683442df285ebe9eda - gst-v4l2/gstv4l2videodec.c
+3f7cafe5beb4395caf2e1591bf0a835e5076031a - gst-v4l2/gstv4l2object.h
+d5952b0286c34bf13fbf5e09fe552ced0da49368 - gst-v4l2/gstv4l2videodec.h
+398c24d1eef98ec9003a06587bc3784050602cd2 - gst-v4l2/gstv4l2h26xparser.c
+39fcb2f599e6906ab0fd7ab9a46fef3ea58a8cab - gst-v4l2/gstv4l2vp8enc.h
+cbc84dccd2506afa4c8f03849c95bb28c83ef4a3 - gst-v4l2/gstv4l2av1enc.h
+a002edef13a3bbbdc41e42a7fca40e574ad1bb3e - gst-v4l2/v4l2-utils.c
+c2099692cdb374440c2a040cb6ad01bbc1549ce5 - gst-v4l2/gstv4l2h26xparser.h
+99d65d620807b5ba1ca29a838e032940c9b019cc - gst-v4l2/sei_parse.c
+b827fd6cb1e3b8ecebd6a07f8556e846e26cba17 - gst-v4l2/gstv4l2allocator.h
+489fde70531590e94d1d211a42f10f81ae68d2b9 - gst-v4l2/gstv4l2videoenc.h
+4e79cf75c4fa29791e1f5141318dc8aec13a7835 - gst-v4l2/nalutils.h
+71be284b547ee68fb0e2cd14b0aeb14734a915a1 - gst-v4l2/gstv4l2bufferpool.h
+5ecd059e5ef9be4014eface37e5e2f7598960f4e - gst-v4l2/nalutils.c
+5948d70c07e87f9b1dc403789dcbed6acfa47ad9 - gst-v4l2/gstv4l2av1enc.c
+bb104683f5e4f7402e3f765a891e149edc794e02 - gst-v4l2/gstv4l2h264enc.h
+9681f7b98dfdfbc4d845f9ce7f11c3692b923195 - gst-v4l2/gstv4l2videoenc.c
+807bc9859585a540b0f85e98f147756aab24e1bd - gst-v4l2/gstv4l2vp8enc.c
+884e5b97b9fa8d07b6153e6efe6999884922b813 - gst-v4l2/gstv4l2object.c
+20c4f7c0cb89c83256650bc3353ed82154cf3a9d - gst-v4l2/gst/gst-i18n-plugin.h
+e864ee6647f3572b144403d799f68152e9900da1 - gst-v4l2/gst/gettext.h
+499a9feb17ceabf1f1443923dffa1e0180bf5972 - gst-v4l2/gst/glib-compat-private.h
+72a34a694337f8f6da3bb94c9faced6730cbd2fc - gst-v4l2/ext/types-compat.h
+1636366b5a062e4bc1791b7bc3012ccf5635b363 - gst-v4l2/ext/v4l2-controls.h
+a745675b051a2b8434a430c80fde3f245864ca89 - gst-v4l2/ext/v4l2-common.h
+522ab8fc8531a2c758b9278d29642f5b763fd3e7 - gst-v4l2/ext/videodev2.h
diff --git a/gst-nvcustomevent.h b/gst-nvcustomevent.h
new file mode 100644
index 0000000..e5656a1
--- /dev/null
+++ b/gst-nvcustomevent.h
@@ -0,0 +1,235 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * NVIDIA GStreamer: Custom Events
+ *
+ * @b Description: This file specifies the NVIDIA GStreamer custom
+ * event functions.
+ *
+ */
+
+/**
+ * @defgroup gstreamer_nvevent Events: Custom Events API
+ *
+ * Specifies GStreamer custom event functions.
+ *
+ * @ingroup gst_mess_evnt_qry
+ * @{
+ */
+
+#ifndef __GST_NVCUSTOMEVENT_H__
+#define __GST_NVCUSTOMEVENT_H__
+
+#include <gst/gst.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FLAG(name) GST_EVENT_TYPE_##name
+
+/** Defines supported types of custom events. */
+typedef enum {
+ /** Specifies a custom event to indicate decoder drop frame interval update
+ of a particular stream. */
+ GST_NVEVENT_DEC_DROP_FRAME_INTERVAL_UPDATE
+ = GST_EVENT_MAKE_TYPE (500, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
+ /** Specifies a custom event to indicate decoder skip frame update
+ of a particular stream. */
+ GST_NVEVENT_DEC_SKIP_FRAME_UPDATE
+ = GST_EVENT_MAKE_TYPE (501, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
+ /** Specifies a custom event to enable decoder low-latency-mode
+ of a particular stream. */
+ GST_NVEVENT_DEC_ENABLE_LOW_LATENCY_MODE
+ = GST_EVENT_MAKE_TYPE (502, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
+
+ /** Specifies a custom event to indicate encoder bitrate update
+ of a particular stream. */
+ GST_NVEVENT_ENC_BITRATE_UPDATE
+ = GST_EVENT_MAKE_TYPE (503, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
+ /** Specifies a custom event to indicate encoder force IDR frame
+ of a particular stream. */
+ GST_NVEVENT_ENC_FORCE_IDR
+ = GST_EVENT_MAKE_TYPE (504, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
+ /** Specifies a custom event to indicate encoder force Intra frame
+ of a particular stream. */
+ GST_NVEVENT_ENC_FORCE_INTRA
+ = GST_EVENT_MAKE_TYPE (505, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
+ /** Specifies a custom event to indicate iframe interval update
+ of a particular stream. */
+ GST_NVEVENT_ENC_IFRAME_INTERVAL_UPDATE
+ = GST_EVENT_MAKE_TYPE (506, FLAG(DOWNSTREAM) | FLAG(SERIALIZED))
+} GstNvCustomEventType;
+#undef FLAG
+
+/**
+ * Creates a new "nv-dec-drop-frame-interval-update" event.
+ *
+ * @param[out] stream_id Stream ID of the stream for which decoder-drop-frame-interval is to be sent
+ * @param[out] interval The decoder drop-frame interval obtained corresponding to stream ID for the event.
+ */
+GstEvent * gst_nvevent_dec_drop_frame_interval_update (gchar* stream_id, guint interval);
+
+/**
+ * Parses a "nv-dec-drop-frame-interval-update" event received on the sinkpad.
+ *
+ * @param[in] event The event received on the sinkpad
+ * when the stream ID sends a dec-drop-frame-interval-update event.
+ * @param[out] stream_id A pointer to the parsed stream ID for which
+ * the event is sent.
+ * @param[out] interval A pointer to the parsed interval
+ * corresponding to stream ID for the event.
+ */
+void gst_nvevent_parse_dec_drop_frame_interval_update (GstEvent * event, gchar** stream_id, guint *interval);
+
+/**
+ * Creates a new "nv-dec-skip-frame-update" event.
+ *
+ * @param[out] stream_id Stream ID of the stream for which decoder-skip-frame-update is to be sent
+ * @param[out] frame_type The decoder frame-type to be skipped obtained corresponding to stream ID for the event.
+ */
+GstEvent * gst_nvevent_dec_skip_frame_update (gchar* stream_id, guint frame_type);
+
+/**
+ * Parses a "nv-dec-skip-frame-update" event received on the sinkpad.
+ *
+ * @param[in] event The event received on the sinkpad
+ * when the stream ID sends a skip-frame-update event.
+ * @param[out] stream_id A pointer to the parsed stream ID for which
+ * the event is sent.
+ * @param[out] frame_type A pointer to the parsed frame_type
+ * corresponding to stream ID for the event.
+ */
+void gst_nvevent_parse_dec_skip_frame_update (GstEvent * event, gchar** stream_id, guint *frame_type);
+
+
+/**
+ * Creates a new "nv-dec-enable-low-latency-mode" event.
+ *
+ * @param[out] stream_id Stream ID of the stream for which decoder-low-latency-mode is to be sent
+ * @param[out] enable The decoder low latency mode to be enabled corresponding to stream ID for the event.
+ */
+GstEvent * gst_nvevent_dec_enable_low_latency_mode (gchar* stream_id, gint enable);
+
+/**
+ * Parses a "nv-dec-enable-low-latency-mode" event received on the sinkpad.
+ *
+ * @param[in] event The event received on the sinkpad
+ *                       when the stream ID sends an enable-low-latency-mode event.
+ * @param[out] stream_id A pointer to the parsed stream ID for which
+ * the event is sent.
+ * @param[out] enable A pointer to the parsed enable flag
+ * corresponding to stream ID for the event.
+ */
+void gst_nvevent_parse_dec_enable_low_latency_mode (GstEvent * event, gchar** stream_id, gint *enable);
+
+/**
+ * Creates a new "nv-enc-bitrate-update" event.
+ *
+ * @param[out] stream_id Stream ID of the stream for which encoder-bitrate-update is to be sent
+ * @param[out] bitrate The encoder bitrate to be set corresponding to stream ID for the event.
+ */
+GstEvent * gst_nvevent_enc_bitrate_update (gchar* stream_id, guint bitrate);
+
+/**
+ * Parses a "nv-enc-bitrate-update" event received on the sinkpad.
+ *
+ * @param[in] event The event received on the sinkpad
+ * when the stream ID sends a bitrate-update event.
+ * @param[out] stream_id A pointer to the parsed stream ID for which
+ * the event is sent.
+ * @param[out] bitrate A pointer to the parsed bitrate value
+ * corresponding to stream ID for the event.
+ */
+void gst_nvevent_parse_enc_bitrate_update (GstEvent * event, gchar** stream_id, guint *bitrate);
+
+/**
+ * Creates a new "nv-enc-force-idr" event.
+ *
+ * @param[out] stream_id Stream ID of the stream for which encoder-force-idr is to be sent
+ * @param[out] force The encoder force IDR frame corresponding to stream ID for the event.
+ */
+GstEvent * gst_nvevent_enc_force_idr (gchar* stream_id, gint force);
+
+/**
+ * Parses a "nv-enc-force-idr" event received on the sinkpad.
+ *
+ * @param[in] event The event received on the sinkpad
+ * when the stream ID sends a force-idr event.
+ * @param[out] stream_id A pointer to the parsed stream ID for which
+ * the event is sent.
+ * @param[out] force A pointer to the parsed force value
+ * corresponding to stream ID for the event.
+ */
+void gst_nvevent_parse_enc_force_idr (GstEvent * event, gchar** stream_id, gint *force);
+
+/**
+ * Creates a new "nv-enc-force-intra" event.
+ *
+ * @param[out] stream_id Stream ID of the stream for which encoder-force-intra is to be sent
+ * @param[out] force The encoder force Intra frame corresponding to stream ID for the event.
+ */
+GstEvent * gst_nvevent_enc_force_intra (gchar* stream_id, gint force);
+
+/**
+ * Parses a "nv-enc-force-intra" event received on the sinkpad.
+ *
+ * @param[in] event The event received on the sinkpad
+ * when the stream ID sends a force-intra event.
+ * @param[out] stream_id A pointer to the parsed stream ID for which
+ * the event is sent.
+ * @param[out] force A pointer to the parsed force value
+ * corresponding to stream ID for the event.
+ */
+void gst_nvevent_parse_enc_force_intra (GstEvent * event, gchar** stream_id, gint *force);
+
+/**
+ * Creates a new "nv-enc-iframeinterval-update" event.
+ *
+ * @param[out] stream_id Stream ID of the stream for which encoder-iframeinterval-update is to be sent
+ * @param[out] interval The encoder iframeinterval to be set corresponding to stream ID for the event.
+ */
+GstEvent * gst_nvevent_enc_iframeinterval_update (gchar* stream_id, guint interval);
+
+/**
+ * Parses a "nv-enc-iframeinterval-update" event received on the sinkpad.
+ *
+ * @param[in] event The event received on the sinkpad
+ *                       when the stream ID sends an iframeinterval-update event.
+ * @param[out] stream_id A pointer to the parsed stream ID for which
+ * the event is sent.
+ * @param[out] interval A pointer to the parsed interval value
+ * corresponding to stream ID for the event.
+ */
+void gst_nvevent_parse_enc_iframeinterval_update (GstEvent * event, gchar** stream_id, guint *interval);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+/** @} */
diff --git a/gst-v4l2/LICENSE.gst-nvvideo4linux2 b/gst-v4l2/LICENSE.gst-nvvideo4linux2
new file mode 100644
index 0000000..e932da5
--- /dev/null
+++ b/gst-v4l2/LICENSE.gst-nvvideo4linux2
@@ -0,0 +1,397 @@
+The software listed below is licensed under the terms of the LGPLv2
+(see below). To obtain source code, contact oss-requests@nvidia.com.
+
+gst-nvvideo4linux2 (libgstnvvideo4linux2.so)
+
+------------------------------------
+
+GNU LIBRARY GENERAL PUBLIC LICENSE
+
+Version 2, June 1991
+
+Copyright (C) 1991 Free Software Foundation, Inc.
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed.
+
+[This is the first released version of the library GPL. It is
+ numbered 2 because it goes with version 2 of the ordinary GPL.]
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to share
+and change it. By contrast, the GNU General Public Licenses are intended to
+guarantee your freedom to share and change free software--to make sure the
+software is free for all its users.
+
+This license, the Library General Public License, applies to some specially
+designated Free Software Foundation software, and to any other libraries whose
+authors decide to use it. You can use it for your libraries, too.
+
+When we speak of free software, we are referring to freedom, not price. Our
+General Public Licenses are designed to make sure that you have the freedom to
+distribute copies of free software (and charge for this service if you wish),
+that you receive source code or can get it if you want it, that you can change
+the software or use pieces of it in new free programs; and that you know you can
+do these things.
+
+To protect your rights, we need to make restrictions that forbid anyone to deny
+you these rights or to ask you to surrender the rights. These restrictions
+translate to certain responsibilities for you if you distribute copies of the
+library, or if you modify it.
+
+For example, if you distribute copies of the library, whether gratis or for a
+fee, you must give the recipients all the rights that we gave you. You must make
+sure that they, too, receive or can get the source code. If you link a program
+with the library, you must provide complete object files to the recipients so
+that they can relink them with the library, after making changes to the library
+and recompiling it. And you must show them these terms so they know their
+rights.
+
+Our method of protecting your rights has two steps: (1) copyright the library,
+and (2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the library.
+
+Also, for each distributor's protection, we want to make certain that everyone
+understands that there is no warranty for this free library. If the library is
+modified by someone else and passed on, we want its recipients to know that what
+they have is not the original version, so that any problems introduced by others
+will not reflect on the original authors' reputations.
+
+Finally, any free program is threatened constantly by software patents. We wish
+to avoid the danger that companies distributing free software will individually
+obtain patent licenses, thus in effect transforming the program into proprietary
+software. To prevent this, we have made it clear that any patent must be
+licensed for everyone's free use or not licensed at all.
+
+Most GNU software, including some libraries, is covered by the ordinary GNU
+General Public License, which was designed for utility programs. This license,
+the GNU Library General Public License, applies to certain designated libraries.
+This license is quite different from the ordinary one; be sure to read it in
+full, and don't assume that anything in it is the same as in the ordinary
+license.
+
+The reason we have a separate public license for some libraries is that they
+blur the distinction we usually make between modifying or adding to a program
+and simply using it. Linking a program with a library, without changing the
+library, is in some sense simply using the library, and is analogous to running
+a utility program or application program. However, in a textual and legal sense,
+the linked executable is a combined work, a derivative of the original library,
+and the ordinary General Public License treats it as such.
+
+Because of this blurred distinction, using the ordinary General Public License
+for libraries did not effectively promote software sharing, because most
+developers did not use the libraries. We concluded that weaker conditions might
+promote sharing better.
+
+However, unrestricted linking of non-free programs would deprive the users of
+those programs of all benefit from the free status of the libraries themselves.
+This Library General Public License is intended to permit developers of non-free
+programs to use free libraries, while preserving your freedom as a user of such
+programs to change the free libraries that are incorporated in them. (We have
+not seen how to achieve this as regards changes in header files, but we have
+achieved it as regards changes in the actual functions of the Library.) The hope
+is that this will lead to faster development of free libraries.
+
+The precise terms and conditions for copying, distribution and modification
+follow. Pay close attention to the difference between a "work based on the
+library" and a "work that uses the library". The former contains code derived
+from the library, while the latter only works together with the library.
+
+Note that it is possible for a library to be covered by the ordinary General
+Public License rather than by this special one.
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License Agreement applies to any software library which contains a
+notice placed by the copyright holder or other authorized party saying it may be
+distributed under the terms of this Library General Public License (also called
+"this License"). Each licensee is addressed as "you".
+
+A "library" means a collection of software functions and/or data prepared so as
+to be conveniently linked with application programs (which use some of those
+functions and data) to form executables.
+
+The "Library", below, refers to any such software library or work which has been
+distributed under these terms. A "work based on the Library" means either the
+Library or any derivative work under copyright law: that is to say, a work
+containing the Library or a portion of it, either verbatim or with modifications
+and/or translated straightforwardly into another language. (Hereinafter,
+translation is included without limitation in the term "modification".)
+
+"Source code" for a work means the preferred form of the work for making
+modifications to it. For a library, complete source code means all the source
+code for all modules it contains, plus any associated interface definition
+files, plus the scripts used to control compilation and installation of the
+library.
+
+Activities other than copying, distribution and modification are not covered by
+this License; they are outside its scope. The act of running a program using the
+Library is not restricted, and output from such a program is covered only if its
+contents constitute a work based on the Library (independent of the use of the
+Library in a tool for writing it). Whether that is true depends on what the
+Library does and what the program that uses the Library does.
+
+1. You may copy and distribute verbatim copies of the Library's complete source
+code as you receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice and
+disclaimer of warranty; keep intact all the notices that refer to this License
+and to the absence of any warranty; and distribute a copy of this License along
+with the Library.
+
+You may charge a fee for the physical act of transferring a copy, and you may at
+your option offer warranty protection in exchange for a fee.
+
+2. You may modify your copy or copies of the Library or any portion of it, thus
+forming a work based on the Library, and copy and distribute such modifications
+or work under the terms of Section 1 above, provided that you also meet all of
+these conditions:
+
+ a) The modified work must itself be a software library.
+ b) You must cause the files modified to carry prominent notices stating that
+you changed the files and the date of any change.
+ c) You must cause the whole of the work to be licensed at no charge to all
+third parties under the terms of this License.
+ d) If a facility in the modified Library refers to a function or a table of
+data to be supplied by an application program that uses the facility, other than
+as an argument passed when the facility is invoked, then you must make a good
+faith effort to ensure that, in the event an application does not supply such
+function or table, the facility still operates, and performs whatever part of
+its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has a purpose
+that is entirely well-defined independent of the application. Therefore,
+Subsection 2d requires that any application-supplied function or table used by
+this function must be optional: if the application does not supply it, the
+square root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If identifiable
+sections of that work are not derived from the Library, and can be reasonably
+considered independent and separate works in themselves, then this License, and
+its terms, do not apply to those sections when you distribute them as separate
+works. But when you distribute the same sections as part of a whole which is a
+work based on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the entire whole,
+and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest your
+rights to work written entirely by you; rather, the intent is to exercise the
+right to control the distribution of derivative or collective works based on the
+Library.
+
+In addition, mere aggregation of another work not based on the Library with the
+Library (or with a work based on the Library) on a volume of a storage or
+distribution medium does not bring the other work under the scope of this
+License.
+
+3. You may opt to apply the terms of the ordinary GNU General Public License
+instead of this License to a given copy of the Library. To do this, you must
+alter all the notices that refer to this License, so that they refer to the
+ordinary GNU General Public License, version 2, instead of to this License. (If
+a newer version than version 2 of the ordinary GNU General Public License has
+appeared, then you can specify that version instead if you wish.) Do not make
+any other change in these notices.
+
+Once this change is made in a given copy, it is irreversible for that copy, so
+the ordinary GNU General Public License applies to all subsequent copies and
+derivative works made from that copy.
+
+This option is useful when you wish to copy part of the code of the Library into
+a program that is not a library.
+
+4. You may copy and distribute the Library (or a portion or derivative of it,
+under Section 2) in object code or executable form under the terms of Sections 1
+and 2 above provided that you accompany it with the complete corresponding
+machine-readable source code, which must be distributed under the terms of
+Sections 1 and 2 above on a medium customarily used for software interchange.
+
+If distribution of object code is made by offering access to copy from a
+designated place, then offering equivalent access to copy the source code from
+the same place satisfies the requirement to distribute the source code, even
+though third parties are not compelled to copy the source along with the object
+code.
+
+5. A program that contains no derivative of any portion of the Library, but is
+designed to work with the Library by being compiled or linked with it, is called
+a "work that uses the Library". Such a work, in isolation, is not a derivative
+work of the Library, and therefore falls outside the scope of this License.
+
+However, linking a "work that uses the Library" with the Library creates an
+executable that is a derivative of the Library (because it contains portions of
+the Library), rather than a "work that uses the library". The executable is
+therefore covered by this License. Section 6 states terms for distribution of
+such executables.
+
+When a "work that uses the Library" uses material from a header file that is
+part of the Library, the object code for the work may be a derivative work of
+the Library even though the source code is not. Whether this is true is
+especially significant if the work can be linked without the Library, or if the
+work is itself a library. The threshold for this to be true is not precisely
+defined by law.
+
+If such an object file uses only numerical parameters, data structure layouts
+and accessors, and small macros and small inline functions (ten lines or less in
+length), then the use of the object file is unrestricted, regardless of whether
+it is legally a derivative work. (Executables containing this object code plus
+portions of the Library will still fall under Section 6.)
+
+Otherwise, if the work is a derivative of the Library, you may distribute the
+object code for the work under the terms of Section 6. Any executables
+containing that work also fall under Section 6, whether or not they are linked
+directly with the Library itself.
+
+6. As an exception to the Sections above, you may also compile or link a "work
+that uses the Library" with the Library to produce a work containing portions of
+the Library, and distribute that work under terms of your choice, provided that
+the terms permit modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+You must give prominent notice with each copy of the work that the Library is
+used in it and that the Library and its use are covered by this License. You
+must supply a copy of this License. If the work during execution displays
+copyright notices, you must include the copyright notice for the Library among
+them, as well as a reference directing the user to the copy of this License.
+Also, you must do one of these things:
+
+ a) Accompany the work with the complete corresponding machine-readable
+source code for the Library including whatever changes were used in the work
+(which must be distributed under Sections 1 and 2 above); and, if the work is an
+executable linked with the Library, with the complete machine-readable "work
+that uses the Library", as object code and/or source code, so that the user can
+modify the Library and then relink to produce a modified executable containing
+the modified Library. (It is understood that the user who changes the contents
+of definitions files in the Library will not necessarily be able to recompile
+the application to use the modified definitions.)
+ b) Accompany the work with a written offer, valid for at least three years,
+to give the same user the materials specified in Subsection 6a, above, for a
+charge no more than the cost of performing this distribution.
+ c) If distribution of the work is made by offering access to copy from a
+designated place, offer equivalent access to copy the above specified materials
+from the same place.
+ d) Verify that the user has already received a copy of these materials or
+that you have already sent this user a copy.
+
+For an executable, the required form of the "work that uses the Library" must
+include any data and utility programs needed for reproducing the executable from
+it. However, as a special exception, the source code distributed need not
+include anything that is normally distributed (in either source or binary form)
+with the major components (compiler, kernel, and so on) of the operating system
+on which the executable runs, unless that component itself accompanies the
+executable.
+
+It may happen that this requirement contradicts the license restrictions of
+other proprietary libraries that do not normally accompany the operating system.
+Such a contradiction means you cannot use both them and the Library together in
+an executable that you distribute.
+
+7. You may place library facilities that are a work based on the Library
+side-by-side in a single library together with other library facilities not
+covered by this License, and distribute such a combined library, provided that
+the separate distribution of the work based on the Library and of the other
+library facilities is otherwise permitted, and provided that you do these two
+things:
+
+ a) Accompany the combined library with a copy of the same work based on the
+Library, uncombined with any other library facilities. This must be distributed
+under the terms of the Sections above.
+ b) Give prominent notice with the combined library of the fact that part of
+it is a work based on the Library, and explaining where to find the accompanying
+uncombined form of the same work.
+
+8. You may not copy, modify, sublicense, link with, or distribute the Library
+except as expressly provided under this License. Any attempt otherwise to copy,
+modify, sublicense, link with, or distribute the Library is void, and will
+automatically terminate your rights under this License. However, parties who
+have received copies, or rights, from you under this License will not have their
+licenses terminated so long as such parties remain in full compliance.
+
+9. You are not required to accept this License, since you have not signed it.
+However, nothing else grants you permission to modify or distribute the Library
+or its derivative works. These actions are prohibited by law if you do not
+accept this License. Therefore, by modifying or distributing the Library (or any
+work based on the Library), you indicate your acceptance of this License to do
+so, and all its terms and conditions for copying, distributing or modifying the
+Library or works based on it.
+
+10. Each time you redistribute the Library (or any work based on the Library),
+the recipient automatically receives a license from the original licensor to
+copy, distribute, link with or modify the Library subject to these terms and
+conditions. You may not impose any further restrictions on the recipients'
+exercise of the rights granted herein. You are not responsible for enforcing
+compliance by third parties to this License.
+
+11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues), conditions
+are imposed on you (whether by court order, agreement or otherwise) that
+contradict the conditions of this License, they do not excuse you from the
+conditions of this License. If you cannot distribute so as to satisfy
+simultaneously your obligations under this License and any other pertinent
+obligations, then as a consequence you may not distribute the Library at all.
+For example, if a patent license would not permit royalty-free redistribution of
+the Library by all those who receive copies directly or indirectly through you,
+then the only way you could satisfy both it and this License would be to refrain
+entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply, and
+the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any patents or
+other property right claims or to contest validity of any such claims; this
+section has the sole purpose of protecting the integrity of the free software
+distribution system which is implemented by public license practices. Many
+people have made generous contributions to the wide range of software
+distributed through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing to
+distribute software through any other system and a licensee cannot impose that
+choice.
+
+This section is intended to make thoroughly clear what is believed to be a
+consequence of the rest of this License.
+
+12. If the distribution and/or use of the Library is restricted in certain
+countries either by patents or by copyrighted interfaces, the original copyright
+holder who places the Library under this License may add an explicit
+geographical distribution limitation excluding those countries, so that
+distribution is permitted only in or among countries not thus excluded. In such
+case, this License incorporates the limitation as if written in the body of this
+License.
+
+13. The Free Software Foundation may publish revised and/or new versions of the
+Library General Public License from time to time. Such new versions will be
+similar in spirit to the present version, but may differ in detail to address
+new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library specifies
+a version number of this License which applies to it and "any later version",
+you have the option of following the terms and conditions either of that version
+or of any later version published by the Free Software Foundation. If the
+Library does not specify a license version number, you may choose any version
+ever published by the Free Software Foundation.
+
+14. If you wish to incorporate parts of the Library into other free programs
+whose distribution conditions are incompatible with these, write to the author
+to ask for permission. For software which is copyrighted by the Free Software
+Foundation, write to the Free Software Foundation; we sometimes make exceptions
+for this. Our decision will be guided by the two goals of preserving the free
+status of all derivatives of our free software and of promoting the sharing and
+reuse of software generally.
+
+NO WARRANTY
+
+15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE
+LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED
+IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS
+IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT
+NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL
+ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE
+LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL,
+SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY
+TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
+THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER
+PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
diff --git a/gst-v4l2/Makefile b/gst-v4l2/Makefile
new file mode 100644
index 0000000..6621c04
--- /dev/null
+++ b/gst-v4l2/Makefile
@@ -0,0 +1,70 @@
+###############################################################################
+#
+# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA Corporation and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA Corporation is strictly prohibited.
+#
+###############################################################################
+
+SO_NAME := libgstnvvideo4linux2.so
+
+TARGET_DEVICE = $(shell gcc -dumpmachine | cut -f1 -d -)
+
+NVDS_VERSION:=6.0
+
+ifeq ($(TARGET_DEVICE),aarch64)
+ GST_INSTALL_DIR?=/usr/lib/aarch64-linux-gnu/gstreamer-1.0/
+ LIB_INSTALL_DIR?=/usr/lib/aarch64-linux-gnu/tegra/
+ CFLAGS:=
+else
+ GST_INSTALL_DIR?=/opt/nvidia/deepstream/deepstream-$(NVDS_VERSION)/lib/gst-plugins/
+ LIB_INSTALL_DIR?=/opt/nvidia/deepstream/deepstream-$(NVDS_VERSION)/lib/
+ CFLAGS:= -DUSE_V4L2_TARGET_NV_CODECSDK=1 -DUSE_V4L2_TARGET_NV_X86=1 -DUSE_V4L2_GST_HEADER_VER_1_8
+endif
+
+LIBS:= -lnvbufsurface -lnvbufsurftransform -lgstnvdsseimeta -lgstnvcustomhelper
+SRCS := $(wildcard *.c)
+
+INCLUDES += -I./ -I../
+INCLUDES += -I/usr/src/jetson_multimedia_api/include/
+
+PKGS := gstreamer-1.0 \
+ gstreamer-base-1.0 \
+ gstreamer-video-1.0 \
+ gstreamer-allocators-1.0 \
+ glib-2.0 \
+ libv4l2
+
+OBJS := $(SRCS:.c=.o)
+
+CFLAGS += -fPIC \
+ -DEXPLICITLY_ADDED=1 \
+ -DGETTEXT_PACKAGE=1 \
+ -DHAVE_LIBV4L2=1 \
+ -DUSE_V4L2_TARGET_NV=1
+
+CFLAGS += `pkg-config --cflags $(PKGS)`
+
+LDFLAGS = -Wl,--no-undefined -L$(LIB_INSTALL_DIR) -Wl,-rpath,$(LIB_INSTALL_DIR)
+
+LIBS += `pkg-config --libs $(PKGS)`
+
+all: $(SO_NAME)
+
+%.o: %.c
+ $(CC) -c $< $(CFLAGS) $(INCLUDES) -o $@
+
+$(SO_NAME): $(OBJS)
+ $(CC) -shared -o $(SO_NAME) $(OBJS) $(LIBS) $(LDFLAGS)
+
+.PHONY: install
+install: $(SO_NAME)
+ cp -vp $(SO_NAME) $(GST_INSTALL_DIR)
+
+.PHONY: clean
+clean:
+ rm -rf $(OBJS) $(SO_NAME)
diff --git a/gst-v4l2/README.txt b/gst-v4l2/README.txt
new file mode 100644
index 0000000..8265d8c
--- /dev/null
+++ b/gst-v4l2/README.txt
@@ -0,0 +1,37 @@
+###############################################################################
+#
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA Corporation and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA Corporation is strictly prohibited.
+#
+###############################################################################
+
+Steps to compile the "gst-nvvideo4linux2" sources natively:
+
+1) Install gstreamer related packages on target using the command:
+
+ sudo apt-get install libgstreamer1.0-dev \
+ gstreamer1.0-plugins-base \
+ gstreamer1.0-plugins-good \
+ libgstreamer-plugins-base1.0-dev \
+ libv4l-dev \
+ libegl1-mesa-dev
+
+2) Download and extract the package "gst-nvvideo4linux2_src.tbz2" as follows:
+
+ tar -I lbzip2 -xvf gst-nvvideo4linux2_src.tbz2
+
+3) Run the following commands to build and install "libgstnvvideo4linux2.so":
+ make
+ make install
+ or
+ GST_INSTALL_DIR=<dir> make install
+
+ Note: For Jetson, "make install" will copy library "libgstnvvideo4linux2.so"
+ into "/usr/lib/aarch64-linux-gnu/gstreamer-1.0" directory. For x86 platforms,
+ make install will copy the library "libgstnvvideo4linux2.so" into
+ /opt/nvidia/deepstream/deepstream-6.0/lib/gst-plugins
diff --git a/gst-v4l2/ext/types-compat.h b/gst-v4l2/ext/types-compat.h
new file mode 100644
index 0000000..ab5c809
--- /dev/null
+++ b/gst-v4l2/ext/types-compat.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 Collabora Ltd.
+ * Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include <glib.h>
+
+#ifndef __TYPES_COMPAT_H__
+#define __TYPES_COMPAT_H__
+
+/* From linux/types.h */
+#ifndef __bitwise__
+# ifdef __CHECKER__
+# define __bitwise__ __attribute__((bitwise))
+# else
+# define __bitwise__
+# endif
+#endif
+
+#ifndef __bitwise
+# ifdef __CHECK_ENDIAN__
+# define __bitwise __bitwise__
+# else
+# define __bitwise
+# endif
+#endif
+
+#define __u64 guint64
+#define __u32 guint32
+#define __u16 guint16
+#define __u8 guint8
+#ifdef USE_V4L2_TARGET_NV
+#define __s8 gint8
+#endif
+#define __s64 gint64
+#define __s32 gint32
+#define __le32 guint32 __bitwise
+
+#define __user
+
+#endif /* __TYPES_COMPAT_H__ */
diff --git a/gst-v4l2/ext/v4l2-common.h b/gst-v4l2/ext/v4l2-common.h
new file mode 100644
index 0000000..2a2c88a
--- /dev/null
+++ b/gst-v4l2/ext/v4l2-common.h
@@ -0,0 +1,107 @@
+/*
+ * include/linux/v4l2-common.h
+ *
+ * Common V4L2 and V4L2 subdev definitions.
+ *
+ * Users are advised to #include this file either through videodev2.h
+ * (V4L2) or through v4l2-subdev.h (V4L2 subdev) rather than to refer
+ * to this file directly.
+ *
+ * Copyright (C) 2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Alternatively you can redistribute this file under the terms of the
+ * BSD license as stated below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __V4L2_COMMON__
+#define __V4L2_COMMON__
+
+#include "ext/types-compat.h"
+
+/*
+ *
+ * Selection interface definitions
+ *
+ */
+
+/* Current cropping area */
+#define V4L2_SEL_TGT_CROP 0x0000
+/* Default cropping area */
+#define V4L2_SEL_TGT_CROP_DEFAULT 0x0001
+/* Cropping bounds */
+#define V4L2_SEL_TGT_CROP_BOUNDS 0x0002
+/* Native frame size */
+#define V4L2_SEL_TGT_NATIVE_SIZE 0x0003
+/* Current composing area */
+#define V4L2_SEL_TGT_COMPOSE 0x0100
+/* Default composing area */
+#define V4L2_SEL_TGT_COMPOSE_DEFAULT 0x0101
+/* Composing bounds */
+#define V4L2_SEL_TGT_COMPOSE_BOUNDS 0x0102
+/* Current composing area plus all padding pixels */
+#define V4L2_SEL_TGT_COMPOSE_PADDED 0x0103
+
+/* Backward compatibility target definitions --- to be removed. */
+#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP
+#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE
+#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL V4L2_SEL_TGT_CROP
+#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL V4L2_SEL_TGT_COMPOSE
+#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS V4L2_SEL_TGT_CROP_BOUNDS
+#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS V4L2_SEL_TGT_COMPOSE_BOUNDS
+
+/* Selection flags */
+#define V4L2_SEL_FLAG_GE (1 << 0)
+#define V4L2_SEL_FLAG_LE (1 << 1)
+#define V4L2_SEL_FLAG_KEEP_CONFIG (1 << 2)
+
+/* Backward compatibility flag definitions --- to be removed. */
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE
+#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG
+
+struct v4l2_edid {
+ __u32 pad;
+ __u32 start_block;
+ __u32 blocks;
+ __u32 reserved[5];
+ __u8 *edid;
+};
+
+#endif /* __V4L2_COMMON__ */
diff --git a/gst-v4l2/ext/v4l2-controls.h b/gst-v4l2/ext/v4l2-controls.h
new file mode 100644
index 0000000..31bfc68
--- /dev/null
+++ b/gst-v4l2/ext/v4l2-controls.h
@@ -0,0 +1,987 @@
+/*
+ * Video for Linux Two controls header file
+ *
+ * Copyright (C) 1999-2012 the contributors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Alternatively you can redistribute this file under the terms of the
+ * BSD license as stated below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The contents of this header was split off from videodev2.h. All control
+ * definitions should be added to this header, which is included by
+ * videodev2.h.
+ */
+
+#ifndef __LINUX_V4L2_CONTROLS_H
+#define __LINUX_V4L2_CONTROLS_H
+
+/* Control classes */
+#define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */
+#define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */
+#define V4L2_CTRL_CLASS_CAMERA 0x009a0000 /* Camera class controls */
+#define V4L2_CTRL_CLASS_FM_TX 0x009b0000 /* FM Modulator controls */
+#define V4L2_CTRL_CLASS_FLASH 0x009c0000 /* Camera flash controls */
+#define V4L2_CTRL_CLASS_JPEG 0x009d0000 /* JPEG-compression controls */
+#define V4L2_CTRL_CLASS_IMAGE_SOURCE 0x009e0000 /* Image source controls */
+#define V4L2_CTRL_CLASS_IMAGE_PROC 0x009f0000 /* Image processing controls */
+#define V4L2_CTRL_CLASS_DV 0x00a00000 /* Digital Video controls */
+#define V4L2_CTRL_CLASS_FM_RX 0x00a10000 /* FM Receiver controls */
+#define V4L2_CTRL_CLASS_RF_TUNER 0x00a20000 /* RF tuner controls */
+#define V4L2_CTRL_CLASS_DETECT 0x00a30000 /* Detection controls */
+
+/* User-class control IDs */
+
+#define V4L2_CID_BASE (V4L2_CTRL_CLASS_USER | 0x900)
+#define V4L2_CID_USER_BASE V4L2_CID_BASE
+#define V4L2_CID_USER_CLASS (V4L2_CTRL_CLASS_USER | 1)
+#define V4L2_CID_BRIGHTNESS (V4L2_CID_BASE+0)
+#define V4L2_CID_CONTRAST (V4L2_CID_BASE+1)
+#define V4L2_CID_SATURATION (V4L2_CID_BASE+2)
+#define V4L2_CID_HUE (V4L2_CID_BASE+3)
+#define V4L2_CID_AUDIO_VOLUME (V4L2_CID_BASE+5)
+#define V4L2_CID_AUDIO_BALANCE (V4L2_CID_BASE+6)
+#define V4L2_CID_AUDIO_BASS (V4L2_CID_BASE+7)
+#define V4L2_CID_AUDIO_TREBLE (V4L2_CID_BASE+8)
+#define V4L2_CID_AUDIO_MUTE (V4L2_CID_BASE+9)
+#define V4L2_CID_AUDIO_LOUDNESS (V4L2_CID_BASE+10)
+#define V4L2_CID_BLACK_LEVEL (V4L2_CID_BASE+11) /* Deprecated */
+#define V4L2_CID_AUTO_WHITE_BALANCE (V4L2_CID_BASE+12)
+#define V4L2_CID_DO_WHITE_BALANCE (V4L2_CID_BASE+13)
+#define V4L2_CID_RED_BALANCE (V4L2_CID_BASE+14)
+#define V4L2_CID_BLUE_BALANCE (V4L2_CID_BASE+15)
+#define V4L2_CID_GAMMA (V4L2_CID_BASE+16)
+#define V4L2_CID_WHITENESS (V4L2_CID_GAMMA) /* Deprecated */
+#define V4L2_CID_EXPOSURE (V4L2_CID_BASE+17)
+#define V4L2_CID_AUTOGAIN (V4L2_CID_BASE+18)
+#define V4L2_CID_GAIN (V4L2_CID_BASE+19)
+#define V4L2_CID_HFLIP (V4L2_CID_BASE+20)
+#define V4L2_CID_VFLIP (V4L2_CID_BASE+21)
+
+#define V4L2_CID_POWER_LINE_FREQUENCY (V4L2_CID_BASE+24)
+enum v4l2_power_line_frequency {
+ V4L2_CID_POWER_LINE_FREQUENCY_DISABLED = 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ = 1,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ = 2,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO = 3,
+};
+#define V4L2_CID_HUE_AUTO (V4L2_CID_BASE+25)
+#define V4L2_CID_WHITE_BALANCE_TEMPERATURE (V4L2_CID_BASE+26)
+#define V4L2_CID_SHARPNESS (V4L2_CID_BASE+27)
+#define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28)
+#define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29)
+#define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30)
+#define V4L2_CID_COLORFX (V4L2_CID_BASE+31)
+enum v4l2_colorfx {
+ V4L2_COLORFX_NONE = 0,
+ V4L2_COLORFX_BW = 1,
+ V4L2_COLORFX_SEPIA = 2,
+ V4L2_COLORFX_NEGATIVE = 3,
+ V4L2_COLORFX_EMBOSS = 4,
+ V4L2_COLORFX_SKETCH = 5,
+ V4L2_COLORFX_SKY_BLUE = 6,
+ V4L2_COLORFX_GRASS_GREEN = 7,
+ V4L2_COLORFX_SKIN_WHITEN = 8,
+ V4L2_COLORFX_VIVID = 9,
+ V4L2_COLORFX_AQUA = 10,
+ V4L2_COLORFX_ART_FREEZE = 11,
+ V4L2_COLORFX_SILHOUETTE = 12,
+ V4L2_COLORFX_SOLARIZATION = 13,
+ V4L2_COLORFX_ANTIQUE = 14,
+ V4L2_COLORFX_SET_CBCR = 15,
+};
+#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32)
+#define V4L2_CID_BAND_STOP_FILTER (V4L2_CID_BASE+33)
+
+#define V4L2_CID_ROTATE (V4L2_CID_BASE+34)
+#define V4L2_CID_BG_COLOR (V4L2_CID_BASE+35)
+
+#define V4L2_CID_CHROMA_GAIN (V4L2_CID_BASE+36)
+
+#define V4L2_CID_ILLUMINATORS_1 (V4L2_CID_BASE+37)
+#define V4L2_CID_ILLUMINATORS_2 (V4L2_CID_BASE+38)
+
+#define V4L2_CID_MIN_BUFFERS_FOR_CAPTURE (V4L2_CID_BASE+39)
+#define V4L2_CID_MIN_BUFFERS_FOR_OUTPUT (V4L2_CID_BASE+40)
+
+#define V4L2_CID_ALPHA_COMPONENT (V4L2_CID_BASE+41)
+#define V4L2_CID_COLORFX_CBCR (V4L2_CID_BASE+42)
+
+/* last CID + 1 */
+#define V4L2_CID_LASTP1 (V4L2_CID_BASE+43)
+
+/* USER-class private control IDs */
+
+/* The base for the meye driver controls. See linux/meye.h for the list
+ * of controls. We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_MEYE_BASE (V4L2_CID_USER_BASE + 0x1000)
+
+/* The base for the bttv driver controls.
+ * We reserve 32 controls for this driver. */
+#define V4L2_CID_USER_BTTV_BASE (V4L2_CID_USER_BASE + 0x1010)
+
+
+/* The base for the s2255 driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_S2255_BASE (V4L2_CID_USER_BASE + 0x1030)
+
+/*
+ * The base for the si476x driver controls. See include/media/drv-intf/si476x.h
+ * for the list of controls. Total of 16 controls is reserved for this driver
+ */
+#define V4L2_CID_USER_SI476X_BASE (V4L2_CID_USER_BASE + 0x1040)
+
+/* The base for the TI VPE driver controls. Total of 16 controls is reserved for
+ * this driver */
+#define V4L2_CID_USER_TI_VPE_BASE (V4L2_CID_USER_BASE + 0x1050)
+
+/* The base for the saa7134 driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_SAA7134_BASE (V4L2_CID_USER_BASE + 0x1060)
+
+/* The base for the adv7180 driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_ADV7180_BASE (V4L2_CID_USER_BASE + 0x1070)
+
+/* The base for the tc358743 driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_TC358743_BASE (V4L2_CID_USER_BASE + 0x1080)
+
+/* The base for the max217x driver controls.
+ * We reserve 32 controls for this driver
+ */
+#define V4L2_CID_USER_MAX217X_BASE (V4L2_CID_USER_BASE + 0x1090)
+
+/* The base for the imx driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x1090)
+
+/* MPEG-class control IDs */
+/* The MPEG controls are applicable to all codec controls
+ * and the 'MPEG' part of the define is historical */
+
+#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
+#define V4L2_CID_MPEG_CLASS (V4L2_CTRL_CLASS_MPEG | 1)
+
+/* MPEG streams, specific to multiplexed streams */
+#define V4L2_CID_MPEG_STREAM_TYPE (V4L2_CID_MPEG_BASE+0)
+enum v4l2_mpeg_stream_type {
+ V4L2_MPEG_STREAM_TYPE_MPEG2_PS = 0, /* MPEG-2 program stream */
+ V4L2_MPEG_STREAM_TYPE_MPEG2_TS = 1, /* MPEG-2 transport stream */
+ V4L2_MPEG_STREAM_TYPE_MPEG1_SS = 2, /* MPEG-1 system stream */
+ V4L2_MPEG_STREAM_TYPE_MPEG2_DVD = 3, /* MPEG-2 DVD-compatible stream */
+ V4L2_MPEG_STREAM_TYPE_MPEG1_VCD = 4, /* MPEG-1 VCD-compatible stream */
+ V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD = 5, /* MPEG-2 SVCD-compatible stream */
+};
+#define V4L2_CID_MPEG_STREAM_PID_PMT (V4L2_CID_MPEG_BASE+1)
+#define V4L2_CID_MPEG_STREAM_PID_AUDIO (V4L2_CID_MPEG_BASE+2)
+#define V4L2_CID_MPEG_STREAM_PID_VIDEO (V4L2_CID_MPEG_BASE+3)
+#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_MPEG_BASE+4)
+#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_MPEG_BASE+5)
+#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_MPEG_BASE+6)
+#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_MPEG_BASE+7)
+enum v4l2_mpeg_stream_vbi_fmt {
+ V4L2_MPEG_STREAM_VBI_FMT_NONE = 0, /* No VBI in the MPEG stream */
+ V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1, /* VBI in private packets, IVTV format */
+};
+
+/* MPEG audio controls specific to multiplexed streams */
+#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_MPEG_BASE+100)
+enum v4l2_mpeg_audio_sampling_freq {
+ V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100 = 0,
+ V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000 = 1,
+ V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000 = 2,
+};
+#define V4L2_CID_MPEG_AUDIO_ENCODING (V4L2_CID_MPEG_BASE+101)
+enum v4l2_mpeg_audio_encoding {
+ V4L2_MPEG_AUDIO_ENCODING_LAYER_1 = 0,
+ V4L2_MPEG_AUDIO_ENCODING_LAYER_2 = 1,
+ V4L2_MPEG_AUDIO_ENCODING_LAYER_3 = 2,
+ V4L2_MPEG_AUDIO_ENCODING_AAC = 3,
+ V4L2_MPEG_AUDIO_ENCODING_AC3 = 4,
+};
+#define V4L2_CID_MPEG_AUDIO_L1_BITRATE (V4L2_CID_MPEG_BASE+102)
+enum v4l2_mpeg_audio_l1_bitrate {
+ V4L2_MPEG_AUDIO_L1_BITRATE_32K = 0,
+ V4L2_MPEG_AUDIO_L1_BITRATE_64K = 1,
+ V4L2_MPEG_AUDIO_L1_BITRATE_96K = 2,
+ V4L2_MPEG_AUDIO_L1_BITRATE_128K = 3,
+ V4L2_MPEG_AUDIO_L1_BITRATE_160K = 4,
+ V4L2_MPEG_AUDIO_L1_BITRATE_192K = 5,
+ V4L2_MPEG_AUDIO_L1_BITRATE_224K = 6,
+ V4L2_MPEG_AUDIO_L1_BITRATE_256K = 7,
+ V4L2_MPEG_AUDIO_L1_BITRATE_288K = 8,
+ V4L2_MPEG_AUDIO_L1_BITRATE_320K = 9,
+ V4L2_MPEG_AUDIO_L1_BITRATE_352K = 10,
+ V4L2_MPEG_AUDIO_L1_BITRATE_384K = 11,
+ V4L2_MPEG_AUDIO_L1_BITRATE_416K = 12,
+ V4L2_MPEG_AUDIO_L1_BITRATE_448K = 13,
+};
+#define V4L2_CID_MPEG_AUDIO_L2_BITRATE (V4L2_CID_MPEG_BASE+103)
+enum v4l2_mpeg_audio_l2_bitrate {
+ V4L2_MPEG_AUDIO_L2_BITRATE_32K = 0,
+ V4L2_MPEG_AUDIO_L2_BITRATE_48K = 1,
+ V4L2_MPEG_AUDIO_L2_BITRATE_56K = 2,
+ V4L2_MPEG_AUDIO_L2_BITRATE_64K = 3,
+ V4L2_MPEG_AUDIO_L2_BITRATE_80K = 4,
+ V4L2_MPEG_AUDIO_L2_BITRATE_96K = 5,
+ V4L2_MPEG_AUDIO_L2_BITRATE_112K = 6,
+ V4L2_MPEG_AUDIO_L2_BITRATE_128K = 7,
+ V4L2_MPEG_AUDIO_L2_BITRATE_160K = 8,
+ V4L2_MPEG_AUDIO_L2_BITRATE_192K = 9,
+ V4L2_MPEG_AUDIO_L2_BITRATE_224K = 10,
+ V4L2_MPEG_AUDIO_L2_BITRATE_256K = 11,
+ V4L2_MPEG_AUDIO_L2_BITRATE_320K = 12,
+ V4L2_MPEG_AUDIO_L2_BITRATE_384K = 13,
+};
+#define V4L2_CID_MPEG_AUDIO_L3_BITRATE (V4L2_CID_MPEG_BASE+104)
+enum v4l2_mpeg_audio_l3_bitrate {
+ V4L2_MPEG_AUDIO_L3_BITRATE_32K = 0,
+ V4L2_MPEG_AUDIO_L3_BITRATE_40K = 1,
+ V4L2_MPEG_AUDIO_L3_BITRATE_48K = 2,
+ V4L2_MPEG_AUDIO_L3_BITRATE_56K = 3,
+ V4L2_MPEG_AUDIO_L3_BITRATE_64K = 4,
+ V4L2_MPEG_AUDIO_L3_BITRATE_80K = 5,
+ V4L2_MPEG_AUDIO_L3_BITRATE_96K = 6,
+ V4L2_MPEG_AUDIO_L3_BITRATE_112K = 7,
+ V4L2_MPEG_AUDIO_L3_BITRATE_128K = 8,
+ V4L2_MPEG_AUDIO_L3_BITRATE_160K = 9,
+ V4L2_MPEG_AUDIO_L3_BITRATE_192K = 10,
+ V4L2_MPEG_AUDIO_L3_BITRATE_224K = 11,
+ V4L2_MPEG_AUDIO_L3_BITRATE_256K = 12,
+ V4L2_MPEG_AUDIO_L3_BITRATE_320K = 13,
+};
+#define V4L2_CID_MPEG_AUDIO_MODE (V4L2_CID_MPEG_BASE+105)
+enum v4l2_mpeg_audio_mode {
+ V4L2_MPEG_AUDIO_MODE_STEREO = 0,
+ V4L2_MPEG_AUDIO_MODE_JOINT_STEREO = 1,
+ V4L2_MPEG_AUDIO_MODE_DUAL = 2,
+ V4L2_MPEG_AUDIO_MODE_MONO = 3,
+};
+#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (V4L2_CID_MPEG_BASE+106)
+enum v4l2_mpeg_audio_mode_extension {
+ V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4 = 0,
+ V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8 = 1,
+ V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12 = 2,
+ V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16 = 3,
+};
+#define V4L2_CID_MPEG_AUDIO_EMPHASIS (V4L2_CID_MPEG_BASE+107)
+enum v4l2_mpeg_audio_emphasis {
+ V4L2_MPEG_AUDIO_EMPHASIS_NONE = 0,
+ V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS = 1,
+ V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17 = 2,
+};
+#define V4L2_CID_MPEG_AUDIO_CRC (V4L2_CID_MPEG_BASE+108)
+enum v4l2_mpeg_audio_crc {
+ V4L2_MPEG_AUDIO_CRC_NONE = 0,
+ V4L2_MPEG_AUDIO_CRC_CRC16 = 1,
+};
+#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_MPEG_BASE+109)
+#define V4L2_CID_MPEG_AUDIO_AAC_BITRATE (V4L2_CID_MPEG_BASE+110)
+#define V4L2_CID_MPEG_AUDIO_AC3_BITRATE (V4L2_CID_MPEG_BASE+111)
+enum v4l2_mpeg_audio_ac3_bitrate {
+ V4L2_MPEG_AUDIO_AC3_BITRATE_32K = 0,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_40K = 1,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_48K = 2,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_56K = 3,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_64K = 4,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_80K = 5,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_96K = 6,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_112K = 7,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_128K = 8,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_160K = 9,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_192K = 10,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_224K = 11,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_256K = 12,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_320K = 13,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_384K = 14,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_448K = 15,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_512K = 16,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_576K = 17,
+ V4L2_MPEG_AUDIO_AC3_BITRATE_640K = 18,
+};
+#define V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK (V4L2_CID_MPEG_BASE+112)
+enum v4l2_mpeg_audio_dec_playback {
+ V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO = 0,
+ V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO = 1,
+ V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT = 2,
+ V4L2_MPEG_AUDIO_DEC_PLAYBACK_RIGHT = 3,
+ V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO = 4,
+ V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO = 5,
+};
+#define V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (V4L2_CID_MPEG_BASE+113)
+
+/* MPEG video controls specific to multiplexed streams */
+#define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_MPEG_BASE+200)
+enum v4l2_mpeg_video_encoding {
+ V4L2_MPEG_VIDEO_ENCODING_MPEG_1 = 0,
+ V4L2_MPEG_VIDEO_ENCODING_MPEG_2 = 1,
+ V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC = 2,
+};
+#define V4L2_CID_MPEG_VIDEO_ASPECT (V4L2_CID_MPEG_BASE+201)
+enum v4l2_mpeg_video_aspect {
+ V4L2_MPEG_VIDEO_ASPECT_1x1 = 0,
+ V4L2_MPEG_VIDEO_ASPECT_4x3 = 1,
+ V4L2_MPEG_VIDEO_ASPECT_16x9 = 2,
+ V4L2_MPEG_VIDEO_ASPECT_221x100 = 3,
+};
+#define V4L2_CID_MPEG_VIDEO_B_FRAMES (V4L2_CID_MPEG_BASE+202)
+#define V4L2_CID_MPEG_VIDEO_GOP_SIZE (V4L2_CID_MPEG_BASE+203)
+#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (V4L2_CID_MPEG_BASE+204)
+#define V4L2_CID_MPEG_VIDEO_PULLDOWN (V4L2_CID_MPEG_BASE+205)
+#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE (V4L2_CID_MPEG_BASE+206)
+enum v4l2_mpeg_video_bitrate_mode {
+ V4L2_MPEG_VIDEO_BITRATE_MODE_VBR = 0,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR = 1,
+};
+#define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_MPEG_BASE+207)
+#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_MPEG_BASE+208)
+#define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_MPEG_BASE+209)
+#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210)
+#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211)
+#define V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (V4L2_CID_MPEG_BASE+212)
+#define V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (V4L2_CID_MPEG_BASE+213)
+#define V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (V4L2_CID_MPEG_BASE+214)
+#define V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE (V4L2_CID_MPEG_BASE+215)
+#define V4L2_CID_MPEG_VIDEO_HEADER_MODE (V4L2_CID_MPEG_BASE+216)
+enum v4l2_mpeg_video_header_mode {
+ V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE = 0,
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME = 1,
+
+};
+#define V4L2_CID_MPEG_VIDEO_MAX_REF_PIC (V4L2_CID_MPEG_BASE+217)
+#define V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE (V4L2_CID_MPEG_BASE+218)
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES (V4L2_CID_MPEG_BASE+219)
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB (V4L2_CID_MPEG_BASE+220)
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE (V4L2_CID_MPEG_BASE+221)
+enum v4l2_mpeg_video_multi_slice_mode {
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE = 0,
+ V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB = 1,
+ V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES = 2,
+};
+#define V4L2_CID_MPEG_VIDEO_VBV_SIZE (V4L2_CID_MPEG_BASE+222)
+#define V4L2_CID_MPEG_VIDEO_DEC_PTS (V4L2_CID_MPEG_BASE+223)
+#define V4L2_CID_MPEG_VIDEO_DEC_FRAME (V4L2_CID_MPEG_BASE+224)
+#define V4L2_CID_MPEG_VIDEO_VBV_DELAY (V4L2_CID_MPEG_BASE+225)
+#define V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER (V4L2_CID_MPEG_BASE+226)
+#define V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE (V4L2_CID_MPEG_BASE+227)
+#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228)
+#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229)
+
+#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300)
+#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301)
+#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302)
+#define V4L2_CID_MPEG_VIDEO_H263_MIN_QP (V4L2_CID_MPEG_BASE+303)
+#define V4L2_CID_MPEG_VIDEO_H263_MAX_QP (V4L2_CID_MPEG_BASE+304)
+#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP (V4L2_CID_MPEG_BASE+350)
+#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP (V4L2_CID_MPEG_BASE+351)
+#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP (V4L2_CID_MPEG_BASE+352)
+#define V4L2_CID_MPEG_VIDEO_H264_MIN_QP (V4L2_CID_MPEG_BASE+353)
+#define V4L2_CID_MPEG_VIDEO_H264_MAX_QP (V4L2_CID_MPEG_BASE+354)
+#define V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM (V4L2_CID_MPEG_BASE+355)
+#define V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE (V4L2_CID_MPEG_BASE+356)
+#define V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE (V4L2_CID_MPEG_BASE+357)
+enum v4l2_mpeg_video_h264_entropy_mode {
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC = 0,
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC = 1,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_I_PERIOD (V4L2_CID_MPEG_BASE+358)
+#define V4L2_CID_MPEG_VIDEO_H264_LEVEL (V4L2_CID_MPEG_BASE+359)
+enum v4l2_mpeg_video_h264_level {
+ V4L2_MPEG_VIDEO_H264_LEVEL_1_0 = 0,
+ V4L2_MPEG_VIDEO_H264_LEVEL_1B = 1,
+ V4L2_MPEG_VIDEO_H264_LEVEL_1_1 = 2,
+ V4L2_MPEG_VIDEO_H264_LEVEL_1_2 = 3,
+ V4L2_MPEG_VIDEO_H264_LEVEL_1_3 = 4,
+ V4L2_MPEG_VIDEO_H264_LEVEL_2_0 = 5,
+ V4L2_MPEG_VIDEO_H264_LEVEL_2_1 = 6,
+ V4L2_MPEG_VIDEO_H264_LEVEL_2_2 = 7,
+ V4L2_MPEG_VIDEO_H264_LEVEL_3_0 = 8,
+ V4L2_MPEG_VIDEO_H264_LEVEL_3_1 = 9,
+ V4L2_MPEG_VIDEO_H264_LEVEL_3_2 = 10,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_0 = 11,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_1 = 12,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_2 = 13,
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_0 = 14,
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_1 = 15,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (V4L2_CID_MPEG_BASE+360)
+#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA (V4L2_CID_MPEG_BASE+361)
+#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE (V4L2_CID_MPEG_BASE+362)
+enum v4l2_mpeg_video_h264_loop_filter_mode {
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED = 0,
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED = 1,
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY = 2,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_PROFILE (V4L2_CID_MPEG_BASE+363)
+enum v4l2_mpeg_video_h264_profile {
+ V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE = 0,
+ V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE = 1,
+ V4L2_MPEG_VIDEO_H264_PROFILE_MAIN = 2,
+ V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED = 3,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH = 4,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10 = 5,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422 = 6,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE = 7,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA = 8,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA = 9,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA = 10,
+ V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA = 11,
+ V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE = 12,
+ V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH = 13,
+ V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA = 14,
+ V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH = 15,
+ V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH = 16,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT (V4L2_CID_MPEG_BASE+364)
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH (V4L2_CID_MPEG_BASE+365)
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE (V4L2_CID_MPEG_BASE+366)
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC (V4L2_CID_MPEG_BASE+367)
+enum v4l2_mpeg_video_h264_vui_sar_idc {
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED = 0,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1 = 1,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11 = 2,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11 = 3,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11 = 4,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33 = 5,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11 = 6,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11 = 7,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11 = 8,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33 = 9,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11 = 10,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11 = 11,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33 = 12,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99 = 13,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3 = 14,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2 = 15,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1 = 16,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED = 17,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING (V4L2_CID_MPEG_BASE+368)
+#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0 (V4L2_CID_MPEG_BASE+369)
+#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE (V4L2_CID_MPEG_BASE+370)
+enum v4l2_mpeg_video_h264_sei_fp_arrangement_type {
+ V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_CHECKERBOARD = 0,
+ V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_COLUMN = 1,
+ V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_ROW = 2,
+ V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_SIDE_BY_SIDE = 3,
+ V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM = 4,
+ V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL = 5,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_FMO (V4L2_CID_MPEG_BASE+371)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE (V4L2_CID_MPEG_BASE+372)
+enum v4l2_mpeg_video_h264_fmo_map_type {
+ V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES = 0,
+ V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES = 1,
+ V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_FOREGROUND_WITH_LEFT_OVER = 2,
+ V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_BOX_OUT = 3,
+ V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN = 4,
+ V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN = 5,
+ V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT = 6,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP (V4L2_CID_MPEG_BASE+373)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION (V4L2_CID_MPEG_BASE+374)
+enum v4l2_mpeg_video_h264_fmo_change_dir {
+ V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_RIGHT = 0,
+ V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_LEFT = 1,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE (V4L2_CID_MPEG_BASE+375)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH (V4L2_CID_MPEG_BASE+376)
+#define V4L2_CID_MPEG_VIDEO_H264_ASO (V4L2_CID_MPEG_BASE+377)
+#define V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER (V4L2_CID_MPEG_BASE+378)
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING (V4L2_CID_MPEG_BASE+379)
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE (V4L2_CID_MPEG_BASE+380)
+enum v4l2_mpeg_video_h264_hierarchical_coding_type {
+ V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B = 0,
+ V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P = 1,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER (V4L2_CID_MPEG_BASE+381)
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP (V4L2_CID_MPEG_BASE+382)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (V4L2_CID_MPEG_BASE+400)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (V4L2_CID_MPEG_BASE+401)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (V4L2_CID_MPEG_BASE+402)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP (V4L2_CID_MPEG_BASE+403)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP (V4L2_CID_MPEG_BASE+404)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL (V4L2_CID_MPEG_BASE+405)
+enum v4l2_mpeg_video_mpeg4_level {
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_0 = 0,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B = 1,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_1 = 2,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_2 = 3,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_3 = 4,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B = 5,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_4 = 6,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 = 7,
+};
+#define V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE (V4L2_CID_MPEG_BASE+406)
+enum v4l2_mpeg_video_mpeg4_profile {
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE = 0,
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE = 1,
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE = 2,
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE = 3,
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY = 4,
+};
+#define V4L2_CID_MPEG_VIDEO_MPEG4_QPEL (V4L2_CID_MPEG_BASE+407)
+
+/* Control IDs for VP8 streams
+ * Although VP8 is not part of MPEG we add these controls to the MPEG class
+ * as that class is already handling other video compression standards
+ */
+#define V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS (V4L2_CID_MPEG_BASE+500)
+enum v4l2_vp8_num_partitions {
+ V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION = 0,
+ V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS = 1,
+ V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS = 2,
+ V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS = 3,
+};
+#define V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4 (V4L2_CID_MPEG_BASE+501)
+#define V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES (V4L2_CID_MPEG_BASE+502)
+enum v4l2_vp8_num_ref_frames {
+ V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME = 0,
+ V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME = 1,
+ V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME = 2,
+};
+#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL (V4L2_CID_MPEG_BASE+503)
+#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS (V4L2_CID_MPEG_BASE+504)
+#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD (V4L2_CID_MPEG_BASE+505)
+#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL (V4L2_CID_MPEG_BASE+506)
+enum v4l2_vp8_golden_frame_sel {
+ V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV = 0,
+ V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD = 1,
+};
+#define V4L2_CID_MPEG_VIDEO_VPX_MIN_QP (V4L2_CID_MPEG_BASE+507)
+#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (V4L2_CID_MPEG_BASE+508)
+#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (V4L2_CID_MPEG_BASE+509)
+#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (V4L2_CID_MPEG_BASE+510)
+#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE (V4L2_CID_MPEG_BASE+511)
+
+/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
+#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0)
+enum v4l2_mpeg_cx2341x_video_spatial_filter_mode {
+ V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL = 0,
+ V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO = 1,
+};
+#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+1)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+2)
+enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type {
+ V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF = 0,
+ V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR = 1,
+ V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT = 2,
+ V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE = 3,
+ V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE = 4,
+};
+#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+3)
+enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type {
+ V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF = 0,
+ V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR = 1,
+};
+#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+4)
+enum v4l2_mpeg_cx2341x_video_temporal_filter_mode {
+ V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL = 0,
+ V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO = 1,
+};
+#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+5)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+6)
+enum v4l2_mpeg_cx2341x_video_median_filter_type {
+ V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF = 0,
+ V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR = 1,
+ V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT = 2,
+ V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT = 3,
+ V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG = 4,
+};
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+7)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+8)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+9)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+10)
+#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_MPEG_CX2341X_BASE+11)
+
+/* MPEG-class control IDs specific to the Samsung MFC 5.1 driver as defined by V4L2 */
+#define V4L2_CID_MPEG_MFC51_BASE (V4L2_CTRL_CLASS_MPEG | 0x1100)
+
+#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY (V4L2_CID_MPEG_MFC51_BASE+0)
+#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE (V4L2_CID_MPEG_MFC51_BASE+1)
+#define V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE (V4L2_CID_MPEG_MFC51_BASE+2)
+enum v4l2_mpeg_mfc51_video_frame_skip_mode {
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED = 0,
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT = 1,
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT = 2,
+};
+#define V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE (V4L2_CID_MPEG_MFC51_BASE+3)
+enum v4l2_mpeg_mfc51_video_force_frame_type {
+ V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED = 0,
+ V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME = 1,
+ V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED = 2,
+};
+#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING (V4L2_CID_MPEG_MFC51_BASE+4)
+#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV (V4L2_CID_MPEG_MFC51_BASE+5)
+#define V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT (V4L2_CID_MPEG_MFC51_BASE+6)
+#define V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF (V4L2_CID_MPEG_MFC51_BASE+7)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY (V4L2_CID_MPEG_MFC51_BASE+50)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK (V4L2_CID_MPEG_MFC51_BASE+51)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH (V4L2_CID_MPEG_MFC51_BASE+52)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (V4L2_CID_MPEG_MFC51_BASE+53)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (V4L2_CID_MPEG_MFC51_BASE+54)
+
+
+/* Camera class control IDs */
+
+#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
+#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1)
+
+#define V4L2_CID_EXPOSURE_AUTO (V4L2_CID_CAMERA_CLASS_BASE+1)
+enum v4l2_exposure_auto_type {
+ V4L2_EXPOSURE_AUTO = 0,
+ V4L2_EXPOSURE_MANUAL = 1,
+ V4L2_EXPOSURE_SHUTTER_PRIORITY = 2,
+ V4L2_EXPOSURE_APERTURE_PRIORITY = 3
+};
+#define V4L2_CID_EXPOSURE_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+2)
+#define V4L2_CID_EXPOSURE_AUTO_PRIORITY (V4L2_CID_CAMERA_CLASS_BASE+3)
+
+#define V4L2_CID_PAN_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+4)
+#define V4L2_CID_TILT_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+5)
+#define V4L2_CID_PAN_RESET (V4L2_CID_CAMERA_CLASS_BASE+6)
+#define V4L2_CID_TILT_RESET (V4L2_CID_CAMERA_CLASS_BASE+7)
+
+#define V4L2_CID_PAN_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+8)
+#define V4L2_CID_TILT_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+9)
+
+#define V4L2_CID_FOCUS_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+10)
+#define V4L2_CID_FOCUS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+11)
+#define V4L2_CID_FOCUS_AUTO (V4L2_CID_CAMERA_CLASS_BASE+12)
+
+#define V4L2_CID_ZOOM_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+13)
+#define V4L2_CID_ZOOM_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+14)
+#define V4L2_CID_ZOOM_CONTINUOUS (V4L2_CID_CAMERA_CLASS_BASE+15)
+
+#define V4L2_CID_PRIVACY (V4L2_CID_CAMERA_CLASS_BASE+16)
+
+#define V4L2_CID_IRIS_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+17)
+#define V4L2_CID_IRIS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+18)
+
+#define V4L2_CID_AUTO_EXPOSURE_BIAS (V4L2_CID_CAMERA_CLASS_BASE+19)
+
+#define V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE (V4L2_CID_CAMERA_CLASS_BASE+20)
+enum v4l2_auto_n_preset_white_balance {
+ V4L2_WHITE_BALANCE_MANUAL = 0,
+ V4L2_WHITE_BALANCE_AUTO = 1,
+ V4L2_WHITE_BALANCE_INCANDESCENT = 2,
+ V4L2_WHITE_BALANCE_FLUORESCENT = 3,
+ V4L2_WHITE_BALANCE_FLUORESCENT_H = 4,
+ V4L2_WHITE_BALANCE_HORIZON = 5,
+ V4L2_WHITE_BALANCE_DAYLIGHT = 6,
+ V4L2_WHITE_BALANCE_FLASH = 7,
+ V4L2_WHITE_BALANCE_CLOUDY = 8,
+ V4L2_WHITE_BALANCE_SHADE = 9,
+};
+
+#define V4L2_CID_WIDE_DYNAMIC_RANGE (V4L2_CID_CAMERA_CLASS_BASE+21)
+#define V4L2_CID_IMAGE_STABILIZATION (V4L2_CID_CAMERA_CLASS_BASE+22)
+
+#define V4L2_CID_ISO_SENSITIVITY (V4L2_CID_CAMERA_CLASS_BASE+23)
+#define V4L2_CID_ISO_SENSITIVITY_AUTO (V4L2_CID_CAMERA_CLASS_BASE+24)
+enum v4l2_iso_sensitivity_auto_type {
+ V4L2_ISO_SENSITIVITY_MANUAL = 0,
+ V4L2_ISO_SENSITIVITY_AUTO = 1,
+};
+
+#define V4L2_CID_EXPOSURE_METERING (V4L2_CID_CAMERA_CLASS_BASE+25)
+enum v4l2_exposure_metering {
+ V4L2_EXPOSURE_METERING_AVERAGE = 0,
+ V4L2_EXPOSURE_METERING_CENTER_WEIGHTED = 1,
+ V4L2_EXPOSURE_METERING_SPOT = 2,
+ V4L2_EXPOSURE_METERING_MATRIX = 3,
+};
+
+#define V4L2_CID_SCENE_MODE (V4L2_CID_CAMERA_CLASS_BASE+26)
+enum v4l2_scene_mode {
+ V4L2_SCENE_MODE_NONE = 0,
+ V4L2_SCENE_MODE_BACKLIGHT = 1,
+ V4L2_SCENE_MODE_BEACH_SNOW = 2,
+ V4L2_SCENE_MODE_CANDLE_LIGHT = 3,
+ V4L2_SCENE_MODE_DAWN_DUSK = 4,
+ V4L2_SCENE_MODE_FALL_COLORS = 5,
+ V4L2_SCENE_MODE_FIREWORKS = 6,
+ V4L2_SCENE_MODE_LANDSCAPE = 7,
+ V4L2_SCENE_MODE_NIGHT = 8,
+ V4L2_SCENE_MODE_PARTY_INDOOR = 9,
+ V4L2_SCENE_MODE_PORTRAIT = 10,
+ V4L2_SCENE_MODE_SPORTS = 11,
+ V4L2_SCENE_MODE_SUNSET = 12,
+ V4L2_SCENE_MODE_TEXT = 13,
+};
+
+#define V4L2_CID_3A_LOCK (V4L2_CID_CAMERA_CLASS_BASE+27)
+#define V4L2_LOCK_EXPOSURE (1 << 0)
+#define V4L2_LOCK_WHITE_BALANCE (1 << 1)
+#define V4L2_LOCK_FOCUS (1 << 2)
+
+#define V4L2_CID_AUTO_FOCUS_START (V4L2_CID_CAMERA_CLASS_BASE+28)
+#define V4L2_CID_AUTO_FOCUS_STOP (V4L2_CID_CAMERA_CLASS_BASE+29)
+#define V4L2_CID_AUTO_FOCUS_STATUS (V4L2_CID_CAMERA_CLASS_BASE+30)
+#define V4L2_AUTO_FOCUS_STATUS_IDLE (0 << 0)
+#define V4L2_AUTO_FOCUS_STATUS_BUSY (1 << 0)
+#define V4L2_AUTO_FOCUS_STATUS_REACHED (1 << 1)
+#define V4L2_AUTO_FOCUS_STATUS_FAILED (1 << 2)
+
+#define V4L2_CID_AUTO_FOCUS_RANGE (V4L2_CID_CAMERA_CLASS_BASE+31)
+enum v4l2_auto_focus_range {
+ V4L2_AUTO_FOCUS_RANGE_AUTO = 0,
+ V4L2_AUTO_FOCUS_RANGE_NORMAL = 1,
+ V4L2_AUTO_FOCUS_RANGE_MACRO = 2,
+ V4L2_AUTO_FOCUS_RANGE_INFINITY = 3,
+};
+
+#define V4L2_CID_PAN_SPEED (V4L2_CID_CAMERA_CLASS_BASE+32)
+#define V4L2_CID_TILT_SPEED (V4L2_CID_CAMERA_CLASS_BASE+33)
+
+/* FM Modulator class control IDs */
+
+#define V4L2_CID_FM_TX_CLASS_BASE (V4L2_CTRL_CLASS_FM_TX | 0x900)
+#define V4L2_CID_FM_TX_CLASS (V4L2_CTRL_CLASS_FM_TX | 1)
+
+#define V4L2_CID_RDS_TX_DEVIATION (V4L2_CID_FM_TX_CLASS_BASE + 1)
+#define V4L2_CID_RDS_TX_PI (V4L2_CID_FM_TX_CLASS_BASE + 2)
+#define V4L2_CID_RDS_TX_PTY (V4L2_CID_FM_TX_CLASS_BASE + 3)
+#define V4L2_CID_RDS_TX_PS_NAME (V4L2_CID_FM_TX_CLASS_BASE + 5)
+#define V4L2_CID_RDS_TX_RADIO_TEXT (V4L2_CID_FM_TX_CLASS_BASE + 6)
+#define V4L2_CID_RDS_TX_MONO_STEREO (V4L2_CID_FM_TX_CLASS_BASE + 7)
+#define V4L2_CID_RDS_TX_ARTIFICIAL_HEAD (V4L2_CID_FM_TX_CLASS_BASE + 8)
+#define V4L2_CID_RDS_TX_COMPRESSED (V4L2_CID_FM_TX_CLASS_BASE + 9)
+#define V4L2_CID_RDS_TX_DYNAMIC_PTY (V4L2_CID_FM_TX_CLASS_BASE + 10)
+#define V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT (V4L2_CID_FM_TX_CLASS_BASE + 11)
+#define V4L2_CID_RDS_TX_TRAFFIC_PROGRAM (V4L2_CID_FM_TX_CLASS_BASE + 12)
+#define V4L2_CID_RDS_TX_MUSIC_SPEECH (V4L2_CID_FM_TX_CLASS_BASE + 13)
+#define V4L2_CID_RDS_TX_ALT_FREQS_ENABLE (V4L2_CID_FM_TX_CLASS_BASE + 14)
+#define V4L2_CID_RDS_TX_ALT_FREQS (V4L2_CID_FM_TX_CLASS_BASE + 15)
+
+#define V4L2_CID_AUDIO_LIMITER_ENABLED (V4L2_CID_FM_TX_CLASS_BASE + 64)
+#define V4L2_CID_AUDIO_LIMITER_RELEASE_TIME (V4L2_CID_FM_TX_CLASS_BASE + 65)
+#define V4L2_CID_AUDIO_LIMITER_DEVIATION (V4L2_CID_FM_TX_CLASS_BASE + 66)
+
+#define V4L2_CID_AUDIO_COMPRESSION_ENABLED (V4L2_CID_FM_TX_CLASS_BASE + 80)
+#define V4L2_CID_AUDIO_COMPRESSION_GAIN (V4L2_CID_FM_TX_CLASS_BASE + 81)
+#define V4L2_CID_AUDIO_COMPRESSION_THRESHOLD (V4L2_CID_FM_TX_CLASS_BASE + 82)
+#define V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME (V4L2_CID_FM_TX_CLASS_BASE + 83)
+#define V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME (V4L2_CID_FM_TX_CLASS_BASE + 84)
+
+#define V4L2_CID_PILOT_TONE_ENABLED (V4L2_CID_FM_TX_CLASS_BASE + 96)
+#define V4L2_CID_PILOT_TONE_DEVIATION (V4L2_CID_FM_TX_CLASS_BASE + 97)
+#define V4L2_CID_PILOT_TONE_FREQUENCY (V4L2_CID_FM_TX_CLASS_BASE + 98)
+
+#define V4L2_CID_TUNE_PREEMPHASIS (V4L2_CID_FM_TX_CLASS_BASE + 112)
+enum v4l2_preemphasis {
+ V4L2_PREEMPHASIS_DISABLED = 0,
+ V4L2_PREEMPHASIS_50_uS = 1,
+ V4L2_PREEMPHASIS_75_uS = 2,
+};
+#define V4L2_CID_TUNE_POWER_LEVEL (V4L2_CID_FM_TX_CLASS_BASE + 113)
+#define V4L2_CID_TUNE_ANTENNA_CAPACITOR (V4L2_CID_FM_TX_CLASS_BASE + 114)
+
+
+/* Flash and privacy (indicator) light controls */
+
+#define V4L2_CID_FLASH_CLASS_BASE (V4L2_CTRL_CLASS_FLASH | 0x900)
+#define V4L2_CID_FLASH_CLASS (V4L2_CTRL_CLASS_FLASH | 1)
+
+#define V4L2_CID_FLASH_LED_MODE (V4L2_CID_FLASH_CLASS_BASE + 1)
+enum v4l2_flash_led_mode {
+ V4L2_FLASH_LED_MODE_NONE,
+ V4L2_FLASH_LED_MODE_FLASH,
+ V4L2_FLASH_LED_MODE_TORCH,
+};
+
+#define V4L2_CID_FLASH_STROBE_SOURCE (V4L2_CID_FLASH_CLASS_BASE + 2)
+enum v4l2_flash_strobe_source {
+ V4L2_FLASH_STROBE_SOURCE_SOFTWARE,
+ V4L2_FLASH_STROBE_SOURCE_EXTERNAL,
+};
+
+#define V4L2_CID_FLASH_STROBE (V4L2_CID_FLASH_CLASS_BASE + 3)
+#define V4L2_CID_FLASH_STROBE_STOP (V4L2_CID_FLASH_CLASS_BASE + 4)
+#define V4L2_CID_FLASH_STROBE_STATUS (V4L2_CID_FLASH_CLASS_BASE + 5)
+
+#define V4L2_CID_FLASH_TIMEOUT (V4L2_CID_FLASH_CLASS_BASE + 6)
+#define V4L2_CID_FLASH_INTENSITY (V4L2_CID_FLASH_CLASS_BASE + 7)
+#define V4L2_CID_FLASH_TORCH_INTENSITY (V4L2_CID_FLASH_CLASS_BASE + 8)
+#define V4L2_CID_FLASH_INDICATOR_INTENSITY (V4L2_CID_FLASH_CLASS_BASE + 9)
+
+#define V4L2_CID_FLASH_FAULT (V4L2_CID_FLASH_CLASS_BASE + 10)
+#define V4L2_FLASH_FAULT_OVER_VOLTAGE (1 << 0)
+#define V4L2_FLASH_FAULT_TIMEOUT (1 << 1)
+#define V4L2_FLASH_FAULT_OVER_TEMPERATURE (1 << 2)
+#define V4L2_FLASH_FAULT_SHORT_CIRCUIT (1 << 3)
+#define V4L2_FLASH_FAULT_OVER_CURRENT (1 << 4)
+#define V4L2_FLASH_FAULT_INDICATOR (1 << 5)
+#define V4L2_FLASH_FAULT_UNDER_VOLTAGE (1 << 6)
+#define V4L2_FLASH_FAULT_INPUT_VOLTAGE (1 << 7)
+#define V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE (1 << 8)
+
+#define V4L2_CID_FLASH_CHARGE (V4L2_CID_FLASH_CLASS_BASE + 11)
+#define V4L2_CID_FLASH_READY (V4L2_CID_FLASH_CLASS_BASE + 12)
+
+
+/* JPEG-class control IDs */
+
+#define V4L2_CID_JPEG_CLASS_BASE (V4L2_CTRL_CLASS_JPEG | 0x900)
+#define V4L2_CID_JPEG_CLASS (V4L2_CTRL_CLASS_JPEG | 1)
+
+#define V4L2_CID_JPEG_CHROMA_SUBSAMPLING (V4L2_CID_JPEG_CLASS_BASE + 1)
+enum v4l2_jpeg_chroma_subsampling {
+ V4L2_JPEG_CHROMA_SUBSAMPLING_444 = 0,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_422 = 1,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_420 = 2,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_411 = 3,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_410 = 4,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY = 5,
+};
+#define V4L2_CID_JPEG_RESTART_INTERVAL (V4L2_CID_JPEG_CLASS_BASE + 2)
+#define V4L2_CID_JPEG_COMPRESSION_QUALITY (V4L2_CID_JPEG_CLASS_BASE + 3)
+
+#define V4L2_CID_JPEG_ACTIVE_MARKER (V4L2_CID_JPEG_CLASS_BASE + 4)
+#define V4L2_JPEG_ACTIVE_MARKER_APP0 (1 << 0)
+#define V4L2_JPEG_ACTIVE_MARKER_APP1 (1 << 1)
+#define V4L2_JPEG_ACTIVE_MARKER_COM (1 << 16)
+#define V4L2_JPEG_ACTIVE_MARKER_DQT (1 << 17)
+#define V4L2_JPEG_ACTIVE_MARKER_DHT (1 << 18)
+
+
+/* Image source controls */
+#define V4L2_CID_IMAGE_SOURCE_CLASS_BASE (V4L2_CTRL_CLASS_IMAGE_SOURCE | 0x900)
+#define V4L2_CID_IMAGE_SOURCE_CLASS (V4L2_CTRL_CLASS_IMAGE_SOURCE | 1)
+
+#define V4L2_CID_VBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 1)
+#define V4L2_CID_HBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 2)
+#define V4L2_CID_ANALOGUE_GAIN (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 3)
+#define V4L2_CID_TEST_PATTERN_RED (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 4)
+#define V4L2_CID_TEST_PATTERN_GREENR (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 5)
+#define V4L2_CID_TEST_PATTERN_BLUE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6)
+#define V4L2_CID_TEST_PATTERN_GREENB (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7)
+
+
+/* Image processing controls */
+
+#define V4L2_CID_IMAGE_PROC_CLASS_BASE (V4L2_CTRL_CLASS_IMAGE_PROC | 0x900)
+#define V4L2_CID_IMAGE_PROC_CLASS (V4L2_CTRL_CLASS_IMAGE_PROC | 1)
+
+#define V4L2_CID_LINK_FREQ (V4L2_CID_IMAGE_PROC_CLASS_BASE + 1)
+#define V4L2_CID_PIXEL_RATE (V4L2_CID_IMAGE_PROC_CLASS_BASE + 2)
+#define V4L2_CID_TEST_PATTERN (V4L2_CID_IMAGE_PROC_CLASS_BASE + 3)
+#define V4L2_CID_DEINTERLACING_MODE (V4L2_CID_IMAGE_PROC_CLASS_BASE + 4)
+#define V4L2_CID_DIGITAL_GAIN (V4L2_CID_IMAGE_PROC_CLASS_BASE + 5)
+
+/* DV-class control IDs defined by V4L2 */
+#define V4L2_CID_DV_CLASS_BASE (V4L2_CTRL_CLASS_DV | 0x900)
+#define V4L2_CID_DV_CLASS (V4L2_CTRL_CLASS_DV | 1)
+
+#define V4L2_CID_DV_TX_HOTPLUG (V4L2_CID_DV_CLASS_BASE + 1)
+#define V4L2_CID_DV_TX_RXSENSE (V4L2_CID_DV_CLASS_BASE + 2)
+#define V4L2_CID_DV_TX_EDID_PRESENT (V4L2_CID_DV_CLASS_BASE + 3)
+#define V4L2_CID_DV_TX_MODE (V4L2_CID_DV_CLASS_BASE + 4)
+enum v4l2_dv_tx_mode {
+ V4L2_DV_TX_MODE_DVI_D = 0,
+ V4L2_DV_TX_MODE_HDMI = 1,
+};
+#define V4L2_CID_DV_TX_RGB_RANGE (V4L2_CID_DV_CLASS_BASE + 5)
+enum v4l2_dv_rgb_range {
+ V4L2_DV_RGB_RANGE_AUTO = 0,
+ V4L2_DV_RGB_RANGE_LIMITED = 1,
+ V4L2_DV_RGB_RANGE_FULL = 2,
+};
+
+#define V4L2_CID_DV_TX_IT_CONTENT_TYPE (V4L2_CID_DV_CLASS_BASE + 6)
+enum v4l2_dv_it_content_type {
+ V4L2_DV_IT_CONTENT_TYPE_GRAPHICS = 0,
+ V4L2_DV_IT_CONTENT_TYPE_PHOTO = 1,
+ V4L2_DV_IT_CONTENT_TYPE_CINEMA = 2,
+ V4L2_DV_IT_CONTENT_TYPE_GAME = 3,
+ V4L2_DV_IT_CONTENT_TYPE_NO_ITC = 4,
+};
+
+#define V4L2_CID_DV_RX_POWER_PRESENT (V4L2_CID_DV_CLASS_BASE + 100)
+#define V4L2_CID_DV_RX_RGB_RANGE (V4L2_CID_DV_CLASS_BASE + 101)
+#define V4L2_CID_DV_RX_IT_CONTENT_TYPE (V4L2_CID_DV_CLASS_BASE + 102)
+
+#define V4L2_CID_FM_RX_CLASS_BASE (V4L2_CTRL_CLASS_FM_RX | 0x900)
+#define V4L2_CID_FM_RX_CLASS (V4L2_CTRL_CLASS_FM_RX | 1)
+
+#define V4L2_CID_TUNE_DEEMPHASIS (V4L2_CID_FM_RX_CLASS_BASE + 1)
+enum v4l2_deemphasis {
+ V4L2_DEEMPHASIS_DISABLED = V4L2_PREEMPHASIS_DISABLED,
+ V4L2_DEEMPHASIS_50_uS = V4L2_PREEMPHASIS_50_uS,
+ V4L2_DEEMPHASIS_75_uS = V4L2_PREEMPHASIS_75_uS,
+};
+
+#define V4L2_CID_RDS_RECEPTION (V4L2_CID_FM_RX_CLASS_BASE + 2)
+#define V4L2_CID_RDS_RX_PTY (V4L2_CID_FM_RX_CLASS_BASE + 3)
+#define V4L2_CID_RDS_RX_PS_NAME (V4L2_CID_FM_RX_CLASS_BASE + 4)
+#define V4L2_CID_RDS_RX_RADIO_TEXT (V4L2_CID_FM_RX_CLASS_BASE + 5)
+#define V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT (V4L2_CID_FM_RX_CLASS_BASE + 6)
+#define V4L2_CID_RDS_RX_TRAFFIC_PROGRAM (V4L2_CID_FM_RX_CLASS_BASE + 7)
+#define V4L2_CID_RDS_RX_MUSIC_SPEECH (V4L2_CID_FM_RX_CLASS_BASE + 8)
+
+#define V4L2_CID_RF_TUNER_CLASS_BASE (V4L2_CTRL_CLASS_RF_TUNER | 0x900)
+#define V4L2_CID_RF_TUNER_CLASS (V4L2_CTRL_CLASS_RF_TUNER | 1)
+
+#define V4L2_CID_RF_TUNER_BANDWIDTH_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 11)
+#define V4L2_CID_RF_TUNER_BANDWIDTH (V4L2_CID_RF_TUNER_CLASS_BASE + 12)
+#define V4L2_CID_RF_TUNER_RF_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 32)
+#define V4L2_CID_RF_TUNER_LNA_GAIN_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 41)
+#define V4L2_CID_RF_TUNER_LNA_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 42)
+#define V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 51)
+#define V4L2_CID_RF_TUNER_MIXER_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 52)
+#define V4L2_CID_RF_TUNER_IF_GAIN_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 61)
+#define V4L2_CID_RF_TUNER_IF_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 62)
+#define V4L2_CID_RF_TUNER_PLL_LOCK (V4L2_CID_RF_TUNER_CLASS_BASE + 91)
+
+
+/* Detection-class control IDs defined by V4L2 */
+#define V4L2_CID_DETECT_CLASS_BASE (V4L2_CTRL_CLASS_DETECT | 0x900)
+#define V4L2_CID_DETECT_CLASS (V4L2_CTRL_CLASS_DETECT | 1)
+
+#define V4L2_CID_DETECT_MD_MODE (V4L2_CID_DETECT_CLASS_BASE + 1)
+enum v4l2_detect_md_mode {
+ V4L2_DETECT_MD_MODE_DISABLED = 0,
+ V4L2_DETECT_MD_MODE_GLOBAL = 1,
+ V4L2_DETECT_MD_MODE_THRESHOLD_GRID = 2,
+ V4L2_DETECT_MD_MODE_REGION_GRID = 3,
+};
+#define V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD (V4L2_CID_DETECT_CLASS_BASE + 2)
+#define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3)
+#define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4)
+
+#endif
diff --git a/gst-v4l2/ext/videodev2.h b/gst-v4l2/ext/videodev2.h
new file mode 100644
index 0000000..c1df282
--- /dev/null
+++ b/gst-v4l2/ext/videodev2.h
@@ -0,0 +1,2410 @@
+/*
+ * Video for Linux Two header file
+ *
+ * Copyright (C) 1999-2012 the contributors
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Alternatively you can redistribute this file under the terms of the
+ * BSD license as stated below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Header file for v4l or V4L2 drivers and applications
+ * with public API.
+ * All kernel-specific stuff were moved to media/v4l2-dev.h, so
+ * no #if __KERNEL tests are allowed here
+ *
+ * See https://linuxtv.org for more info
+ *
+ * Author: Bill Dirks
+ * Justin Schoeman
+ * Hans Verkuil
+ * et al.
+ */
+#ifndef _UAPI__LINUX_VIDEODEV2_H
+#define _UAPI__LINUX_VIDEODEV2_H
+
+#include <sys/time.h>
+#include <sys/ioctl.h>
+
+#include "ext/types-compat.h"
+#include "ext/v4l2-common.h"
+#include "ext/v4l2-controls.h"
+
+/*
+ * Common stuff for both V4L1 and V4L2
+ * Moved from videodev.h
+ */
+#ifdef USE_V4L2_TARGET_NV
+/*
+ * As video decoder base class has to queue all the decoded frames
+ * between IDR interval for reverse playback, buffers are increased
+ * to 64 to support IDR interval till 60. As per the experiments,
+ * (IDR interval + 4) buffers are required at decoder capture plane
+ * for reverse playback
+ */
+#define VIDEO_MAX_FRAME 64
+#else
+#define VIDEO_MAX_FRAME 32 /* upstream kernel default */
+#endif
+#define VIDEO_MAX_PLANES 8 /* upper bound on planes per multi-planar buffer */
+
+/*
+ * M I S C E L L A N E O U S
+ */
+
+/* Four-character-code (FOURCC) */
+/* Packs four ASCII characters into a little-endian __u32 format tag. */
+#define v4l2_fourcc(a, b, c, d)\
+ ((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24))
+/*
+ * Big-endian variant of a fourcc: sets the most significant bit.
+ * NOTE(review): (1 << 31) left-shifts into the sign bit of a signed int,
+ * which is undefined behaviour in C; later upstream kernels spell this
+ * (1U << 31). Kept byte-identical here to match the vendored snapshot.
+ */
+#define v4l2_fourcc_be(a, b, c, d) (v4l2_fourcc(a, b, c, d) | (1 << 31))
+
+/*
+ * E N U M S
+ */
+/* Interlacing / field order of a frame; stored in v4l2_pix_format.field. */
+enum v4l2_field {
+ V4L2_FIELD_ANY = 0, /* driver can choose from none,
+ top, bottom, interlaced
+ depending on whatever it thinks
+ is approximate ... */
+ V4L2_FIELD_NONE = 1, /* this device has no fields ... */
+ V4L2_FIELD_TOP = 2, /* top field only */
+ V4L2_FIELD_BOTTOM = 3, /* bottom field only */
+ V4L2_FIELD_INTERLACED = 4, /* both fields interlaced */
+ V4L2_FIELD_SEQ_TB = 5, /* both fields sequential into one
+ buffer, top-bottom order */
+ V4L2_FIELD_SEQ_BT = 6, /* same as above + bottom-top order */
+ V4L2_FIELD_ALTERNATE = 7, /* both fields alternating into
+ separate buffers */
+ V4L2_FIELD_INTERLACED_TB = 8, /* both fields interlaced, top field
+ first and the top field is
+ transmitted first */
+ V4L2_FIELD_INTERLACED_BT = 9, /* both fields interlaced, top field
+ first and the bottom field is
+ transmitted first */
+};
+/* Predicates classifying enum v4l2_field values by which field(s) they carry. */
+#define V4L2_FIELD_HAS_TOP(field) \
+ ((field) == V4L2_FIELD_TOP ||\
+ (field) == V4L2_FIELD_INTERLACED ||\
+ (field) == V4L2_FIELD_INTERLACED_TB ||\
+ (field) == V4L2_FIELD_INTERLACED_BT ||\
+ (field) == V4L2_FIELD_SEQ_TB ||\
+ (field) == V4L2_FIELD_SEQ_BT)
+#define V4L2_FIELD_HAS_BOTTOM(field) \
+ ((field) == V4L2_FIELD_BOTTOM ||\
+ (field) == V4L2_FIELD_INTERLACED ||\
+ (field) == V4L2_FIELD_INTERLACED_TB ||\
+ (field) == V4L2_FIELD_INTERLACED_BT ||\
+ (field) == V4L2_FIELD_SEQ_TB ||\
+ (field) == V4L2_FIELD_SEQ_BT)
+#define V4L2_FIELD_HAS_BOTH(field) \
+ ((field) == V4L2_FIELD_INTERLACED ||\
+ (field) == V4L2_FIELD_INTERLACED_TB ||\
+ (field) == V4L2_FIELD_INTERLACED_BT ||\
+ (field) == V4L2_FIELD_SEQ_TB ||\
+ (field) == V4L2_FIELD_SEQ_BT)
+#define V4L2_FIELD_HAS_T_OR_B(field) \
+ ((field) == V4L2_FIELD_BOTTOM ||\
+ (field) == V4L2_FIELD_TOP ||\
+ (field) == V4L2_FIELD_ALTERNATE)
+
+/* Stream / buffer-queue type; the *_MPLANE values are multi-planar variants. */
+enum v4l2_buf_type {
+ V4L2_BUF_TYPE_VIDEO_CAPTURE = 1,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT = 2,
+ V4L2_BUF_TYPE_VIDEO_OVERLAY = 3,
+ V4L2_BUF_TYPE_VBI_CAPTURE = 4,
+ V4L2_BUF_TYPE_VBI_OUTPUT = 5,
+ V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6,
+ V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10,
+ V4L2_BUF_TYPE_SDR_CAPTURE = 11,
+ V4L2_BUF_TYPE_SDR_OUTPUT = 12,
+ V4L2_BUF_TYPE_META_CAPTURE = 13,
+ /* Deprecated, do not use */
+ V4L2_BUF_TYPE_PRIVATE = 0x80,
+};
+
+/* True for the multi-planar buffer types. */
+#define V4L2_TYPE_IS_MULTIPLANAR(type) \
+ ((type) == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE \
+ || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+
+/* True for types where the application supplies data to the device. */
+#define V4L2_TYPE_IS_OUTPUT(type) \
+ ((type) == V4L2_BUF_TYPE_VIDEO_OUTPUT \
+ || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE \
+ || (type) == V4L2_BUF_TYPE_VIDEO_OVERLAY \
+ || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY \
+ || (type) == V4L2_BUF_TYPE_VBI_OUTPUT \
+ || (type) == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT \
+ || (type) == V4L2_BUF_TYPE_SDR_OUTPUT)
+
+/* Kind of tuner exposed by a device. */
+enum v4l2_tuner_type {
+ V4L2_TUNER_RADIO = 1,
+ V4L2_TUNER_ANALOG_TV = 2,
+ V4L2_TUNER_DIGITAL_TV = 3,
+ V4L2_TUNER_SDR = 4,
+ V4L2_TUNER_RF = 5,
+};
+
+/* Deprecated, do not use */
+#define V4L2_TUNER_ADC V4L2_TUNER_SDR
+
+/* How buffer memory is allocated / shared between application and driver. */
+enum v4l2_memory {
+ V4L2_MEMORY_MMAP = 1,
+ V4L2_MEMORY_USERPTR = 2,
+ V4L2_MEMORY_OVERLAY = 3,
+ V4L2_MEMORY_DMABUF = 4,
+};
+
+/* see also http://vektor.theorem.ca/graphics/ycbcr/ */
+/* NOTE: numeric values below are kernel ABI; never renumber. */
+enum v4l2_colorspace {
+ /*
+ * Default colorspace, i.e. let the driver figure it out.
+ * Can only be used with video capture.
+ */
+ V4L2_COLORSPACE_DEFAULT = 0,
+
+ /* SMPTE 170M: used for broadcast NTSC/PAL SDTV */
+ V4L2_COLORSPACE_SMPTE170M = 1,
+
+ /* Obsolete pre-1998 SMPTE 240M HDTV standard, superseded by Rec 709 */
+ V4L2_COLORSPACE_SMPTE240M = 2,
+
+ /* Rec.709: used for HDTV */
+ V4L2_COLORSPACE_REC709 = 3,
+
+ /*
+ * Deprecated, do not use. No driver will ever return this. This was
+ * based on a misunderstanding of the bt878 datasheet.
+ */
+ V4L2_COLORSPACE_BT878 = 4,
+
+ /*
+ * NTSC 1953 colorspace. This only makes sense when dealing with
+ * really, really old NTSC recordings. Superseded by SMPTE 170M.
+ */
+ V4L2_COLORSPACE_470_SYSTEM_M = 5,
+
+ /*
+ * EBU Tech 3213 PAL/SECAM colorspace. This only makes sense when
+ * dealing with really old PAL/SECAM recordings. Superseded by
+ * SMPTE 170M.
+ */
+ V4L2_COLORSPACE_470_SYSTEM_BG = 6,
+
+ /*
+ * Effectively shorthand for V4L2_COLORSPACE_SRGB, V4L2_YCBCR_ENC_601
+ * and V4L2_QUANTIZATION_FULL_RANGE. To be used for (Motion-)JPEG.
+ */
+ V4L2_COLORSPACE_JPEG = 7,
+
+ /* For RGB colorspaces such as produces by most webcams. */
+ V4L2_COLORSPACE_SRGB = 8,
+
+ /* AdobeRGB colorspace */
+ V4L2_COLORSPACE_ADOBERGB = 9,
+
+ /* BT.2020 colorspace, used for UHDTV. */
+ V4L2_COLORSPACE_BT2020 = 10,
+
+ /* Raw colorspace: for RAW unprocessed images */
+ V4L2_COLORSPACE_RAW = 11,
+
+ /* DCI-P3 colorspace, used by cinema projectors */
+ V4L2_COLORSPACE_DCI_P3 = 12,
+};
+
+/*
+ * Determine how COLORSPACE_DEFAULT should map to a proper colorspace.
+ * This depends on whether this is a SDTV image (use SMPTE 170M), an
+ * HDTV image (use Rec. 709), or something else (use sRGB).
+ * Each argument is evaluated at most once.
+ */
+#define V4L2_MAP_COLORSPACE_DEFAULT(is_sdtv, is_hdtv) \
+ ((is_sdtv) ? V4L2_COLORSPACE_SMPTE170M : \
+ ((is_hdtv) ? V4L2_COLORSPACE_REC709 : V4L2_COLORSPACE_SRGB))
+
+/* Opto-electronic transfer function (gamma curve) identifiers. */
+enum v4l2_xfer_func {
+ /*
+ * Mapping of V4L2_XFER_FUNC_DEFAULT to actual transfer functions
+ * for the various colorspaces:
+ *
+ * V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
+ * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_REC709 and
+ * V4L2_COLORSPACE_BT2020: V4L2_XFER_FUNC_709
+ *
+ * V4L2_COLORSPACE_SRGB, V4L2_COLORSPACE_JPEG: V4L2_XFER_FUNC_SRGB
+ *
+ * V4L2_COLORSPACE_ADOBERGB: V4L2_XFER_FUNC_ADOBERGB
+ *
+ * V4L2_COLORSPACE_SMPTE240M: V4L2_XFER_FUNC_SMPTE240M
+ *
+ * V4L2_COLORSPACE_RAW: V4L2_XFER_FUNC_NONE
+ *
+ * V4L2_COLORSPACE_DCI_P3: V4L2_XFER_FUNC_DCI_P3
+ */
+ V4L2_XFER_FUNC_DEFAULT = 0,
+ V4L2_XFER_FUNC_709 = 1,
+ V4L2_XFER_FUNC_SRGB = 2,
+ V4L2_XFER_FUNC_ADOBERGB = 3,
+ V4L2_XFER_FUNC_SMPTE240M = 4,
+ V4L2_XFER_FUNC_NONE = 5,
+ V4L2_XFER_FUNC_DCI_P3 = 6,
+ V4L2_XFER_FUNC_SMPTE2084 = 7,
+};
+
+/*
+ * Determine how XFER_FUNC_DEFAULT should map to a proper transfer function.
+ * This depends on the colorspace.
+ */
+#define V4L2_MAP_XFER_FUNC_DEFAULT(colsp) \
+ ((colsp) == V4L2_COLORSPACE_ADOBERGB ? V4L2_XFER_FUNC_ADOBERGB : \
+ ((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_XFER_FUNC_SMPTE240M : \
+ ((colsp) == V4L2_COLORSPACE_DCI_P3 ? V4L2_XFER_FUNC_DCI_P3 : \
+ ((colsp) == V4L2_COLORSPACE_RAW ? V4L2_XFER_FUNC_NONE : \
+ ((colsp) == V4L2_COLORSPACE_SRGB || (colsp) == V4L2_COLORSPACE_JPEG ? \
+ V4L2_XFER_FUNC_SRGB : V4L2_XFER_FUNC_709)))))
+
+/* Y'CbCr encoding: the matrix used to derive luma/chroma from R'G'B'. */
+enum v4l2_ycbcr_encoding {
+ /*
+ * Mapping of V4L2_YCBCR_ENC_DEFAULT to actual encodings for the
+ * various colorspaces:
+ *
+ * V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
+ * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_SRGB,
+ * V4L2_COLORSPACE_ADOBERGB and V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
+ *
+ * V4L2_COLORSPACE_REC709 and V4L2_COLORSPACE_DCI_P3: V4L2_YCBCR_ENC_709
+ *
+ * V4L2_COLORSPACE_BT2020: V4L2_YCBCR_ENC_BT2020
+ *
+ * V4L2_COLORSPACE_SMPTE240M: V4L2_YCBCR_ENC_SMPTE240M
+ */
+ V4L2_YCBCR_ENC_DEFAULT = 0,
+
+ /* ITU-R 601 -- SDTV */
+ V4L2_YCBCR_ENC_601 = 1,
+
+ /* Rec. 709 -- HDTV */
+ V4L2_YCBCR_ENC_709 = 2,
+
+ /* ITU-R 601/EN 61966-2-4 Extended Gamut -- SDTV */
+ V4L2_YCBCR_ENC_XV601 = 3,
+
+ /* Rec. 709/EN 61966-2-4 Extended Gamut -- HDTV */
+ V4L2_YCBCR_ENC_XV709 = 4,
+
+#ifndef __KERNEL__
+ /*
+ * sYCC (Y'CbCr encoding of sRGB), identical to ENC_601. It was added
+ * originally due to a misunderstanding of the sYCC standard. It should
+ * not be used, instead use V4L2_YCBCR_ENC_601.
+ */
+ V4L2_YCBCR_ENC_SYCC = 5,
+#endif
+
+ /* BT.2020 Non-constant Luminance Y'CbCr */
+ V4L2_YCBCR_ENC_BT2020 = 6,
+
+ /* BT.2020 Constant Luminance Y'CbcCrc */
+ V4L2_YCBCR_ENC_BT2020_CONST_LUM = 7,
+
+ /* SMPTE 240M -- Obsolete HDTV */
+ V4L2_YCBCR_ENC_SMPTE240M = 8,
+};
+
+/*
+ * enum v4l2_hsv_encoding values should not collide with the ones from
+ * enum v4l2_ycbcr_encoding.
+ */
+enum v4l2_hsv_encoding {
+
+ /* Hue mapped to 0 - 179 */
+ V4L2_HSV_ENC_180 = 128,
+
+ /* Hue mapped to 0-255 */
+ V4L2_HSV_ENC_256 = 129,
+};
+
+/*
+ * Determine how YCBCR_ENC_DEFAULT should map to a proper Y'CbCr encoding.
+ * This depends on the colorspace.
+ */
+#define V4L2_MAP_YCBCR_ENC_DEFAULT(colsp) \
+ (((colsp) == V4L2_COLORSPACE_REC709 || \
+ (colsp) == V4L2_COLORSPACE_DCI_P3) ? V4L2_YCBCR_ENC_709 : \
+ ((colsp) == V4L2_COLORSPACE_BT2020 ? V4L2_YCBCR_ENC_BT2020 : \
+ ((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_YCBCR_ENC_SMPTE240M : \
+ V4L2_YCBCR_ENC_601)))
+
+/* Full-range vs. limited (broadcast) range sample quantization. */
+enum v4l2_quantization {
+ /*
+ * The default for R'G'B' quantization is always full range, except
+ * for the BT2020 colorspace. For Y'CbCr the quantization is always
+ * limited range, except for COLORSPACE_JPEG: this is full range.
+ */
+ V4L2_QUANTIZATION_DEFAULT = 0,
+ V4L2_QUANTIZATION_FULL_RANGE = 1,
+ V4L2_QUANTIZATION_LIM_RANGE = 2,
+};
+
+/*
+ * Determine how QUANTIZATION_DEFAULT should map to a proper quantization.
+ * This depends on whether the image is RGB or not, the colorspace and the
+ * Y'CbCr encoding.
+ */
+#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \
+ (((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
+ V4L2_QUANTIZATION_LIM_RANGE : \
+ (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
+ V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
+
+/* Priority levels for concurrent users of a device. */
+enum v4l2_priority {
+ V4L2_PRIORITY_UNSET = 0, /* not initialized */
+ V4L2_PRIORITY_BACKGROUND = 1,
+ V4L2_PRIORITY_INTERACTIVE = 2,
+ V4L2_PRIORITY_RECORD = 3,
+ V4L2_PRIORITY_DEFAULT = V4L2_PRIORITY_INTERACTIVE,
+};
+
+/* Rectangle: signed top-left corner (left, top) plus unsigned size. */
+struct v4l2_rect {
+ __s32 left;
+ __s32 top;
+ __u32 width;
+ __u32 height;
+};
+
+/* Rational number, e.g. for frame intervals (numerator/denominator). */
+struct v4l2_fract {
+ __u32 numerator;
+ __u32 denominator;
+};
+
+/**
+ * struct v4l2_capability - Describes V4L2 device caps returned by VIDIOC_QUERYCAP
+ *
+ * @driver: name of the driver module (e.g. "bttv")
+ * @card: name of the card (e.g. "Hauppauge WinTV")
+ * @bus_info: name of the bus (e.g. "PCI:" + pci_name(pci_dev) )
+ * @version: KERNEL_VERSION
+ * @capabilities: capabilities of the physical device as a whole
+ * @device_caps: capabilities accessed via this particular device (node)
+ * @reserved: reserved fields for future extensions
+ */
+struct v4l2_capability {
+ __u8 driver[16];
+ __u8 card[32];
+ __u8 bus_info[32];
+ __u32 version;
+ __u32 capabilities;
+ __u32 device_caps; /* valid when capabilities has V4L2_CAP_DEVICE_CAPS */
+ __u32 reserved[3];
+};
+
+/* Values for 'capabilities' field */
+/* (the same flags appear in 'device_caps'; see V4L2_CAP_DEVICE_CAPS below) */
+#define V4L2_CAP_VIDEO_CAPTURE 0x00000001 /* Is a video capture device */
+#define V4L2_CAP_VIDEO_OUTPUT 0x00000002 /* Is a video output device */
+#define V4L2_CAP_VIDEO_OVERLAY 0x00000004 /* Can do video overlay */
+#define V4L2_CAP_VBI_CAPTURE 0x00000010 /* Is a raw VBI capture device */
+#define V4L2_CAP_VBI_OUTPUT 0x00000020 /* Is a raw VBI output device */
+#define V4L2_CAP_SLICED_VBI_CAPTURE 0x00000040 /* Is a sliced VBI capture device */
+#define V4L2_CAP_SLICED_VBI_OUTPUT 0x00000080 /* Is a sliced VBI output device */
+#define V4L2_CAP_RDS_CAPTURE 0x00000100 /* RDS data capture */
+#define V4L2_CAP_VIDEO_OUTPUT_OVERLAY 0x00000200 /* Can do video output overlay */
+#define V4L2_CAP_HW_FREQ_SEEK 0x00000400 /* Can do hardware frequency seek */
+#define V4L2_CAP_RDS_OUTPUT 0x00000800 /* Is an RDS encoder */
+
+/* Is a video capture device that supports multiplanar formats */
+#define V4L2_CAP_VIDEO_CAPTURE_MPLANE 0x00001000
+/* Is a video output device that supports multiplanar formats */
+#define V4L2_CAP_VIDEO_OUTPUT_MPLANE 0x00002000
+/* Is a video mem-to-mem device that supports multiplanar formats */
+#define V4L2_CAP_VIDEO_M2M_MPLANE 0x00004000
+/* Is a video mem-to-mem device */
+#define V4L2_CAP_VIDEO_M2M 0x00008000
+
+#define V4L2_CAP_TUNER 0x00010000 /* has a tuner */
+#define V4L2_CAP_AUDIO 0x00020000 /* has audio support */
+#define V4L2_CAP_RADIO 0x00040000 /* is a radio device */
+#define V4L2_CAP_MODULATOR 0x00080000 /* has a modulator */
+
+#define V4L2_CAP_SDR_CAPTURE 0x00100000 /* Is a SDR capture device */
+#define V4L2_CAP_EXT_PIX_FORMAT 0x00200000 /* Supports the extended pixel format */
+#define V4L2_CAP_SDR_OUTPUT 0x00400000 /* Is a SDR output device */
+#define V4L2_CAP_META_CAPTURE 0x00800000 /* Is a metadata capture device */
+
+#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */
+#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */
+#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */
+
+#define V4L2_CAP_TOUCH 0x10000000 /* Is a touch device */
+
+#define V4L2_CAP_DEVICE_CAPS 0x80000000 /* sets device capabilities field */
+
+/*
+ * V I D E O I M A G E F O R M A T
+ */
+/* Single-planar image format description. */
+struct v4l2_pix_format {
+ __u32 width;
+ __u32 height;
+ __u32 pixelformat; /* V4L2_PIX_FMT_* fourcc */
+ __u32 field; /* enum v4l2_field */
+ __u32 bytesperline; /* for padding, zero if unused */
+ __u32 sizeimage;
+ __u32 colorspace; /* enum v4l2_colorspace */
+ __u32 priv; /* private data, depends on pixelformat */
+ __u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */
+ union {
+ /* enum v4l2_ycbcr_encoding */
+ __u32 ycbcr_enc;
+ /* enum v4l2_hsv_encoding */
+ __u32 hsv_enc;
+ };
+ __u32 quantization; /* enum v4l2_quantization */
+ __u32 xfer_func; /* enum v4l2_xfer_func */
+};
+
+/* Pixel format FOURCC depth Description */
+/* NOTE(review): every V4L2_PIX_FMT_* / V4L2_SDR_FMT_* fourcc value below is
+ * kernel ABI -- keep them byte-identical to the upstream linux/videodev2.h. */
+
+/* RGB formats */
+#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R', 'G', 'B', '1') /* 8 RGB-3-3-2 */
+#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R', '4', '4', '4') /* 16 xxxxrrrr ggggbbbb */
+#define V4L2_PIX_FMT_ARGB444 v4l2_fourcc('A', 'R', '1', '2') /* 16 aaaarrrr ggggbbbb */
+#define V4L2_PIX_FMT_XRGB444 v4l2_fourcc('X', 'R', '1', '2') /* 16 xxxxrrrr ggggbbbb */
+#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */
+#define V4L2_PIX_FMT_ARGB555 v4l2_fourcc('A', 'R', '1', '5') /* 16 ARGB-1-5-5-5 */
+#define V4L2_PIX_FMT_XRGB555 v4l2_fourcc('X', 'R', '1', '5') /* 16 XRGB-1-5-5-5 */
+#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */
+#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */
+#define V4L2_PIX_FMT_ARGB555X v4l2_fourcc_be('A', 'R', '1', '5') /* 16 ARGB-5-5-5 BE */
+#define V4L2_PIX_FMT_XRGB555X v4l2_fourcc_be('X', 'R', '1', '5') /* 16 XRGB-5-5-5 BE */
+#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */
+#define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */
+#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */
+#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */
+#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */
+#define V4L2_PIX_FMT_ABGR32 v4l2_fourcc('A', 'R', '2', '4') /* 32 BGRA-8-8-8-8 */
+#define V4L2_PIX_FMT_XBGR32 v4l2_fourcc('X', 'R', '2', '4') /* 32 BGRX-8-8-8-8 */
+#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R', 'G', 'B', '4') /* 32 RGB-8-8-8-8 */
+#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4') /* 32 ARGB-8-8-8-8 */
+#define V4L2_PIX_FMT_XRGB32 v4l2_fourcc('B', 'X', '2', '4') /* 32 XRGB-8-8-8-8 */
+
+/* Grey formats */
+#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
+#define V4L2_PIX_FMT_Y4 v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */
+#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */
+#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
+#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */
+#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
+#define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */
+
+/* Grey bit-packed formats */
+#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */
+
+/* Palette formats */
+#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */
+
+/* Chrominance formats */
+#define V4L2_PIX_FMT_UV8 v4l2_fourcc('U', 'V', '8', ' ') /* 8 UV 4:4 */
+
+/* Luminance+Chrominance formats */
+#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */
+#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y', 'Y', 'U', 'V') /* 16 YUV 4:2:2 */
+#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */
+#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16 YUV 4:2:2 */
+#define V4L2_PIX_FMT_VYUY v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16 YUV 4:2:2 */
+#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */
+#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */
+#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */
+#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */
+#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */
+#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */
+#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */
+#define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */
+
+/* two planes -- one Y, one Cr + Cb interleaved */
+#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
+#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */
+#define V4L2_PIX_FMT_NV16 v4l2_fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */
+#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
+#define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */
+#define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */
+
+/* two non contiguous planes - one Y, one Cr + Cb interleaved */
+#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */
+#define V4L2_PIX_FMT_NV21M v4l2_fourcc('N', 'M', '2', '1') /* 21 Y/CrCb 4:2:0 */
+#define V4L2_PIX_FMT_NV16M v4l2_fourcc('N', 'M', '1', '6') /* 16 Y/CbCr 4:2:2 */
+#define V4L2_PIX_FMT_NV61M v4l2_fourcc('N', 'M', '6', '1') /* 16 Y/CrCb 4:2:2 */
+#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 macroblocks */
+#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 macroblocks */
+
+/* three planes - Y Cb, Cr */
+#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */
+#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y', 'V', 'U', '9') /* 9 YVU 4:1:0 */
+#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 12 YVU411 planar */
+#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y', 'U', '1', '2') /* 12 YUV 4:2:0 */
+#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */
+#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */
+
+/* three non contiguous planes - Y, Cb, Cr */
+#define V4L2_PIX_FMT_YUV420M v4l2_fourcc('Y', 'M', '1', '2') /* 12 YUV420 planar */
+#define V4L2_PIX_FMT_YVU420M v4l2_fourcc('Y', 'M', '2', '1') /* 12 YVU420 planar */
+#define V4L2_PIX_FMT_YUV422M v4l2_fourcc('Y', 'M', '1', '6') /* 16 YUV422 planar */
+#define V4L2_PIX_FMT_YVU422M v4l2_fourcc('Y', 'M', '6', '1') /* 16 YVU422 planar */
+#define V4L2_PIX_FMT_YUV444M v4l2_fourcc('Y', 'M', '2', '4') /* 24 YUV444 planar */
+#define V4L2_PIX_FMT_YVU444M v4l2_fourcc('Y', 'M', '4', '2') /* 24 YVU444 planar */
+
+/* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */
+#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
+#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
+#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */
+#define V4L2_PIX_FMT_SRGGB8 v4l2_fourcc('R', 'G', 'G', 'B') /* 8 RGRG.. GBGB.. */
+#define V4L2_PIX_FMT_SBGGR10 v4l2_fourcc('B', 'G', '1', '0') /* 10 BGBG.. GRGR.. */
+#define V4L2_PIX_FMT_SGBRG10 v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */
+#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */
+#define V4L2_PIX_FMT_SRGGB10 v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */
+ /* 10bit raw bayer packed, 5 bytes for every 4 pixels */
+#define V4L2_PIX_FMT_SBGGR10P v4l2_fourcc('p', 'B', 'A', 'A')
+#define V4L2_PIX_FMT_SGBRG10P v4l2_fourcc('p', 'G', 'A', 'A')
+#define V4L2_PIX_FMT_SGRBG10P v4l2_fourcc('p', 'g', 'A', 'A')
+#define V4L2_PIX_FMT_SRGGB10P v4l2_fourcc('p', 'R', 'A', 'A')
+ /* 10bit raw bayer a-law compressed to 8 bits */
+#define V4L2_PIX_FMT_SBGGR10ALAW8 v4l2_fourcc('a', 'B', 'A', '8')
+#define V4L2_PIX_FMT_SGBRG10ALAW8 v4l2_fourcc('a', 'G', 'A', '8')
+#define V4L2_PIX_FMT_SGRBG10ALAW8 v4l2_fourcc('a', 'g', 'A', '8')
+#define V4L2_PIX_FMT_SRGGB10ALAW8 v4l2_fourcc('a', 'R', 'A', '8')
+ /* 10bit raw bayer DPCM compressed to 8 bits */
+#define V4L2_PIX_FMT_SBGGR10DPCM8 v4l2_fourcc('b', 'B', 'A', '8')
+#define V4L2_PIX_FMT_SGBRG10DPCM8 v4l2_fourcc('b', 'G', 'A', '8')
+#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
+#define V4L2_PIX_FMT_SRGGB10DPCM8 v4l2_fourcc('b', 'R', 'A', '8')
+#define V4L2_PIX_FMT_SBGGR12 v4l2_fourcc('B', 'G', '1', '2') /* 12 BGBG.. GRGR.. */
+#define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12 GBGB.. RGRG.. */
+#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */
+#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */
+#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
+#define V4L2_PIX_FMT_SGBRG16 v4l2_fourcc('G', 'B', '1', '6') /* 16 GBGB.. RGRG.. */
+#define V4L2_PIX_FMT_SGRBG16 v4l2_fourcc('G', 'R', '1', '6') /* 16 GRGR.. BGBG.. */
+#define V4L2_PIX_FMT_SRGGB16 v4l2_fourcc('R', 'G', '1', '6') /* 16 RGRG.. GBGB.. */
+
+/* HSV formats */
+#define V4L2_PIX_FMT_HSV24 v4l2_fourcc('H', 'S', 'V', '3')
+#define V4L2_PIX_FMT_HSV32 v4l2_fourcc('H', 'S', 'V', '4')
+
+/* compressed formats */
+#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG */
+#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J', 'P', 'E', 'G') /* JFIF JPEG */
+#define V4L2_PIX_FMT_DV v4l2_fourcc('d', 'v', 's', 'd') /* 1394 */
+#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 Multiplexed */
+#define V4L2_PIX_FMT_H264 v4l2_fourcc('H', '2', '6', '4') /* H264 with start codes */
+#define V4L2_PIX_FMT_H264_NO_SC v4l2_fourcc('A', 'V', 'C', '1') /* H264 without start codes */
+#define V4L2_PIX_FMT_H264_MVC v4l2_fourcc('M', '2', '6', '4') /* H264 MVC */
+#define V4L2_PIX_FMT_H263 v4l2_fourcc('H', '2', '6', '3') /* H263 */
+#define V4L2_PIX_FMT_MPEG1 v4l2_fourcc('M', 'P', 'G', '1') /* MPEG-1 ES */
+#define V4L2_PIX_FMT_MPEG2 v4l2_fourcc('M', 'P', 'G', '2') /* MPEG-2 ES */
+#define V4L2_PIX_FMT_MPEG4 v4l2_fourcc('M', 'P', 'G', '4') /* MPEG-4 part 2 ES */
+#define V4L2_PIX_FMT_XVID v4l2_fourcc('X', 'V', 'I', 'D') /* Xvid */
+#define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */
+#define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */
+#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
+#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
+
+/* Vendor-specific formats */
+#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
+#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */
+#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */
+#define V4L2_PIX_FMT_SN9C20X_I420 v4l2_fourcc('S', '9', '2', '0') /* SN9C20x YUV 4:2:0 */
+#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */
+#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */
+#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */
+#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S', '5', '0', '1') /* YUYV per line */
+#define V4L2_PIX_FMT_SPCA505 v4l2_fourcc('S', '5', '0', '5') /* YYUV per line */
+#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */
+#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */
+#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */
+#define V4L2_PIX_FMT_MR97310A v4l2_fourcc('M', '3', '1', '0') /* compressed BGGR bayer */
+#define V4L2_PIX_FMT_JL2005BCD v4l2_fourcc('J', 'L', '2', '0') /* compressed RGGB bayer */
+#define V4L2_PIX_FMT_SN9C2028 v4l2_fourcc('S', 'O', 'N', 'X') /* compressed GBRG bayer */
+#define V4L2_PIX_FMT_SQ905C v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */
+#define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */
+#define V4L2_PIX_FMT_OV511 v4l2_fourcc('O', '5', '1', '1') /* ov511 JPEG */
+#define V4L2_PIX_FMT_OV518 v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */
+#define V4L2_PIX_FMT_STV0680 v4l2_fourcc('S', '6', '8', '0') /* stv0680 bayer */
+#define V4L2_PIX_FMT_TM6000 v4l2_fourcc('T', 'M', '6', '0') /* tm5600/tm60x0 */
+#define V4L2_PIX_FMT_CIT_YYVYUY v4l2_fourcc('C', 'I', 'T', 'V') /* one line of Y then 1 line of VYUY */
+#define V4L2_PIX_FMT_KONICA420 v4l2_fourcc('K', 'O', 'N', 'I') /* YUV420 planar in blocks of 256 pixels */
+#define V4L2_PIX_FMT_JPGL v4l2_fourcc('J', 'P', 'G', 'L') /* JPEG-Lite */
+#define V4L2_PIX_FMT_SE401 v4l2_fourcc('S', '4', '0', '1') /* se401 janggu compressed rgb */
+#define V4L2_PIX_FMT_S5C_UYVY_JPG v4l2_fourcc('S', '5', 'C', 'I') /* S5C73M3 interleaved UYVY/JPEG */
+#define V4L2_PIX_FMT_Y8I v4l2_fourcc('Y', '8', 'I', ' ') /* Greyscale 8-bit L/R interleaved */
+#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
+#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
+#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
+#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
+
+/* SDR formats - used only for Software Defined Radio devices */
+#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
+#define V4L2_SDR_FMT_CU16LE v4l2_fourcc('C', 'U', '1', '6') /* IQ u16le */
+#define V4L2_SDR_FMT_CS8 v4l2_fourcc('C', 'S', '0', '8') /* complex s8 */
+#define V4L2_SDR_FMT_CS14LE v4l2_fourcc('C', 'S', '1', '4') /* complex s14le */
+#define V4L2_SDR_FMT_RU12LE v4l2_fourcc('R', 'U', '1', '2') /* real u12le */
+#define V4L2_SDR_FMT_PCU16BE v4l2_fourcc('P', 'C', '1', '6') /* planar complex u16be */
+#define V4L2_SDR_FMT_PCU18BE v4l2_fourcc('P', 'C', '1', '8') /* planar complex u18be */
+#define V4L2_SDR_FMT_PCU20BE v4l2_fourcc('P', 'C', '2', '0') /* planar complex u20be */
+
+/* Touch formats - used for Touch devices */
+#define V4L2_TCH_FMT_DELTA_TD16 v4l2_fourcc('T', 'D', '1', '6') /* 16-bit signed deltas */
+#define V4L2_TCH_FMT_DELTA_TD08 v4l2_fourcc('T', 'D', '0', '8') /* 8-bit signed deltas */
+#define V4L2_TCH_FMT_TU16 v4l2_fourcc('T', 'U', '1', '6') /* 16-bit unsigned touch data */
+#define V4L2_TCH_FMT_TU08 v4l2_fourcc('T', 'U', '0', '8') /* 8-bit unsigned touch data */
+
+/* Meta-data formats */
+#define V4L2_META_FMT_VSP1_HGO v4l2_fourcc('V', 'S', 'P', 'H') /* R-Car VSP1 1-D Histogram */
+#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */
+
+/* priv field value to indicates that subsequent fields are valid. */
+#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
+
+/* Flags */
+#define V4L2_PIX_FMT_FLAG_PREMUL_ALPHA 0x00000001
+
+/*
+ * F O R M A T E N U M E R A T I O N
+ */
+/* Descriptor for one enumerated pixel format. */
+struct v4l2_fmtdesc {
+ __u32 index; /* Format number */
+ __u32 type; /* enum v4l2_buf_type */
+ __u32 flags; /* V4L2_FMT_FLAG_* */
+ __u8 description[32]; /* Description string */
+ __u32 pixelformat; /* Format fourcc */
+ __u32 reserved[4];
+};
+
+/* Values for v4l2_fmtdesc.flags */
+#define V4L2_FMT_FLAG_COMPRESSED 0x0001
+#define V4L2_FMT_FLAG_EMULATED 0x0002
+
+ /* Frame Size and frame rate enumeration */
+/*
+ * F R A M E S I Z E E N U M E R A T I O N
+ */
+enum v4l2_frmsizetypes {
+ V4L2_FRMSIZE_TYPE_DISCRETE = 1,
+ V4L2_FRMSIZE_TYPE_CONTINUOUS = 2,
+ V4L2_FRMSIZE_TYPE_STEPWISE = 3,
+};
+
+struct v4l2_frmsize_discrete {
+ __u32 width; /* Frame width [pixel] */
+ __u32 height; /* Frame height [pixel] */
+};
+
+struct v4l2_frmsize_stepwise {
+ __u32 min_width; /* Minimum frame width [pixel] */
+ __u32 max_width; /* Maximum frame width [pixel] */
+ __u32 step_width; /* Frame width step size [pixel] */
+ __u32 min_height; /* Minimum frame height [pixel] */
+ __u32 max_height; /* Maximum frame height [pixel] */
+ __u32 step_height; /* Frame height step size [pixel] */
+};
+
+/* 'type' (enum v4l2_frmsizetypes) selects which union member is valid. */
+struct v4l2_frmsizeenum {
+ __u32 index; /* Frame size number */
+ __u32 pixel_format; /* Pixel format */
+ __u32 type; /* Frame size type the device supports. */
+
+ union { /* Frame size */
+ struct v4l2_frmsize_discrete discrete;
+ struct v4l2_frmsize_stepwise stepwise;
+ };
+
+ __u32 reserved[2]; /* Reserved space for future use */
+};
+
+/*
+ * F R A M E R A T E E N U M E R A T I O N
+ */
+enum v4l2_frmivaltypes {
+ V4L2_FRMIVAL_TYPE_DISCRETE = 1,
+ V4L2_FRMIVAL_TYPE_CONTINUOUS = 2,
+ V4L2_FRMIVAL_TYPE_STEPWISE = 3,
+};
+
+struct v4l2_frmival_stepwise {
+ struct v4l2_fract min; /* Minimum frame interval [s] */
+ struct v4l2_fract max; /* Maximum frame interval [s] */
+ struct v4l2_fract step; /* Frame interval step size [s] */
+};
+
+/* 'type' (enum v4l2_frmivaltypes) selects which union member is valid. */
+struct v4l2_frmivalenum {
+ __u32 index; /* Frame format index */
+ __u32 pixel_format; /* Pixel format */
+ __u32 width; /* Frame width */
+ __u32 height; /* Frame height */
+ __u32 type; /* Frame interval type the device supports. */
+
+ union { /* Frame interval */
+ struct v4l2_fract discrete;
+ struct v4l2_frmival_stepwise stepwise;
+ };
+
+ __u32 reserved[2]; /* Reserved space for future use */
+};
+
+/*
+ * T I M E C O D E
+ */
+/*
+ * SMPTE-style timecode, carried per-frame in struct v4l2_buffer's
+ * 'timecode' field. 'type' takes one of the V4L2_TC_TYPE_* values and
+ * 'flags' the V4L2_TC_FLAG_* / V4L2_TC_USERBITS_* bits defined below.
+ */
+struct v4l2_timecode {
+ __u32 type;
+ __u32 flags;
+ __u8 frames;
+ __u8 seconds;
+ __u8 minutes;
+ __u8 hours;
+ __u8 userbits[4];
+};
+
+/* Type */
+#define V4L2_TC_TYPE_24FPS 1
+#define V4L2_TC_TYPE_25FPS 2
+#define V4L2_TC_TYPE_30FPS 3
+#define V4L2_TC_TYPE_50FPS 4
+#define V4L2_TC_TYPE_60FPS 5
+
+/* Flags */
+#define V4L2_TC_FLAG_DROPFRAME 0x0001 /* "drop-frame" mode */
+#define V4L2_TC_FLAG_COLORFRAME 0x0002
+#define V4L2_TC_USERBITS_field 0x000C
+#define V4L2_TC_USERBITS_USERDEFINED 0x0000
+#define V4L2_TC_USERBITS_8BITCHARS 0x0008
+/* The above is based on SMPTE timecodes */
+
+/*
+ * JPEG compression parameters. 'jpeg_markers' is a bitmask of the
+ * V4L2_JPEG_MARKER_* flags defined inside the struct below; the APP
+ * and COM segments are controlled solely by APP_len/COM_len.
+ */
+struct v4l2_jpegcompression {
+ int quality;
+
+ int APPn; /* Number of APP segment to be written,
+ * must be 0..15 */
+ int APP_len; /* Length of data in JPEG APPn segment */
+ char APP_data[60]; /* Data in the JPEG APPn segment. */
+
+ int COM_len; /* Length of data in JPEG COM segment */
+ char COM_data[60]; /* Data in JPEG COM segment */
+
+ __u32 jpeg_markers; /* Which markers should go into the JPEG
+ * output. Unless you exactly know what
+ * you do, leave them untouched.
+ * Including less markers will make the
+ * resulting code smaller, but there will
+ * be fewer applications which can read it.
+ * The presence of the APP and COM marker
+ * is influenced by APP_len and COM_len
+ * ONLY, not by this property! */
+
+#define V4L2_JPEG_MARKER_DHT (1<<3) /* Define Huffman Tables */
+#define V4L2_JPEG_MARKER_DQT (1<<4) /* Define Quantization Tables */
+#define V4L2_JPEG_MARKER_DRI (1<<5) /* Define Restart Interval */
+#define V4L2_JPEG_MARKER_COM (1<<6) /* Comment segment */
+#define V4L2_JPEG_MARKER_APP (1<<7) /* App segment, driver will
+ * always use APP0 */
+};
+
+/*
+ * M E M O R Y - M A P P I N G B U F F E R S
+ */
+/*
+ * Buffer allocation request: asks the driver for 'count' buffers of
+ * the given buffer type, to be exchanged via the I/O method named in
+ * 'memory'.
+ */
+struct v4l2_requestbuffers {
+ __u32 count;
+ __u32 type; /* enum v4l2_buf_type */
+ __u32 memory; /* enum v4l2_memory */
+ __u32 reserved[2];
+};
+
+/**
+ * struct v4l2_plane - plane info for multi-planar buffers
+ * @bytesused: number of bytes occupied by data in the plane (payload)
+ * @length: size of this plane (NOT the payload) in bytes
+ * @mem_offset: when memory in the associated struct v4l2_buffer is
+ * V4L2_MEMORY_MMAP, equals the offset from the start of
+ * the device memory for this plane (or is a "cookie" that
+ * should be passed to mmap() called on the video node)
+ * @userptr: when memory is V4L2_MEMORY_USERPTR, a userspace pointer
+ * pointing to this plane
+ * @fd: when memory is V4L2_MEMORY_DMABUF, a userspace file
+ * descriptor associated with this plane
+ * @data_offset: offset in the plane to the start of data; usually 0,
+ * unless there is a header in front of the data
+ *
+ * Multi-planar buffers consist of one or more planes, e.g. an YCbCr buffer
+ * with two planes can have one plane for Y, and another for interleaved CbCr
+ * components. Each plane can reside in a separate memory buffer, or even in
+ * a completely separate memory node (e.g. in embedded devices).
+ */
+struct v4l2_plane {
+ __u32 bytesused;
+ __u32 length;
+ union {
+ __u32 mem_offset;
+ unsigned long userptr;
+ __s32 fd;
+ } m;
+ __u32 data_offset;
+ __u32 reserved[11];
+};
+
+/**
+ * struct v4l2_buffer - video buffer info
+ * @index: id number of the buffer
+ * @type: enum v4l2_buf_type; buffer type (type == *_MPLANE for
+ * multiplanar buffers);
+ * @bytesused: number of bytes occupied by data in the buffer (payload);
+ * unused (set to 0) for multiplanar buffers
+ * @flags: buffer informational flags
+ * @field: enum v4l2_field; field order of the image in the buffer
+ * @timestamp: frame timestamp
+ * @timecode: frame timecode
+ * @sequence: sequence count of this frame
+ * @memory: enum v4l2_memory; the method, in which the actual video data is
+ * passed
+ * @offset: for non-multiplanar buffers with memory == V4L2_MEMORY_MMAP;
+ * offset from the start of the device memory for this plane,
+ * (or a "cookie" that should be passed to mmap() as offset)
+ * @userptr: for non-multiplanar buffers with memory == V4L2_MEMORY_USERPTR;
+ * a userspace pointer pointing to this buffer
+ * @fd: for non-multiplanar buffers with memory == V4L2_MEMORY_DMABUF;
+ * a userspace file descriptor associated with this buffer
+ * @planes: for multiplanar buffers; userspace pointer to the array of plane
+ * info structs for this buffer
+ * @length: size in bytes of the buffer (NOT its payload) for single-plane
+ * buffers (when type != *_MPLANE); number of elements in the
+ * planes array for multi-plane buffers
+ *
+ * Contains data exchanged by application and driver using one of the Streaming
+ * I/O methods.
+ */
+struct v4l2_buffer {
+ __u32 index;
+ __u32 type;
+ __u32 bytesused;
+ __u32 flags;
+ __u32 field;
+ struct timeval timestamp;
+ struct v4l2_timecode timecode;
+ __u32 sequence;
+
+ /* memory location */
+ __u32 memory;
+ union {
+ __u32 offset;
+ unsigned long userptr;
+ struct v4l2_plane *planes;
+ __s32 fd;
+ } m;
+ __u32 length;
+ __u32 reserved2;
+ __u32 reserved;
+};
+
+/* Flags for 'flags' field */
+/* Buffer is mapped (flag) */
+#define V4L2_BUF_FLAG_MAPPED 0x00000001
+/* Buffer is queued for processing */
+#define V4L2_BUF_FLAG_QUEUED 0x00000002
+/* Buffer is ready */
+#define V4L2_BUF_FLAG_DONE 0x00000004
+/* Image is a keyframe (I-frame) */
+#define V4L2_BUF_FLAG_KEYFRAME 0x00000008
+/* Image is a P-frame */
+#define V4L2_BUF_FLAG_PFRAME 0x00000010
+/* Image is a B-frame */
+#define V4L2_BUF_FLAG_BFRAME 0x00000020
+/* Buffer is ready, but the data contained within is corrupted. */
+#define V4L2_BUF_FLAG_ERROR 0x00000040
+/* timecode field is valid */
+#define V4L2_BUF_FLAG_TIMECODE 0x00000100
+/* Buffer is prepared for queuing */
+#define V4L2_BUF_FLAG_PREPARED 0x00000400
+/* Cache handling flags */
+#define V4L2_BUF_FLAG_NO_CACHE_INVALIDATE 0x00000800
+#define V4L2_BUF_FLAG_NO_CACHE_CLEAN 0x00001000
+/* Timestamp type */
+#define V4L2_BUF_FLAG_TIMESTAMP_MASK 0x0000e000
+#define V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN 0x00000000
+#define V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC 0x00002000
+#define V4L2_BUF_FLAG_TIMESTAMP_COPY 0x00004000
+/* Timestamp sources. */
+#define V4L2_BUF_FLAG_TSTAMP_SRC_MASK 0x00070000
+#define V4L2_BUF_FLAG_TSTAMP_SRC_EOF 0x00000000
+#define V4L2_BUF_FLAG_TSTAMP_SRC_SOE 0x00010000
+/* mem2mem encoder/decoder */
+#define V4L2_BUF_FLAG_LAST 0x00100000
+
+/**
+ * struct v4l2_exportbuffer - export of video buffer as DMABUF file descriptor
+ *
+ * @index: id number of the buffer
+ * @type: enum v4l2_buf_type; buffer type (type == *_MPLANE for
+ * multiplanar buffers);
+ * @plane: index of the plane to be exported, 0 for single plane queues
+ * @flags: flags for newly created file, currently only O_CLOEXEC is
+ * supported, refer to manual of open syscall for more details
+ * @fd: file descriptor associated with DMABUF (set by driver)
+ *
+ * Contains data used for exporting a video buffer as DMABUF file descriptor.
+ * The buffer is identified by a 'cookie' returned by VIDIOC_QUERYBUF
+ * (identical to the cookie used to mmap() the buffer to userspace). All
+ * reserved fields must be set to zero. The field reserved0 is expected to
+ * become a structure 'type' allowing an alternative layout of the structure
+ * content. Therefore this field should not be used for any other extensions.
+ */
+struct v4l2_exportbuffer {
+ __u32 type; /* enum v4l2_buf_type */
+ __u32 index;
+ __u32 plane;
+ __u32 flags;
+ __s32 fd;
+ __u32 reserved[11];
+};
+
+/*
+ * O V E R L A Y P R E V I E W
+ */
+/*
+ * Destination framebuffer for video overlay. 'capability' is a
+ * read-only bitmask of V4L2_FBUF_CAP_* bits and 'flags' takes the
+ * V4L2_FBUF_FLAG_* bits (both defined below); 'fmt' describes the
+ * framebuffer's pixel layout.
+ */
+struct v4l2_framebuffer {
+ __u32 capability;
+ __u32 flags;
+/* FIXME: in theory we should pass something like PCI device + memory
+ * region + offset instead of some physical address */
+ void *base;
+ struct {
+ __u32 width;
+ __u32 height;
+ __u32 pixelformat;
+ __u32 field; /* enum v4l2_field */
+ __u32 bytesperline; /* for padding, zero if unused */
+ __u32 sizeimage;
+ __u32 colorspace; /* enum v4l2_colorspace */
+ __u32 priv; /* reserved field, set to 0 */
+ } fmt;
+};
+/* Flags for the 'capability' field. Read only */
+#define V4L2_FBUF_CAP_EXTERNOVERLAY 0x0001
+#define V4L2_FBUF_CAP_CHROMAKEY 0x0002
+#define V4L2_FBUF_CAP_LIST_CLIPPING 0x0004
+#define V4L2_FBUF_CAP_BITMAP_CLIPPING 0x0008
+#define V4L2_FBUF_CAP_LOCAL_ALPHA 0x0010
+#define V4L2_FBUF_CAP_GLOBAL_ALPHA 0x0020
+#define V4L2_FBUF_CAP_LOCAL_INV_ALPHA 0x0040
+#define V4L2_FBUF_CAP_SRC_CHROMAKEY 0x0080
+/* Flags for the 'flags' field. */
+#define V4L2_FBUF_FLAG_PRIMARY 0x0001
+#define V4L2_FBUF_FLAG_OVERLAY 0x0002
+#define V4L2_FBUF_FLAG_CHROMAKEY 0x0004
+#define V4L2_FBUF_FLAG_LOCAL_ALPHA 0x0008
+#define V4L2_FBUF_FLAG_GLOBAL_ALPHA 0x0010
+#define V4L2_FBUF_FLAG_LOCAL_INV_ALPHA 0x0020
+#define V4L2_FBUF_FLAG_SRC_CHROMAKEY 0x0040
+
+/* One rectangle of an overlay clip list; 'next' chains further entries. */
+struct v4l2_clip {
+ struct v4l2_rect c;
+ struct v4l2_clip __user *next;
+};
+
+/*
+ * Overlay window description: placement rectangle 'w', chroma key,
+ * an optional clip list ('clips'/'clipcount') or clipping 'bitmap',
+ * and a 'global_alpha' value for blending.
+ */
+struct v4l2_window {
+ struct v4l2_rect w;
+ __u32 field; /* enum v4l2_field */
+ __u32 chromakey;
+ struct v4l2_clip __user *clips;
+ __u32 clipcount;
+ void __user *bitmap;
+ __u8 global_alpha;
+};
+
+/*
+ * C A P T U R E P A R A M E T E R S
+ */
+struct v4l2_captureparm {
+ __u32 capability; /* Supported modes */
+ __u32 capturemode; /* Current mode */
+ struct v4l2_fract timeperframe; /* Time per frame in seconds */
+ __u32 extendedmode; /* Driver-specific extensions */
+ __u32 readbuffers; /* # of buffers for read */
+ __u32 reserved[4];
+};
+
+/* Flags for 'capability' and 'capturemode' fields */
+#define V4L2_MODE_HIGHQUALITY 0x0001 /* High quality imaging mode */
+#define V4L2_CAP_TIMEPERFRAME 0x1000 /* timeperframe field is supported */
+
+struct v4l2_outputparm {
+ __u32 capability; /* Supported modes */
+ __u32 outputmode; /* Current mode */
+ struct v4l2_fract timeperframe; /* Time per frame in seconds */
+ __u32 extendedmode; /* Driver-specific extensions */
+ __u32 writebuffers; /* # of buffers for write */
+ __u32 reserved[4];
+};
+
+/*
+ * I N P U T I M A G E C R O P P I N G
+ */
+struct v4l2_cropcap {
+ __u32 type; /* enum v4l2_buf_type */
+ struct v4l2_rect bounds;
+ struct v4l2_rect defrect;
+ struct v4l2_fract pixelaspect;
+};
+
+struct v4l2_crop {
+ __u32 type; /* enum v4l2_buf_type */
+ struct v4l2_rect c;
+};
+
+/**
+ * struct v4l2_selection - selection info
+ * @type: buffer type (do not use *_MPLANE types)
+ * @target: Selection target, used to choose one of possible rectangles;
+ * defined in v4l2-common.h; V4L2_SEL_TGT_* .
+ * @flags: constraints flags, defined in v4l2-common.h; V4L2_SEL_FLAG_*.
+ * @r: coordinates of selection window
+ * @reserved: for future use, rounds structure size to 64 bytes, set to zero
+ *
+ * Hardware may use multiple helper windows to process a video stream.
+ * The structure is used to exchange this selection areas between
+ * an application and a driver.
+ */
+struct v4l2_selection {
+ __u32 type;
+ __u32 target;
+ __u32 flags;
+ struct v4l2_rect r;
+ __u32 reserved[9];
+};
+
+
+/*
+ * A N A L O G V I D E O S T A N D A R D
+ */
+
+typedef __u64 v4l2_std_id;
+
+/* one bit for each */
+#define V4L2_STD_PAL_B ((v4l2_std_id)0x00000001)
+#define V4L2_STD_PAL_B1 ((v4l2_std_id)0x00000002)
+#define V4L2_STD_PAL_G ((v4l2_std_id)0x00000004)
+#define V4L2_STD_PAL_H ((v4l2_std_id)0x00000008)
+#define V4L2_STD_PAL_I ((v4l2_std_id)0x00000010)
+#define V4L2_STD_PAL_D ((v4l2_std_id)0x00000020)
+#define V4L2_STD_PAL_D1 ((v4l2_std_id)0x00000040)
+#define V4L2_STD_PAL_K ((v4l2_std_id)0x00000080)
+
+#define V4L2_STD_PAL_M ((v4l2_std_id)0x00000100)
+#define V4L2_STD_PAL_N ((v4l2_std_id)0x00000200)
+#define V4L2_STD_PAL_Nc ((v4l2_std_id)0x00000400)
+#define V4L2_STD_PAL_60 ((v4l2_std_id)0x00000800)
+
+#define V4L2_STD_NTSC_M ((v4l2_std_id)0x00001000) /* BTSC */
+#define V4L2_STD_NTSC_M_JP ((v4l2_std_id)0x00002000) /* EIA-J */
+#define V4L2_STD_NTSC_443 ((v4l2_std_id)0x00004000)
+#define V4L2_STD_NTSC_M_KR ((v4l2_std_id)0x00008000) /* FM A2 */
+
+#define V4L2_STD_SECAM_B ((v4l2_std_id)0x00010000)
+#define V4L2_STD_SECAM_D ((v4l2_std_id)0x00020000)
+#define V4L2_STD_SECAM_G ((v4l2_std_id)0x00040000)
+#define V4L2_STD_SECAM_H ((v4l2_std_id)0x00080000)
+#define V4L2_STD_SECAM_K ((v4l2_std_id)0x00100000)
+#define V4L2_STD_SECAM_K1 ((v4l2_std_id)0x00200000)
+#define V4L2_STD_SECAM_L ((v4l2_std_id)0x00400000)
+#define V4L2_STD_SECAM_LC ((v4l2_std_id)0x00800000)
+
+/* ATSC/HDTV */
+#define V4L2_STD_ATSC_8_VSB ((v4l2_std_id)0x01000000)
+#define V4L2_STD_ATSC_16_VSB ((v4l2_std_id)0x02000000)
+
+/* FIXME:
+ Although std_id is 64 bits, there is an issue on PPC32 architecture that
+ makes switch(__u64) to break. So, there's a hack on v4l2-common.c rounding
+ this value to 32 bits.
+ As, currently, the max value is for V4L2_STD_ATSC_16_VSB (30 bits wide),
+ it should work fine. However, if needed to add more than two standards,
+ v4l2-common.c should be fixed.
+ */
+
+/*
+ * Some macros to merge video standards in order to make live easier for the
+ * drivers and V4L2 applications
+ */
+
+/*
+ * "Common" NTSC/M - It should be noticed that V4L2_STD_NTSC_443 is
+ * Missing here.
+ */
+#define V4L2_STD_NTSC (V4L2_STD_NTSC_M |\
+ V4L2_STD_NTSC_M_JP |\
+ V4L2_STD_NTSC_M_KR)
+/* Secam macros */
+#define V4L2_STD_SECAM_DK (V4L2_STD_SECAM_D |\
+ V4L2_STD_SECAM_K |\
+ V4L2_STD_SECAM_K1)
+/* All Secam Standards */
+#define V4L2_STD_SECAM (V4L2_STD_SECAM_B |\
+ V4L2_STD_SECAM_G |\
+ V4L2_STD_SECAM_H |\
+ V4L2_STD_SECAM_DK |\
+ V4L2_STD_SECAM_L |\
+ V4L2_STD_SECAM_LC)
+/* PAL macros */
+#define V4L2_STD_PAL_BG (V4L2_STD_PAL_B |\
+ V4L2_STD_PAL_B1 |\
+ V4L2_STD_PAL_G)
+#define V4L2_STD_PAL_DK (V4L2_STD_PAL_D |\
+ V4L2_STD_PAL_D1 |\
+ V4L2_STD_PAL_K)
+/*
+ * "Common" PAL - This macro is there to be compatible with the old
+ * V4L1 concept of "PAL": /BGDKHI.
+ * Several PAL standards are missing here: /M, /N and /Nc
+ */
+#define V4L2_STD_PAL (V4L2_STD_PAL_BG |\
+ V4L2_STD_PAL_DK |\
+ V4L2_STD_PAL_H |\
+ V4L2_STD_PAL_I)
+/* Chroma "agnostic" standards */
+#define V4L2_STD_B (V4L2_STD_PAL_B |\
+ V4L2_STD_PAL_B1 |\
+ V4L2_STD_SECAM_B)
+#define V4L2_STD_G (V4L2_STD_PAL_G |\
+ V4L2_STD_SECAM_G)
+#define V4L2_STD_H (V4L2_STD_PAL_H |\
+ V4L2_STD_SECAM_H)
+#define V4L2_STD_L (V4L2_STD_SECAM_L |\
+ V4L2_STD_SECAM_LC)
+#define V4L2_STD_GH (V4L2_STD_G |\
+ V4L2_STD_H)
+#define V4L2_STD_DK (V4L2_STD_PAL_DK |\
+ V4L2_STD_SECAM_DK)
+#define V4L2_STD_BG (V4L2_STD_B |\
+ V4L2_STD_G)
+#define V4L2_STD_MN (V4L2_STD_PAL_M |\
+ V4L2_STD_PAL_N |\
+ V4L2_STD_PAL_Nc |\
+ V4L2_STD_NTSC)
+
+/* Standards where MTS/BTSC stereo could be found */
+#define V4L2_STD_MTS (V4L2_STD_NTSC_M |\
+ V4L2_STD_PAL_M |\
+ V4L2_STD_PAL_N |\
+ V4L2_STD_PAL_Nc)
+
+/* Standards for Countries with 60Hz Line frequency */
+#define V4L2_STD_525_60 (V4L2_STD_PAL_M |\
+ V4L2_STD_PAL_60 |\
+ V4L2_STD_NTSC |\
+ V4L2_STD_NTSC_443)
+/* Standards for Countries with 50Hz Line frequency */
+#define V4L2_STD_625_50 (V4L2_STD_PAL |\
+ V4L2_STD_PAL_N |\
+ V4L2_STD_PAL_Nc |\
+ V4L2_STD_SECAM)
+
+#define V4L2_STD_ATSC (V4L2_STD_ATSC_8_VSB |\
+ V4L2_STD_ATSC_16_VSB)
+/* Macros with none and all analog standards */
+#define V4L2_STD_UNKNOWN 0
+#define V4L2_STD_ALL (V4L2_STD_525_60 |\
+ V4L2_STD_625_50)
+
+/*
+ * Analog video standard descriptor; 'id' is a v4l2_std_id bitmask
+ * built from the V4L2_STD_* bits defined above.
+ */
+struct v4l2_standard {
+ __u32 index;
+ v4l2_std_id id;
+ __u8 name[24];
+ struct v4l2_fract frameperiod; /* Frames, not fields */
+ __u32 framelines;
+ __u32 reserved[4];
+};
+
+/*
+ * D V B T T I M I N G S
+ */
+
+/** struct v4l2_bt_timings - BT.656/BT.1120 timing data
+ * @width: total width of the active video in pixels
+ * @height: total height of the active video in lines
+ * @interlaced: Interlaced or progressive
+ * @polarities: Positive or negative polarities
+ * @pixelclock: Pixel clock in HZ. Ex. 74.25MHz->74250000
+ * @hfrontporch:Horizontal front porch in pixels
+ * @hsync: Horizontal Sync length in pixels
+ * @hbackporch: Horizontal back porch in pixels
+ * @vfrontporch:Vertical front porch in lines
+ * @vsync: Vertical Sync length in lines
+ * @vbackporch: Vertical back porch in lines
+ * @il_vfrontporch:Vertical front porch for the even field
+ * (aka field 2) of interlaced field formats
+ * @il_vsync: Vertical Sync length for the even field
+ * (aka field 2) of interlaced field formats
+ * @il_vbackporch:Vertical back porch for the even field
+ * (aka field 2) of interlaced field formats
+ * @standards: Standards the timing belongs to
+ * @flags: Flags
+ * @picture_aspect: The picture aspect ratio (hor/vert).
+ * @cea861_vic: VIC code as per the CEA-861 standard.
+ * @hdmi_vic: VIC code as per the HDMI standard.
+ * @reserved: Reserved fields, must be zeroed.
+ *
+ * A note regarding vertical interlaced timings: height refers to the total
+ * height of the active video frame (= two fields). The blanking timings refer
+ * to the blanking of each field. So the height of the total frame is
+ * calculated as follows:
+ *
+ * tot_height = height + vfrontporch + vsync + vbackporch +
+ * il_vfrontporch + il_vsync + il_vbackporch
+ *
+ * The active height of each field is height / 2.
+ */
+struct v4l2_bt_timings {
+ __u32 width;
+ __u32 height;
+ __u32 interlaced;
+ __u32 polarities;
+ __u64 pixelclock;
+ __u32 hfrontporch;
+ __u32 hsync;
+ __u32 hbackporch;
+ __u32 vfrontporch;
+ __u32 vsync;
+ __u32 vbackporch;
+ __u32 il_vfrontporch;
+ __u32 il_vsync;
+ __u32 il_vbackporch;
+ __u32 standards;
+ __u32 flags;
+ struct v4l2_fract picture_aspect;
+ __u8 cea861_vic;
+ __u8 hdmi_vic;
+ __u8 reserved[46];
+} __attribute__ ((packed));
+
+/* Interlaced or progressive format */
+#define V4L2_DV_PROGRESSIVE 0
+#define V4L2_DV_INTERLACED 1
+
+/* Polarities. If bit is not set, it is assumed to be negative polarity */
+#define V4L2_DV_VSYNC_POS_POL 0x00000001
+#define V4L2_DV_HSYNC_POS_POL 0x00000002
+
+/* Timings standards */
+#define V4L2_DV_BT_STD_CEA861 (1 << 0) /* CEA-861 Digital TV Profile */
+#define V4L2_DV_BT_STD_DMT (1 << 1) /* VESA Discrete Monitor Timings */
+#define V4L2_DV_BT_STD_CVT (1 << 2) /* VESA Coordinated Video Timings */
+#define V4L2_DV_BT_STD_GTF (1 << 3) /* VESA Generalized Timings Formula */
+#define V4L2_DV_BT_STD_SDI (1 << 4) /* SDI Timings */
+
+/* Flags */
+
+/*
+ * CVT/GTF specific: timing uses reduced blanking (CVT) or the 'Secondary
+ * GTF' curve (GTF). In both cases the horizontal and/or vertical blanking
+ * intervals are reduced, allowing a higher resolution over the same
+ * bandwidth. This is a read-only flag.
+ */
+#define V4L2_DV_FL_REDUCED_BLANKING (1 << 0)
+/*
+ * CEA-861 specific: set for CEA-861 formats with a framerate of a multiple
+ * of six. These formats can be optionally played at 1 / 1.001 speed.
+ * This is a read-only flag.
+ */
+#define V4L2_DV_FL_CAN_REDUCE_FPS (1 << 1)
+/*
+ * CEA-861 specific: only valid for video transmitters, the flag is cleared
+ * by receivers.
+ * If the framerate of the format is a multiple of six, then the pixelclock
+ * used to set up the transmitter is divided by 1.001 to make it compatible
+ * with 60 Hz based standards such as NTSC and PAL-M that use a framerate of
+ * 29.97 Hz. Otherwise this flag is cleared. If the transmitter can't generate
+ * such frequencies, then the flag will also be cleared.
+ */
+#define V4L2_DV_FL_REDUCED_FPS (1 << 2)
+/*
+ * Specific to interlaced formats: if set, then field 1 is really one half-line
+ * longer and field 2 is really one half-line shorter, so each field has
+ * exactly the same number of half-lines. Whether half-lines can be detected
+ * or used depends on the hardware.
+ */
+#define V4L2_DV_FL_HALF_LINE (1 << 3)
+/*
+ * If set, then this is a Consumer Electronics (CE) video format. Such formats
+ * differ from other formats (commonly called IT formats) in that if RGB
+ * encoding is used then by default the RGB values use limited range (i.e.
+ * use the range 16-235) as opposed to 0-255. All formats defined in CEA-861
+ * except for the 640x480 format are CE formats.
+ */
+#define V4L2_DV_FL_IS_CE_VIDEO (1 << 4)
+/* Some formats like SMPTE-125M have an interlaced signal with a odd
+ * total height. For these formats, if this flag is set, the first
+ * field has the extra line. If not, it is the second field.
+ */
+#define V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE (1 << 5)
+/*
+ * If set, then the picture_aspect field is valid. Otherwise assume that the
+ * pixels are square, so the picture aspect ratio is the same as the width to
+ * height ratio.
+ */
+#define V4L2_DV_FL_HAS_PICTURE_ASPECT (1 << 6)
+/*
+ * If set, then the cea861_vic field is valid and contains the Video
+ * Identification Code as per the CEA-861 standard.
+ */
+#define V4L2_DV_FL_HAS_CEA861_VIC (1 << 7)
+/*
+ * If set, then the hdmi_vic field is valid and contains the Video
+ * Identification Code as per the HDMI standard (HDMI Vendor Specific
+ * InfoFrame).
+ */
+#define V4L2_DV_FL_HAS_HDMI_VIC (1 << 8)
+
+/* A few useful defines to calculate the total blanking and frame sizes */
+#define V4L2_DV_BT_BLANKING_WIDTH(bt) \
+ ((bt)->hfrontporch + (bt)->hsync + (bt)->hbackporch)
+#define V4L2_DV_BT_FRAME_WIDTH(bt) \
+ ((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
+#define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
+ ((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
+ (bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch)
+#define V4L2_DV_BT_FRAME_HEIGHT(bt) \
+ ((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
+
+/** struct v4l2_dv_timings - DV timings
+ * @type: the type of the timings
+ * @bt: BT656/1120 timings
+ */
+struct v4l2_dv_timings {
+ __u32 type;
+ union {
+ struct v4l2_bt_timings bt;
+ __u32 reserved[32];
+ };
+} __attribute__ ((packed));
+
+/* Values for the type field */
+#define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */
+
+
+/** struct v4l2_enum_dv_timings - DV timings enumeration
+ * @index: enumeration index
+ * @pad: the pad number for which to enumerate timings (used with
+ * v4l-subdev nodes only)
+ * @reserved: must be zeroed
+ * @timings: the timings for the given index
+ */
+struct v4l2_enum_dv_timings {
+ __u32 index;
+ __u32 pad;
+ __u32 reserved[2];
+ struct v4l2_dv_timings timings;
+};
+
+/** struct v4l2_bt_timings_cap - BT.656/BT.1120 timing capabilities
+ * @min_width: width in pixels
+ * @max_width: width in pixels
+ * @min_height: height in lines
+ * @max_height: height in lines
+ * @min_pixelclock: Pixel clock in HZ. Ex. 74.25MHz->74250000
+ * @max_pixelclock: Pixel clock in HZ. Ex. 74.25MHz->74250000
+ * @standards: Supported standards
+ * @capabilities: Supported capabilities
+ * @reserved: Must be zeroed
+ */
+struct v4l2_bt_timings_cap {
+ __u32 min_width;
+ __u32 max_width;
+ __u32 min_height;
+ __u32 max_height;
+ __u64 min_pixelclock;
+ __u64 max_pixelclock;
+ __u32 standards;
+ __u32 capabilities;
+ __u32 reserved[16];
+} __attribute__ ((packed));
+
+/* Supports interlaced formats */
+#define V4L2_DV_BT_CAP_INTERLACED (1 << 0)
+/* Supports progressive formats */
+#define V4L2_DV_BT_CAP_PROGRESSIVE (1 << 1)
+/* Supports CVT/GTF reduced blanking */
+#define V4L2_DV_BT_CAP_REDUCED_BLANKING (1 << 2)
+/* Supports custom formats */
+#define V4L2_DV_BT_CAP_CUSTOM (1 << 3)
+
+/** struct v4l2_dv_timings_cap - DV timings capabilities
+ * @type: the type of the timings (same as in struct v4l2_dv_timings)
+ * @pad: the pad number for which to query capabilities (used with
+ * v4l-subdev nodes only)
+ * @bt: the BT656/1120 timings capabilities
+ */
+struct v4l2_dv_timings_cap {
+ __u32 type;
+ __u32 pad;
+ __u32 reserved[2];
+ union {
+ struct v4l2_bt_timings_cap bt;
+ __u32 raw_data[32];
+ };
+};
+
+
+/*
+ * V I D E O I N P U T S
+ */
+/*
+ * Video input descriptor. 'type' takes a V4L2_INPUT_TYPE_* value,
+ * 'status' the V4L2_IN_ST_* bits and 'capabilities' the V4L2_IN_CAP_*
+ * bits, all defined below.
+ */
+struct v4l2_input {
+ __u32 index; /* Which input */
+ __u8 name[32]; /* Label */
+ __u32 type; /* Type of input */
+ __u32 audioset; /* Associated audios (bitfield) */
+ __u32 tuner; /* enum v4l2_tuner_type */
+ v4l2_std_id std;
+ __u32 status;
+ __u32 capabilities;
+ __u32 reserved[3];
+};
+
+/* Values for the 'type' field */
+#define V4L2_INPUT_TYPE_TUNER 1
+#define V4L2_INPUT_TYPE_CAMERA 2
+#define V4L2_INPUT_TYPE_TOUCH 3
+
+/* field 'status' - general */
+#define V4L2_IN_ST_NO_POWER 0x00000001 /* Attached device is off */
+#define V4L2_IN_ST_NO_SIGNAL 0x00000002
+#define V4L2_IN_ST_NO_COLOR 0x00000004
+
+/* field 'status' - sensor orientation */
+/* If sensor is mounted upside down set both bits */
+#define V4L2_IN_ST_HFLIP 0x00000010 /* Frames are flipped horizontally */
+#define V4L2_IN_ST_VFLIP 0x00000020 /* Frames are flipped vertically */
+
+/* field 'status' - analog */
+#define V4L2_IN_ST_NO_H_LOCK 0x00000100 /* No horizontal sync lock */
+#define V4L2_IN_ST_COLOR_KILL 0x00000200 /* Color killer is active */
+#define V4L2_IN_ST_NO_V_LOCK 0x00000400 /* No vertical sync lock */
+#define V4L2_IN_ST_NO_STD_LOCK 0x00000800 /* No standard format lock */
+
+/* field 'status' - digital */
+#define V4L2_IN_ST_NO_SYNC 0x00010000 /* No synchronization lock */
+#define V4L2_IN_ST_NO_EQU 0x00020000 /* No equalizer lock */
+#define V4L2_IN_ST_NO_CARRIER 0x00040000 /* Carrier recovery failed */
+
+/* field 'status' - VCR and set-top box */
+#define V4L2_IN_ST_MACROVISION 0x01000000 /* Macrovision detected */
+#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */
+#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */
+
+/* capabilities flags */
+#define V4L2_IN_CAP_DV_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
+#define V4L2_IN_CAP_CUSTOM_TIMINGS V4L2_IN_CAP_DV_TIMINGS /* For compatibility */
+#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */
+#define V4L2_IN_CAP_NATIVE_SIZE 0x00000008 /* Supports setting native size */
+
+/*
+ * V I D E O O U T P U T S
+ */
+/*
+ * Video output descriptor. 'type' takes a V4L2_OUTPUT_TYPE_* value
+ * and 'capabilities' the V4L2_OUT_CAP_* bits, both defined below.
+ */
+struct v4l2_output {
+ __u32 index; /* Which output */
+ __u8 name[32]; /* Label */
+ __u32 type; /* Type of output */
+ __u32 audioset; /* Associated audios (bitfield) */
+ __u32 modulator; /* Associated modulator */
+ v4l2_std_id std;
+ __u32 capabilities;
+ __u32 reserved[3];
+};
+/* Values for the 'type' field */
+#define V4L2_OUTPUT_TYPE_MODULATOR 1
+#define V4L2_OUTPUT_TYPE_ANALOG 2
+#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3
+
+/* capabilities flags */
+#define V4L2_OUT_CAP_DV_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
+#define V4L2_OUT_CAP_CUSTOM_TIMINGS V4L2_OUT_CAP_DV_TIMINGS /* For compatibility */
+#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */
+#define V4L2_OUT_CAP_NATIVE_SIZE 0x00000008 /* Supports setting native size */
+
+/*
+ * C O N T R O L S
+ */
+/* Simple control: 32-bit signed 'value' of control 'id'. */
+struct v4l2_control {
+ __u32 id;
+ __s32 value;
+};
+
+/*
+ * Single extended control. Scalar controls pass their value in
+ * 'value'/'value64'; pointer/compound controls use one of the __user
+ * pointer members ('size' is presumably the pointed-to payload size in
+ * bytes — confirm against the V4L2 extended-controls spec).
+ */
+struct v4l2_ext_control {
+ __u32 id;
+ __u32 size;
+ __u32 reserved2[1];
+ union {
+ __s32 value;
+ __s64 value64;
+ char __user *string;
+ __u8 __user *p_u8;
+ __u16 __user *p_u16;
+ __u32 __user *p_u32;
+ void __user *ptr;
+ };
+} __attribute__ ((packed));
+
+/*
+ * A set of 'count' extended controls. 'which' selects which value of
+ * the controls is addressed (V4L2_CTRL_WHICH_* below); the old
+ * 'ctrl_class' union member keeps userspace source compatibility.
+ * 'error_idx' presumably identifies the failing control on error —
+ * confirm against the V4L2 extended-controls spec.
+ */
+struct v4l2_ext_controls {
+ union {
+#ifndef __KERNEL__
+ __u32 ctrl_class;
+#endif
+ __u32 which;
+ };
+ __u32 count;
+ __u32 error_idx;
+ __u32 reserved[2];
+ struct v4l2_ext_control *controls;
+};
+
+#define V4L2_CTRL_ID_MASK (0x0fffffff)
+#ifndef __KERNEL__
+#define V4L2_CTRL_ID2CLASS(id) ((id) & 0x0fff0000UL)
+#endif
+#define V4L2_CTRL_ID2WHICH(id) ((id) & 0x0fff0000UL)
+#define V4L2_CTRL_DRIVER_PRIV(id) (((id) & 0xffff) >= 0x1000)
+#define V4L2_CTRL_MAX_DIMS (4)
+#define V4L2_CTRL_WHICH_CUR_VAL 0
+#define V4L2_CTRL_WHICH_DEF_VAL 0x0f000000
+
+enum v4l2_ctrl_type {
+ V4L2_CTRL_TYPE_INTEGER = 1,
+ V4L2_CTRL_TYPE_BOOLEAN = 2,
+ V4L2_CTRL_TYPE_MENU = 3,
+ V4L2_CTRL_TYPE_BUTTON = 4,
+ V4L2_CTRL_TYPE_INTEGER64 = 5,
+ V4L2_CTRL_TYPE_CTRL_CLASS = 6,
+ V4L2_CTRL_TYPE_STRING = 7,
+ V4L2_CTRL_TYPE_BITMASK = 8,
+ V4L2_CTRL_TYPE_INTEGER_MENU = 9,
+
+ /* Compound types are >= 0x0100 */
+ V4L2_CTRL_COMPOUND_TYPES = 0x0100,
+ V4L2_CTRL_TYPE_U8 = 0x0100,
+ V4L2_CTRL_TYPE_U16 = 0x0101,
+ V4L2_CTRL_TYPE_U32 = 0x0102,
+};
+
+/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
+struct v4l2_queryctrl {
+ __u32 id;
+ __u32 type; /* enum v4l2_ctrl_type */
+ __u8 name[32]; /* Whatever */
+ __s32 minimum; /* Note signedness */
+ __s32 maximum;
+ __s32 step;
+ __s32 default_value;
+ __u32 flags;
+ __u32 reserved[2];
+};
+
+/* Used in the VIDIOC_QUERY_EXT_CTRL ioctl for querying extended controls */
+/* Used in the VIDIOC_QUERY_EXT_CTRL ioctl for querying extended controls */
+struct v4l2_query_ext_ctrl {
+ __u32 id;
+ __u32 type; /* enum v4l2_ctrl_type */
+ char name[32];
+ __s64 minimum; /* 64-bit range, unlike v4l2_queryctrl's __s32 */
+ __s64 maximum;
+ __u64 step;
+ __s64 default_value;
+ __u32 flags; /* V4L2_CTRL_FLAG_* bits */
+ __u32 elem_size; /* bytes per element — per V4L2 spec; confirm */
+ __u32 elems; /* total number of elements */
+ __u32 nr_of_dims; /* number of dims[] entries in use */
+ __u32 dims[V4L2_CTRL_MAX_DIMS];
+ __u32 reserved[32];
+};
+
+/* Used in the VIDIOC_QUERYMENU ioctl for querying menu items */
+/*
+ * Menu item query: for control 'id' and item 'index', the union holds
+ * either a 'name' (ordinary menu) or a 64-bit 'value'
+ * (V4L2_CTRL_TYPE_INTEGER_MENU).
+ */
+struct v4l2_querymenu {
+ __u32 id;
+ __u32 index;
+ union {
+ __u8 name[32]; /* Whatever */
+ __s64 value;
+ };
+ __u32 reserved;
+} __attribute__ ((packed));
+
+/* Control flags */
+#define V4L2_CTRL_FLAG_DISABLED 0x0001
+#define V4L2_CTRL_FLAG_GRABBED 0x0002
+#define V4L2_CTRL_FLAG_READ_ONLY 0x0004
+#define V4L2_CTRL_FLAG_UPDATE 0x0008
+#define V4L2_CTRL_FLAG_INACTIVE 0x0010
+#define V4L2_CTRL_FLAG_SLIDER 0x0020
+#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040
+#define V4L2_CTRL_FLAG_VOLATILE 0x0080
+#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100
+#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200
+#define V4L2_CTRL_FLAG_MODIFY_LAYOUT 0x0400
+
+/* Query flags, to be ORed with the control ID */
+#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
+#define V4L2_CTRL_FLAG_NEXT_COMPOUND 0x40000000
+
+/* User-class control IDs defined by V4L2 */
+#define V4L2_CID_MAX_CTRLS 1024
+/* IDs reserved for driver specific controls */
+#define V4L2_CID_PRIVATE_BASE 0x08000000
+
+
+/*
+ * T U N I N G
+ */
+/*
+ * Tuner state. 'capability' takes V4L2_TUNER_CAP_*, 'rxsubchans'
+ * V4L2_TUNER_SUB_* and 'audmode' V4L2_TUNER_MODE_* values (all
+ * defined below). 'signal' is signal strength and 'afc' the automatic
+ * frequency control offset, per the V4L2 tuner spec.
+ */
+struct v4l2_tuner {
+ __u32 index;
+ __u8 name[32];
+ __u32 type; /* enum v4l2_tuner_type */
+ __u32 capability;
+ __u32 rangelow;
+ __u32 rangehigh;
+ __u32 rxsubchans;
+ __u32 audmode;
+ __s32 signal;
+ __s32 afc;
+ __u32 reserved[4];
+};
+
+/*
+ * Modulator state; 'capability' takes V4L2_TUNER_CAP_* bits and
+ * 'txsubchans' presumably the V4L2_TUNER_SUB_* bits defined below —
+ * confirm against the V4L2 modulator spec.
+ */
+struct v4l2_modulator {
+ __u32 index;
+ __u8 name[32];
+ __u32 capability;
+ __u32 rangelow;
+ __u32 rangehigh;
+ __u32 txsubchans;
+ __u32 type; /* enum v4l2_tuner_type */
+ __u32 reserved[3];
+};
+
+/* Flags for the 'capability' field */
+#define V4L2_TUNER_CAP_LOW 0x0001
+#define V4L2_TUNER_CAP_NORM 0x0002
+#define V4L2_TUNER_CAP_HWSEEK_BOUNDED 0x0004
+#define V4L2_TUNER_CAP_HWSEEK_WRAP 0x0008
+#define V4L2_TUNER_CAP_STEREO 0x0010
+#define V4L2_TUNER_CAP_LANG2 0x0020
+#define V4L2_TUNER_CAP_SAP 0x0020
+#define V4L2_TUNER_CAP_LANG1 0x0040
+#define V4L2_TUNER_CAP_RDS 0x0080
+#define V4L2_TUNER_CAP_RDS_BLOCK_IO 0x0100
+#define V4L2_TUNER_CAP_RDS_CONTROLS 0x0200
+#define V4L2_TUNER_CAP_FREQ_BANDS 0x0400
+#define V4L2_TUNER_CAP_HWSEEK_PROG_LIM 0x0800
+#define V4L2_TUNER_CAP_1HZ 0x1000
+
+/* Flags for the 'rxsubchans' field */
+#define V4L2_TUNER_SUB_MONO 0x0001
+#define V4L2_TUNER_SUB_STEREO 0x0002
+#define V4L2_TUNER_SUB_LANG2 0x0004
+#define V4L2_TUNER_SUB_SAP 0x0004
+#define V4L2_TUNER_SUB_LANG1 0x0008
+#define V4L2_TUNER_SUB_RDS 0x0010
+
+/* Values for the 'audmode' field */
+#define V4L2_TUNER_MODE_MONO 0x0000
+#define V4L2_TUNER_MODE_STEREO 0x0001
+#define V4L2_TUNER_MODE_LANG2 0x0002
+#define V4L2_TUNER_MODE_SAP 0x0002
+#define V4L2_TUNER_MODE_LANG1 0x0003
+#define V4L2_TUNER_MODE_LANG1_LANG2 0x0004
+
+struct v4l2_frequency {
+ __u32 tuner;
+ __u32 type; /* enum v4l2_tuner_type */
+ __u32 frequency;
+ __u32 reserved[8];
+};
+
+#define V4L2_BAND_MODULATION_VSB (1 << 1)
+#define V4L2_BAND_MODULATION_FM (1 << 2)
+#define V4L2_BAND_MODULATION_AM (1 << 3)
+
+struct v4l2_frequency_band {
+ __u32 tuner;
+ __u32 type; /* enum v4l2_tuner_type */
+ __u32 index;
+ __u32 capability;
+ __u32 rangelow;
+ __u32 rangehigh;
+ __u32 modulation;
+ __u32 reserved[9];
+};
+
+struct v4l2_hw_freq_seek {
+ __u32 tuner;
+ __u32 type; /* enum v4l2_tuner_type */
+ __u32 seek_upward;
+ __u32 wrap_around;
+ __u32 spacing;
+ __u32 rangelow;
+ __u32 rangehigh;
+ __u32 reserved[5];
+};
+
+/*
+ * R D S
+ */
+
+struct v4l2_rds_data {
+ __u8 lsb;
+ __u8 msb;
+ __u8 block;
+} __attribute__ ((packed));
+
+#define V4L2_RDS_BLOCK_MSK 0x7
+#define V4L2_RDS_BLOCK_A 0
+#define V4L2_RDS_BLOCK_B 1
+#define V4L2_RDS_BLOCK_C 2
+#define V4L2_RDS_BLOCK_D 3
+#define V4L2_RDS_BLOCK_C_ALT 4
+#define V4L2_RDS_BLOCK_INVALID 7
+
+#define V4L2_RDS_BLOCK_CORRECTED 0x40
+#define V4L2_RDS_BLOCK_ERROR 0x80
+
+/*
+ * A U D I O
+ */
+struct v4l2_audio {
+ __u32 index;
+ __u8 name[32];
+ __u32 capability;
+ __u32 mode;
+ __u32 reserved[2];
+};
+
+/* Flags for the 'capability' field */
+#define V4L2_AUDCAP_STEREO 0x00001
+#define V4L2_AUDCAP_AVL 0x00002
+
+/* Flags for the 'mode' field */
+#define V4L2_AUDMODE_AVL 0x00001
+
+struct v4l2_audioout {
+ __u32 index;
+ __u8 name[32];
+ __u32 capability;
+ __u32 mode;
+ __u32 reserved[2];
+};
+
+/*
+ * M P E G S E R V I C E S
+ */
+#if 1
+#define V4L2_ENC_IDX_FRAME_I (0)
+#define V4L2_ENC_IDX_FRAME_P (1)
+#define V4L2_ENC_IDX_FRAME_B (2)
+#define V4L2_ENC_IDX_FRAME_MASK (0xf)
+
+struct v4l2_enc_idx_entry {
+ __u64 offset;
+ __u64 pts;
+ __u32 length;
+ __u32 flags;
+ __u32 reserved[2];
+};
+
+#define V4L2_ENC_IDX_ENTRIES (64)
+struct v4l2_enc_idx {
+ __u32 entries;
+ __u32 entries_cap;
+ __u32 reserved[4];
+ struct v4l2_enc_idx_entry entry[V4L2_ENC_IDX_ENTRIES];
+};
+
+
+#define V4L2_ENC_CMD_START (0)
+#define V4L2_ENC_CMD_STOP (1)
+#define V4L2_ENC_CMD_PAUSE (2)
+#define V4L2_ENC_CMD_RESUME (3)
+
+/* Flags for V4L2_ENC_CMD_STOP */
+#define V4L2_ENC_CMD_STOP_AT_GOP_END (1 << 0)
+
+struct v4l2_encoder_cmd {
+ __u32 cmd;
+ __u32 flags;
+ union {
+ struct {
+ __u32 data[8];
+ } raw;
+ };
+};
+
+/* Decoder commands */
+#define V4L2_DEC_CMD_START (0)
+#define V4L2_DEC_CMD_STOP (1)
+#define V4L2_DEC_CMD_PAUSE (2)
+#define V4L2_DEC_CMD_RESUME (3)
+
+/* Flags for V4L2_DEC_CMD_START */
+#define V4L2_DEC_CMD_START_MUTE_AUDIO (1 << 0)
+
+/* Flags for V4L2_DEC_CMD_PAUSE */
+#define V4L2_DEC_CMD_PAUSE_TO_BLACK (1 << 0)
+
+/* Flags for V4L2_DEC_CMD_STOP */
+#define V4L2_DEC_CMD_STOP_TO_BLACK (1 << 0)
+#define V4L2_DEC_CMD_STOP_IMMEDIATELY (1 << 1)
+
+/* Play format requirements (returned by the driver): */
+
+/* The decoder has no special format requirements */
+#define V4L2_DEC_START_FMT_NONE (0)
+/* The decoder requires full GOPs */
+#define V4L2_DEC_START_FMT_GOP (1)
+
+/* The structure must be zeroed before use by the application
+ This ensures it can be extended safely in the future. */
+struct v4l2_decoder_cmd {
+ __u32 cmd;
+ __u32 flags;
+ union {
+ struct {
+ __u64 pts;
+ } stop;
+
+ struct {
+ /* 0 or 1000 specifies normal speed,
+ 1 specifies forward single stepping,
+ -1 specifies backward single stepping,
+ >1: playback at speed/1000 of the normal speed,
+ <-1: reverse playback at (-speed/1000) of the normal speed. */
+ __s32 speed;
+ __u32 format;
+ } start;
+
+ struct {
+ __u32 data[16];
+ } raw;
+ };
+};
+#endif
+
+
+/*
+ * D A T A S E R V I C E S ( V B I )
+ *
+ * Data services API by Michael Schimek
+ */
+
+/* Raw VBI */
+struct v4l2_vbi_format {
+ __u32 sampling_rate; /* in 1 Hz */
+ __u32 offset;
+ __u32 samples_per_line;
+ __u32 sample_format; /* V4L2_PIX_FMT_* */
+ __s32 start[2];
+ __u32 count[2];
+ __u32 flags; /* V4L2_VBI_* */
+ __u32 reserved[2]; /* must be zero */
+};
+
+/* VBI flags */
+#define V4L2_VBI_UNSYNC (1 << 0)
+#define V4L2_VBI_INTERLACED (1 << 1)
+
+/* ITU-R start lines for each field */
+#define V4L2_VBI_ITU_525_F1_START (1)
+#define V4L2_VBI_ITU_525_F2_START (264)
+#define V4L2_VBI_ITU_625_F1_START (1)
+#define V4L2_VBI_ITU_625_F2_START (314)
+
+/* Sliced VBI
+ *
+ * This implements is a proposal V4L2 API to allow SLICED VBI
+ * required for some hardware encoders. It should change without
+ * notice in the definitive implementation.
+ */
+
+struct v4l2_sliced_vbi_format {
+ __u16 service_set;
+ /* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field
+ service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field
+ (equals frame lines 313-336 for 625 line video
+ standards, 263-286 for 525 line standards) */
+ __u16 service_lines[2][24];
+ __u32 io_size;
+ __u32 reserved[2]; /* must be zero */
+};
+
+/* Teletext World System Teletext
+ (WST), defined on ITU-R BT.653-2 */
+#define V4L2_SLICED_TELETEXT_B (0x0001)
+/* Video Program System, defined on ETS 300 231*/
+#define V4L2_SLICED_VPS (0x0400)
+/* Closed Caption, defined on EIA-608 */
+#define V4L2_SLICED_CAPTION_525 (0x1000)
+/* Wide Screen System, defined on ITU-R BT1119.1 */
+#define V4L2_SLICED_WSS_625 (0x4000)
+
+#define V4L2_SLICED_VBI_525 (V4L2_SLICED_CAPTION_525)
+#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625)
+
+struct v4l2_sliced_vbi_cap {
+ __u16 service_set;
+ /* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field
+ service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field
+ (equals frame lines 313-336 for 625 line video
+ standards, 263-286 for 525 line standards) */
+ __u16 service_lines[2][24];
+ __u32 type; /* enum v4l2_buf_type */
+ __u32 reserved[3]; /* must be 0 */
+};
+
+struct v4l2_sliced_vbi_data {
+ __u32 id;
+ __u32 field; /* 0: first field, 1: second field */
+ __u32 line; /* 1-23 */
+ __u32 reserved; /* must be 0 */
+ __u8 data[48];
+};
+
+/*
+ * Sliced VBI data inserted into MPEG Streams
+ */
+
+/*
+ * V4L2_MPEG_STREAM_VBI_FMT_IVTV:
+ *
+ * Structure of payload contained in an MPEG 2 Private Stream 1 PES Packet in an
+ * MPEG-2 Program Pack that contains V4L2_MPEG_STREAM_VBI_FMT_IVTV Sliced VBI
+ * data
+ *
+ * Note, the MPEG-2 Program Pack and Private Stream 1 PES packet header
+ * definitions are not included here. See the MPEG-2 specifications for details
+ * on these headers.
+ */
+
+/* Line type IDs */
+#define V4L2_MPEG_VBI_IVTV_TELETEXT_B (1)
+#define V4L2_MPEG_VBI_IVTV_CAPTION_525 (4)
+#define V4L2_MPEG_VBI_IVTV_WSS_625 (5)
+#define V4L2_MPEG_VBI_IVTV_VPS (7)
+
+struct v4l2_mpeg_vbi_itv0_line {
+ __u8 id; /* One of V4L2_MPEG_VBI_IVTV_* above */
+ __u8 data[42]; /* Sliced VBI data for the line */
+} __attribute__ ((packed));
+
+struct v4l2_mpeg_vbi_itv0 {
+ __le32 linemask[2]; /* Bitmasks of VBI service lines present */
+ struct v4l2_mpeg_vbi_itv0_line line[35];
+} __attribute__ ((packed));
+
+struct v4l2_mpeg_vbi_ITV0 {
+ struct v4l2_mpeg_vbi_itv0_line line[36];
+} __attribute__ ((packed));
+
+#define V4L2_MPEG_VBI_IVTV_MAGIC0 "itv0"
+#define V4L2_MPEG_VBI_IVTV_MAGIC1 "ITV0"
+
+struct v4l2_mpeg_vbi_fmt_ivtv {
+ __u8 magic[4];
+ union {
+ struct v4l2_mpeg_vbi_itv0 itv0;
+ struct v4l2_mpeg_vbi_ITV0 ITV0;
+ };
+} __attribute__ ((packed));
+
+/*
+ * A G G R E G A T E S T R U C T U R E S
+ */
+
+/**
+ * struct v4l2_plane_pix_format - additional, per-plane format definition
+ * @sizeimage: maximum size in bytes required for data, for which
+ * this plane will be used
+ * @bytesperline: distance in bytes between the leftmost pixels in two
+ * adjacent lines
+ */
+struct v4l2_plane_pix_format {
+ __u32 sizeimage;
+ __u32 bytesperline;
+ __u16 reserved[6];
+} __attribute__ ((packed));
+
+/**
+ * struct v4l2_pix_format_mplane - multiplanar format definition
+ * @width: image width in pixels
+ * @height: image height in pixels
+ * @pixelformat: little endian four character code (fourcc)
+ * @field: enum v4l2_field; field order (for interlaced video)
+ * @colorspace: enum v4l2_colorspace; supplemental to pixelformat
+ * @plane_fmt: per-plane information
+ * @num_planes: number of planes for this format
+ * @flags: format flags (V4L2_PIX_FMT_FLAG_*)
+ * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @quantization: enum v4l2_quantization, colorspace quantization
+ * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ */
+struct v4l2_pix_format_mplane {
+ __u32 width;
+ __u32 height;
+ __u32 pixelformat;
+ __u32 field;
+ __u32 colorspace;
+
+ struct v4l2_plane_pix_format plane_fmt[VIDEO_MAX_PLANES];
+ __u8 num_planes;
+ __u8 flags;
+ union {
+ __u8 ycbcr_enc;
+ __u8 hsv_enc;
+ };
+ __u8 quantization;
+ __u8 xfer_func;
+ __u8 reserved[7];
+} __attribute__ ((packed));
+
+/**
+ * struct v4l2_sdr_format - SDR format definition
+ * @pixelformat: little endian four character code (fourcc)
+ * @buffersize: maximum size in bytes required for data
+ */
+struct v4l2_sdr_format {
+ __u32 pixelformat;
+ __u32 buffersize;
+ __u8 reserved[24];
+} __attribute__ ((packed));
+
+/**
+ * struct v4l2_meta_format - metadata format definition
+ * @dataformat: little endian four character code (fourcc)
+ * @buffersize: maximum size in bytes required for data
+ */
+struct v4l2_meta_format {
+ __u32 dataformat;
+ __u32 buffersize;
+} __attribute__ ((packed));
+
+/**
+ * struct v4l2_format - stream data format
+ * @type: enum v4l2_buf_type; type of the data stream
+ * @pix: definition of an image format
+ * @pix_mp: definition of a multiplanar image format
+ * @win: definition of an overlaid image
+ * @vbi: raw VBI capture or output parameters
+ * @sliced: sliced VBI capture or output parameters
+ * @raw_data: placeholder for future extensions and custom formats
+ */
+struct v4l2_format {
+ __u32 type;
+ union {
+ struct v4l2_pix_format pix; /* V4L2_BUF_TYPE_VIDEO_CAPTURE */
+ struct v4l2_pix_format_mplane pix_mp; /* V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE */
+ struct v4l2_window win; /* V4L2_BUF_TYPE_VIDEO_OVERLAY */
+ struct v4l2_vbi_format vbi; /* V4L2_BUF_TYPE_VBI_CAPTURE */
+ struct v4l2_sliced_vbi_format sliced; /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */
+ struct v4l2_sdr_format sdr; /* V4L2_BUF_TYPE_SDR_CAPTURE */
+ struct v4l2_meta_format meta; /* V4L2_BUF_TYPE_META_CAPTURE */
+ __u8 raw_data[200]; /* user-defined */
+ } fmt;
+};
+
+/* Stream type-dependent parameters
+ */
+struct v4l2_streamparm {
+ __u32 type; /* enum v4l2_buf_type */
+ union {
+ struct v4l2_captureparm capture;
+ struct v4l2_outputparm output;
+ __u8 raw_data[200]; /* user-defined */
+ } parm;
+};
+
+/*
+ * E V E N T S
+ */
+
+#define V4L2_EVENT_ALL 0
+#define V4L2_EVENT_VSYNC 1
+#define V4L2_EVENT_EOS 2
+#define V4L2_EVENT_CTRL 3
+#define V4L2_EVENT_FRAME_SYNC 4
+#define V4L2_EVENT_SOURCE_CHANGE 5
+#define V4L2_EVENT_MOTION_DET 6
+#define V4L2_EVENT_PRIVATE_START 0x08000000
+
+/* Payload for V4L2_EVENT_VSYNC */
+struct v4l2_event_vsync {
+ /* Can be V4L2_FIELD_ANY, _NONE, _TOP or _BOTTOM */
+ __u8 field;
+} __attribute__ ((packed));
+
+/* Payload for V4L2_EVENT_CTRL */
+#define V4L2_EVENT_CTRL_CH_VALUE (1 << 0)
+#define V4L2_EVENT_CTRL_CH_FLAGS (1 << 1)
+#define V4L2_EVENT_CTRL_CH_RANGE (1 << 2)
+
+struct v4l2_event_ctrl {
+ __u32 changes;
+ __u32 type;
+ union {
+ __s32 value;
+ __s64 value64;
+ };
+ __u32 flags;
+ __s32 minimum;
+ __s32 maximum;
+ __s32 step;
+ __s32 default_value;
+};
+
+struct v4l2_event_frame_sync {
+ __u32 frame_sequence;
+};
+
+#define V4L2_EVENT_SRC_CH_RESOLUTION (1 << 0)
+
+struct v4l2_event_src_change {
+ __u32 changes;
+};
+
+#define V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ (1 << 0)
+
+/**
+ * struct v4l2_event_motion_det - motion detection event
+ * @flags: if V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ is set, then the
+ * frame_sequence field is valid.
+ * @frame_sequence: the frame sequence number associated with this event.
+ * @region_mask: which regions detected motion.
+ */
+struct v4l2_event_motion_det {
+ __u32 flags;
+ __u32 frame_sequence;
+ __u32 region_mask;
+};
+
+struct v4l2_event {
+ __u32 type;
+ union {
+ struct v4l2_event_vsync vsync;
+ struct v4l2_event_ctrl ctrl;
+ struct v4l2_event_frame_sync frame_sync;
+ struct v4l2_event_src_change src_change;
+ struct v4l2_event_motion_det motion_det;
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct timespec timestamp;
+ __u32 id;
+ __u32 reserved[8];
+};
+
+#define V4L2_EVENT_SUB_FL_SEND_INITIAL (1 << 0)
+#define V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK (1 << 1)
+
+struct v4l2_event_subscription {
+ __u32 type;
+ __u32 id;
+ __u32 flags;
+ __u32 reserved[5];
+};
+
+/*
+ * A D V A N C E D D E B U G G I N G
+ *
+ * NOTE: EXPERIMENTAL API, NEVER RELY ON THIS IN APPLICATIONS!
+ * FOR DEBUGGING, TESTING AND INTERNAL USE ONLY!
+ */
+
+/* VIDIOC_DBG_G_REGISTER and VIDIOC_DBG_S_REGISTER */
+
+#define V4L2_CHIP_MATCH_BRIDGE 0 /* Match against chip ID on the bridge (0 for the bridge) */
+#define V4L2_CHIP_MATCH_SUBDEV 4 /* Match against subdev index */
+
+/* The following four defines are no longer in use */
+#define V4L2_CHIP_MATCH_HOST V4L2_CHIP_MATCH_BRIDGE
+#define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver name */
+#define V4L2_CHIP_MATCH_I2C_ADDR 2 /* Match against I2C 7-bit address */
+#define V4L2_CHIP_MATCH_AC97 3 /* Match against ancillary AC97 chip */
+
+struct v4l2_dbg_match {
+ __u32 type; /* Match type */
+ union { /* Match this chip, meaning determined by type */
+ __u32 addr;
+ char name[32];
+ };
+} __attribute__ ((packed));
+
+struct v4l2_dbg_register {
+ struct v4l2_dbg_match match;
+ __u32 size; /* register size in bytes */
+ __u64 reg;
+ __u64 val;
+} __attribute__ ((packed));
+
+#define V4L2_CHIP_FL_READABLE (1 << 0)
+#define V4L2_CHIP_FL_WRITABLE (1 << 1)
+
+/* VIDIOC_DBG_G_CHIP_INFO */
+struct v4l2_dbg_chip_info {
+ struct v4l2_dbg_match match;
+ char name[32];
+ __u32 flags;
+ __u32 reserved[32];
+} __attribute__ ((packed));
+
+/**
+ * struct v4l2_create_buffers - VIDIOC_CREATE_BUFS argument
+ * @index: on return, index of the first created buffer
+ * @count: entry: number of requested buffers,
+ * return: number of created buffers
+ * @memory: enum v4l2_memory; buffer memory type
+ * @format: frame format, for which buffers are requested
+ * @reserved: future extensions
+ */
+struct v4l2_create_buffers {
+ __u32 index;
+ __u32 count;
+ __u32 memory;
+ struct v4l2_format format;
+ __u32 reserved[8];
+};
+
+/*
+ * I O C T L C O D E S F O R V I D E O D E V I C E S
+ *
+ */
+#define VIDIOC_QUERYCAP _IOR('V', 0, struct v4l2_capability)
+#define VIDIOC_RESERVED _IO('V', 1)
+#define VIDIOC_ENUM_FMT _IOWR('V', 2, struct v4l2_fmtdesc)
+#define VIDIOC_G_FMT _IOWR('V', 4, struct v4l2_format)
+#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format)
+#define VIDIOC_REQBUFS _IOWR('V', 8, struct v4l2_requestbuffers)
+#define VIDIOC_QUERYBUF _IOWR('V', 9, struct v4l2_buffer)
+#define VIDIOC_G_FBUF _IOR('V', 10, struct v4l2_framebuffer)
+#define VIDIOC_S_FBUF _IOW('V', 11, struct v4l2_framebuffer)
+#define VIDIOC_OVERLAY _IOW('V', 14, int)
+#define VIDIOC_QBUF _IOWR('V', 15, struct v4l2_buffer)
+#define VIDIOC_EXPBUF _IOWR('V', 16, struct v4l2_exportbuffer)
+#define VIDIOC_DQBUF _IOWR('V', 17, struct v4l2_buffer)
+#define VIDIOC_STREAMON _IOW('V', 18, int)
+#define VIDIOC_STREAMOFF _IOW('V', 19, int)
+#define VIDIOC_G_PARM _IOWR('V', 21, struct v4l2_streamparm)
+#define VIDIOC_S_PARM _IOWR('V', 22, struct v4l2_streamparm)
+#define VIDIOC_G_STD _IOR('V', 23, v4l2_std_id)
+#define VIDIOC_S_STD _IOW('V', 24, v4l2_std_id)
+#define VIDIOC_ENUMSTD _IOWR('V', 25, struct v4l2_standard)
+#define VIDIOC_ENUMINPUT _IOWR('V', 26, struct v4l2_input)
+#define VIDIOC_G_CTRL _IOWR('V', 27, struct v4l2_control)
+#define VIDIOC_S_CTRL _IOWR('V', 28, struct v4l2_control)
+#define VIDIOC_G_TUNER _IOWR('V', 29, struct v4l2_tuner)
+#define VIDIOC_S_TUNER _IOW('V', 30, struct v4l2_tuner)
+#define VIDIOC_G_AUDIO _IOR('V', 33, struct v4l2_audio)
+#define VIDIOC_S_AUDIO _IOW('V', 34, struct v4l2_audio)
+#define VIDIOC_QUERYCTRL _IOWR('V', 36, struct v4l2_queryctrl)
+#define VIDIOC_QUERYMENU _IOWR('V', 37, struct v4l2_querymenu)
+#define VIDIOC_G_INPUT _IOR('V', 38, int)
+#define VIDIOC_S_INPUT _IOWR('V', 39, int)
+#define VIDIOC_G_EDID _IOWR('V', 40, struct v4l2_edid)
+#define VIDIOC_S_EDID _IOWR('V', 41, struct v4l2_edid)
+#define VIDIOC_G_OUTPUT _IOR('V', 46, int)
+#define VIDIOC_S_OUTPUT _IOWR('V', 47, int)
+#define VIDIOC_ENUMOUTPUT _IOWR('V', 48, struct v4l2_output)
+#define VIDIOC_G_AUDOUT _IOR('V', 49, struct v4l2_audioout)
+#define VIDIOC_S_AUDOUT _IOW('V', 50, struct v4l2_audioout)
+#define VIDIOC_G_MODULATOR _IOWR('V', 54, struct v4l2_modulator)
+#define VIDIOC_S_MODULATOR _IOW('V', 55, struct v4l2_modulator)
+#define VIDIOC_G_FREQUENCY _IOWR('V', 56, struct v4l2_frequency)
+#define VIDIOC_S_FREQUENCY _IOW('V', 57, struct v4l2_frequency)
+#define VIDIOC_CROPCAP _IOWR('V', 58, struct v4l2_cropcap)
+#define VIDIOC_G_CROP _IOWR('V', 59, struct v4l2_crop)
+#define VIDIOC_S_CROP _IOW('V', 60, struct v4l2_crop)
+#define VIDIOC_G_JPEGCOMP _IOR('V', 61, struct v4l2_jpegcompression)
+#define VIDIOC_S_JPEGCOMP _IOW('V', 62, struct v4l2_jpegcompression)
+#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id)
+#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format)
+#define VIDIOC_ENUMAUDIO _IOWR('V', 65, struct v4l2_audio)
+#define VIDIOC_ENUMAUDOUT _IOWR('V', 66, struct v4l2_audioout)
+#define VIDIOC_G_PRIORITY _IOR('V', 67, __u32) /* enum v4l2_priority */
+#define VIDIOC_S_PRIORITY _IOW('V', 68, __u32) /* enum v4l2_priority */
+#define VIDIOC_G_SLICED_VBI_CAP _IOWR('V', 69, struct v4l2_sliced_vbi_cap)
+#define VIDIOC_LOG_STATUS _IO('V', 70)
+#define VIDIOC_G_EXT_CTRLS _IOWR('V', 71, struct v4l2_ext_controls)
+#define VIDIOC_S_EXT_CTRLS _IOWR('V', 72, struct v4l2_ext_controls)
+#define VIDIOC_TRY_EXT_CTRLS _IOWR('V', 73, struct v4l2_ext_controls)
+#define VIDIOC_ENUM_FRAMESIZES _IOWR('V', 74, struct v4l2_frmsizeenum)
+#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR('V', 75, struct v4l2_frmivalenum)
+#define VIDIOC_G_ENC_INDEX _IOR('V', 76, struct v4l2_enc_idx)
+#define VIDIOC_ENCODER_CMD _IOWR('V', 77, struct v4l2_encoder_cmd)
+#define VIDIOC_TRY_ENCODER_CMD _IOWR('V', 78, struct v4l2_encoder_cmd)
+
+/*
+ * Experimental, meant for debugging, testing and internal use.
+ * Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
+ * You must be root to use these ioctls. Never use these in applications!
+ */
+#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
+#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register)
+
+#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
+#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
+#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
+#define VIDIOC_DQEVENT _IOR('V', 89, struct v4l2_event)
+#define VIDIOC_SUBSCRIBE_EVENT _IOW('V', 90, struct v4l2_event_subscription)
+#define VIDIOC_UNSUBSCRIBE_EVENT _IOW('V', 91, struct v4l2_event_subscription)
+#define VIDIOC_CREATE_BUFS _IOWR('V', 92, struct v4l2_create_buffers)
+#define VIDIOC_PREPARE_BUF _IOWR('V', 93, struct v4l2_buffer)
+#define VIDIOC_G_SELECTION _IOWR('V', 94, struct v4l2_selection)
+#define VIDIOC_S_SELECTION _IOWR('V', 95, struct v4l2_selection)
+#define VIDIOC_DECODER_CMD _IOWR('V', 96, struct v4l2_decoder_cmd)
+#define VIDIOC_TRY_DECODER_CMD _IOWR('V', 97, struct v4l2_decoder_cmd)
+#define VIDIOC_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings)
+#define VIDIOC_QUERY_DV_TIMINGS _IOR('V', 99, struct v4l2_dv_timings)
+#define VIDIOC_DV_TIMINGS_CAP _IOWR('V', 100, struct v4l2_dv_timings_cap)
+#define VIDIOC_ENUM_FREQ_BANDS _IOWR('V', 101, struct v4l2_frequency_band)
+
+/*
+ * Experimental, meant for debugging, testing and internal use.
+ * Never use this in applications!
+ */
+#define VIDIOC_DBG_G_CHIP_INFO _IOWR('V', 102, struct v4l2_dbg_chip_info)
+
+#define VIDIOC_QUERY_EXT_CTRL _IOWR('V', 103, struct v4l2_query_ext_ctrl)
+
+/* Reminder: when adding new ioctls please add support for them to
+ drivers/media/v4l2-core/v4l2-compat-ioctl32.c as well! */
+
+#define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */
+
+#endif /* _UAPI__LINUX_VIDEODEV2_H */
diff --git a/gst-v4l2/gst/gettext.h b/gst-v4l2/gst/gettext.h
new file mode 100644
index 0000000..fc70ab7
--- /dev/null
+++ b/gst-v4l2/gst/gettext.h
@@ -0,0 +1,69 @@
+/* Convenience header for conditional use of GNU <libintl.h>.
+ Copyright (C) 1995-1998, 2000-2002 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU Library General Public License as published
+ by the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
+ USA. */
+
+#ifndef _LIBGETTEXT_H
+#define _LIBGETTEXT_H 1
+
+/* NLS can be disabled through the configure --disable-nls option. */
+#ifdef ENABLE_NLS
+
+/* Get declarations of GNU message catalog functions. */
+# include <libintl.h>
+
+#else
+
+/* Solaris /usr/include/locale.h includes /usr/include/libintl.h, which
+ chokes if dcgettext is defined as a macro. So include it now, to make
+ later inclusions of <libintl.h> a NOP. We don't include <libintl.h>
+ as well because people using "gettext.h" will not include <libintl.h>,
+ and also including <libintl.h> would fail on SunOS 4, whereas
+ <locale.h> is OK. */
+#if defined(__sun)
+# include <locale.h>
+#endif
+
+/* Disabled NLS.
+ The casts to 'const char *' serve the purpose of producing warnings
+ for invalid uses of the value returned from these functions.
+ On pre-ANSI systems without 'const', the config.h file is supposed to
+ contain "#define const". */
+# define gettext(Msgid) ((const char *) (Msgid))
+# define dgettext(Domainname, Msgid) ((const char *) (Msgid))
+# define dcgettext(Domainname, Msgid, Category) ((const char *) (Msgid))
+# define ngettext(Msgid1, Msgid2, N) \
+ ((N) == 1 ? (const char *) (Msgid1) : (const char *) (Msgid2))
+# define dngettext(Domainname, Msgid1, Msgid2, N) \
+ ((N) == 1 ? (const char *) (Msgid1) : (const char *) (Msgid2))
+# define dcngettext(Domainname, Msgid1, Msgid2, N, Category) \
+ ((N) == 1 ? (const char *) (Msgid1) : (const char *) (Msgid2))
+# define textdomain(Domainname) ((const char *) (Domainname))
+# define bindtextdomain(Domainname, Dirname) ((const char *) (Dirname))
+# define bind_textdomain_codeset(Domainname, Codeset) ((const char *) (Codeset))
+
+#endif
+
+/* A pseudo function call that serves as a marker for the automated
+ extraction of messages, but does not call gettext(). The run-time
+ translation is done at a different place in the code.
+ The argument, String, should be a literal string. Concatenated strings
+ and other string expressions won't work.
+ The macro's expansion is not parenthesized, so that it is suitable as
+ initializer for static 'char[]' or 'const char[]' variables. */
+#define gettext_noop(String) String
+
+#endif /* _LIBGETTEXT_H */
diff --git a/gst-v4l2/gst/glib-compat-private.h b/gst-v4l2/gst/glib-compat-private.h
new file mode 100644
index 0000000..8f37de2
--- /dev/null
+++ b/gst-v4l2/gst/glib-compat-private.h
@@ -0,0 +1,36 @@
+/*
+ * glib-compat.c
+ * Functions copied from glib 2.10
+ *
+ * Copyright 2005 David Schleef <ds@schleef.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __GLIB_COMPAT_PRIVATE_H__
+#define __GLIB_COMPAT_PRIVATE_H__
+
+#include <glib.h>
+
+G_BEGIN_DECLS
+
+/* copies */
+
+/* adaptations */
+
+G_END_DECLS
+
+#endif
diff --git a/gst-v4l2/gst/gst-i18n-plugin.h b/gst-v4l2/gst/gst-i18n-plugin.h
new file mode 100644
index 0000000..ff40ce2
--- /dev/null
+++ b/gst-v4l2/gst/gst-i18n-plugin.h
@@ -0,0 +1,47 @@
+/* GStreamer
+ * Copyright (C) 2004 Thomas Vander Stichele <thomas@apestaart.org>
+ *
+ * gst-i18n-plugins.h: internationalization macros for the GStreamer plugins
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __GST_I18N_PLUGIN_H__
+#define __GST_I18N_PLUGIN_H__
+
+#ifndef GETTEXT_PACKAGE
+#error You must define GETTEXT_PACKAGE before including this header.
+#endif
+
+#ifdef ENABLE_NLS
+
+#include <locale.h>
+
+#include "gettext.h" /* included with gettext distribution and copied */
+
+/* we want to use shorthand _() for translating and N_() for marking */
+#define _(String) dgettext (GETTEXT_PACKAGE, String)
+#define N_(String) gettext_noop (String)
+/* FIXME: if we need it, we can add Q_ as well, like in glib */
+
+#else
+#define _(String) String
+#define N_(String) String
+#define ngettext(Singular,Plural,Count) ((Count>1)?Plural:Singular)
+
+#endif
+
+#endif /* __GST_I18N_PLUGIN_H__ */
diff --git a/gst-v4l2/gstv4l2.c b/gst-v4l2/gstv4l2.c
new file mode 100644
index 0000000..9122bea
--- /dev/null
+++ b/gst-v4l2/gstv4l2.c
@@ -0,0 +1,491 @@
+/* GStreamer
+ *
+ * Copyright (C) 2001-2002 Ronald Bultje
+ * 2006 Edgard Lima
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * gstv4l2.c: plugin for v4l2 elements
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifndef _GNU_SOURCE
+# define _GNU_SOURCE /* O_CLOEXEC */
+#endif
+
+#include "gst/gst-i18n-plugin.h"
+
+#include <gst/gst.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#include "linux/videodev2.h"
+#include "v4l2-utils.h"
+
+#include "gstv4l2object.h"
+
+#ifndef USE_V4L2_TARGET_NV
+#include "gstv4l2src.h"
+#include "gstv4l2sink.h"
+#include "gstv4l2radio.h"
+#include "gstv4l2h263enc.h"
+#include "gstv4l2mpeg4enc.h"
+#include "gstv4l2deviceprovider.h"
+#include "gstv4l2transform.h"
+#endif
+
+#include "gstv4l2videodec.h"
+#include "gstv4l2h264enc.h"
+#include "gstv4l2h265enc.h"
+#include "gstv4l2vp8enc.h"
+#include "gstv4l2vp9enc.h"
+#include "gstv4l2av1enc.h"
+
+/* used in gstv4l2object.c and v4l2_calls.c */
+GST_DEBUG_CATEGORY (v4l2_debug);
+#define GST_CAT_DEFAULT v4l2_debug
+
+#ifndef USE_V4L2_TARGET_NV_X86
+gboolean is_cuvid;
+#else
+gboolean is_cuvid = TRUE;
+#endif
+
+#ifdef GST_V4L2_ENABLE_PROBE
+/* This is a minimalist probe, for speed, we only enumerate formats */
+static GstCaps *
+gst_v4l2_probe_template_caps (const gchar * device, gint video_fd,
+ enum v4l2_buf_type type)
+{
+ gint n;
+ struct v4l2_fmtdesc format;
+ GstCaps *caps;
+
+ GST_DEBUG ("Getting %s format enumerations", device);
+ caps = gst_caps_new_empty ();
+
+ for (n = 0;; n++) {
+ GstStructure *template;
+
+ memset (&format, 0, sizeof (format));
+
+ format.index = n;
+ format.type = type;
+
+ if (ioctl (video_fd, VIDIOC_ENUM_FMT, &format) < 0)
+ break; /* end of enumeration */
+
+ GST_LOG ("index: %u", format.index);
+ GST_LOG ("type: %d", format.type);
+ GST_LOG ("flags: %08x", format.flags);
+ GST_LOG ("description: '%s'", format.description);
+ GST_LOG ("pixelformat: %" GST_FOURCC_FORMAT,
+ GST_FOURCC_ARGS (format.pixelformat));
+
+ template = gst_v4l2_object_v4l2fourcc_to_structure (format.pixelformat);
+
+ if (template) {
+ GstStructure *alt_t = NULL;
+
+ switch (format.pixelformat) {
+ case V4L2_PIX_FMT_RGB32:
+ alt_t = gst_structure_copy (template);
+ gst_structure_set (alt_t, "format", G_TYPE_STRING, "ARGB", NULL);
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ alt_t = gst_structure_copy (template);
+ gst_structure_set (alt_t, "format", G_TYPE_STRING, "BGRA", NULL);
+ default:
+ break;
+ }
+
+ gst_caps_append_structure (caps, template);
+
+ if (alt_t)
+ gst_caps_append_structure (caps, alt_t);
+ }
+ }
+
+ return gst_caps_simplify (caps);
+}
+
/* Walk every v4l2 device node, probe its capabilities and caps, and register
 * a matching GStreamer element (decoder, encoder or transform) for it.
 * The single fd is reused across iterations: it is closed at the top of the
 * next loop turn (or after the loop), so `continue` on error is leak-free. */
static gboolean
gst_v4l2_probe_and_register (GstPlugin * plugin)
{
  GstV4l2Iterator *it;
  gint video_fd = -1;
  struct v4l2_capability vcap;
  guint32 device_caps;

  it = gst_v4l2_iterator_new ();

  while (gst_v4l2_iterator_next (it)) {
    GstCaps *src_caps, *sink_caps;
    gchar *basename;

    /* Close the fd left open by the previous iteration */
    if (video_fd >= 0)
      close (video_fd);

    video_fd = open (it->device_path, O_RDWR | O_CLOEXEC);

    if (video_fd == -1) {
      GST_DEBUG ("Failed to open %s: %s", it->device_path, g_strerror (errno));
      continue;
    }

    memset (&vcap, 0, sizeof (vcap));

    if (ioctl (video_fd, VIDIOC_QUERYCAP, &vcap) < 0) {
      GST_DEBUG ("Failed to get device capabilities: %s", g_strerror (errno));
      continue;
    }

    /* Prefer per-device caps when the driver provides them */
    if (vcap.capabilities & V4L2_CAP_DEVICE_CAPS)
      device_caps = vcap.device_caps;
    else
      device_caps = vcap.capabilities;

    /* Only memory-to-memory (codec/converter) devices are of interest here */
    if (!((device_caps & (V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE)) ||
            /* But legacy driver may expose both CAPTURE and OUTPUT */
            ((device_caps &
                    (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_CAPTURE_MPLANE)) &&
                (device_caps &
                    (V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE)))))
      continue;

    GST_DEBUG ("Probing '%s' located at '%s'",
        it->device_name ? it->device_name : (const gchar *) vcap.driver,
        it->device_path);

    /* get sink supported format (no MPLANE for codec) */
    sink_caps = gst_caps_merge (gst_v4l2_probe_template_caps (it->device_path,
            video_fd, V4L2_BUF_TYPE_VIDEO_OUTPUT),
        gst_v4l2_probe_template_caps (it->device_path, video_fd,
            V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE));

    /* get src supported format */
    src_caps = gst_caps_merge (gst_v4l2_probe_template_caps (it->device_path,
            video_fd, V4L2_BUF_TYPE_VIDEO_CAPTURE),
        gst_v4l2_probe_template_caps (it->device_path, video_fd,
            V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE));

    /* Skip devices without any supported formats */
    if (gst_caps_is_empty (sink_caps) || gst_caps_is_empty (src_caps)) {
      gst_caps_unref (sink_caps);
      gst_caps_unref (src_caps);
      continue;
    }

    basename = g_path_get_basename (it->device_path);

    /* Dispatch on what the caps pair looks like; an encoder device may
     * register several codec elements (one per supported codec). */
    if (gst_v4l2_is_video_dec (sink_caps, src_caps)) {
      gst_v4l2_video_dec_register (plugin, basename, it->device_path,
          sink_caps, src_caps);
    } else if (gst_v4l2_is_video_enc (sink_caps, src_caps, NULL)) {
      if (gst_v4l2_is_h264_enc (sink_caps, src_caps))
        gst_v4l2_h264_enc_register (plugin, basename, it->device_path,
            sink_caps, src_caps);

      if (gst_v4l2_is_mpeg4_enc (sink_caps, src_caps))
        gst_v4l2_mpeg4_enc_register (plugin, basename, it->device_path,
            sink_caps, src_caps);

      if (gst_v4l2_is_h263_enc (sink_caps, src_caps))
        gst_v4l2_h263_enc_register (plugin, basename, it->device_path,
            sink_caps, src_caps);

      if (gst_v4l2_is_vp8_enc (sink_caps, src_caps))
        gst_v4l2_vp8_enc_register (plugin, basename, it->device_path,
            sink_caps, src_caps);

      if (gst_v4l2_is_vp9_enc (sink_caps, src_caps))
        gst_v4l2_vp9_enc_register (plugin, basename, it->device_path,
            sink_caps, src_caps);
      if (gst_v4l2_is_av1_enc (sink_caps, src_caps))
        gst_v4l2_av1_enc_register (plugin, basename, it->device_path,
            sink_caps, src_caps);
    } else if (gst_v4l2_is_transform (sink_caps, src_caps)) {
      gst_v4l2_transform_register (plugin, basename, it->device_path,
          sink_caps, src_caps);
    }
    /* else if ( ... etc. */

    gst_caps_unref (sink_caps);
    gst_caps_unref (src_caps);
    g_free (basename);
  }

  if (video_fd >= 0)
    close (video_fd);

  gst_v4l2_iterator_free (it);

  return TRUE;
}
+#endif
+
+#ifndef USE_V4L2_TARGET_NV
/* Standard (non-NVIDIA) plugin entry point: registers the classic v4l2
 * elements and, when enabled at build time, the device-probed codec
 * elements.  Returns FALSE if any registration fails. */
static gboolean
plugin_init (GstPlugin * plugin)
{
  const gchar *paths[] = { "/dev", "/dev/v4l2", NULL };
  const gchar *names[] = { "video", NULL };

  GST_DEBUG_CATEGORY_INIT (v4l2_debug, "v4l2", 0, "V4L2 API calls");

  /* Add some dependency, so the dynamic features get updated upon changes in
   * /dev/video* */
  gst_plugin_add_dependency (plugin,
      NULL, paths, names, GST_PLUGIN_DEPENDENCY_FLAG_FILE_NAME_IS_PREFIX);

  if (!gst_element_register (plugin, "v4l2src", GST_RANK_PRIMARY,
          GST_TYPE_V4L2SRC) ||
      !gst_element_register (plugin, "v4l2sink", GST_RANK_NONE,
          GST_TYPE_V4L2SINK) ||
      !gst_element_register (plugin, "v4l2radio", GST_RANK_NONE,
          GST_TYPE_V4L2RADIO) ||
      !gst_device_provider_register (plugin, "v4l2deviceprovider",
          GST_RANK_PRIMARY, GST_TYPE_V4L2_DEVICE_PROVIDER)
      /* etc. */
#ifdef GST_V4L2_ENABLE_PROBE
      || !gst_v4l2_probe_and_register (plugin)
#endif
      )
    return FALSE;

#ifdef ENABLE_NLS
  /* Hook up translations for this plugin's message catalog */
  bindtextdomain (GETTEXT_PACKAGE, LOCALEDIR);
  bind_textdomain_codeset (GETTEXT_PACKAGE, "UTF-8");
#endif /* ENABLE_NLS */

  return TRUE;
}
+
+GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
+ GST_VERSION_MINOR,
+ video4linux2,
+ "elements for Video 4 Linux",
+ plugin_init, VERSION, GST_LICENSE, GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN)
+
+#else
+
+static gboolean
+gst_v4l2_has_vp8_encoder(void)
+{
+ gboolean ret = FALSE;
+ int fd = -1;
+ long len = -1;
+ struct stat statbuf;
+ char info[128];
+
+ if (access (V4L2_DEVICE_PATH_TEGRA_INFO, F_OK) == 0) {
+ stat(V4L2_DEVICE_PATH_TEGRA_INFO, &statbuf);
+ if (statbuf.st_size > 0 && statbuf.st_size < 128)
+ {
+ fd = open(V4L2_DEVICE_PATH_TEGRA_INFO, O_RDONLY);
+ read(fd, info, statbuf.st_size);
+ len = statbuf.st_size - 8;
+ for (int i = 0; i < len; i ++)
+ {
+ if (strncmp(&info[i], "tegra", 5) == 0)
+ {
+ if (strncmp(&info[i], "tegra186", 8) == 0 ||
+ strncmp(&info[i], "tegra210", 8) == 0)
+ ret = TRUE;
+ break;
+ }
+ }
+ close(fd);
+ }
+ }
+ return ret;
+}
+
+static gboolean
+gst_v4l2_is_v4l2_nvenc_present(void)
+{
+ gboolean ret = TRUE;
+ int fd = -1;
+ long len = -1;
+ struct stat statbuf;
+ char info[128];
+
+ if (access (V4L2_DEVICE_PATH_TEGRA_INFO, F_OK) == 0) {
+ stat(V4L2_DEVICE_PATH_TEGRA_INFO, &statbuf);
+ if (statbuf.st_size > 0 && statbuf.st_size < 128)
+ {
+ fd = open(V4L2_DEVICE_PATH_TEGRA_INFO, O_RDONLY);
+ read(fd, info, statbuf.st_size);
+ len = statbuf.st_size - 10;
+ for (int i = 0; i < len; i ++)
+ {
+ if (strncmp(&info[i], "p3767", 5) == 0)
+ {
+ /*
+ Jetson Orin Nano 8GB (P3767-0003) Commercial module
+ Jetson Orin Nano 4GB (P3767-0004) Commercial module
+ Jetson Orin Nano 8GB with SD card slot (P3767-0005) For the Developer Kit only
+ */
+ if (strncmp(&info[i + 6], "0003", 4) == 0 ||
+ strncmp(&info[i + 6], "0004", 4) == 0 ||
+ strncmp(&info[i + 6], "0005", 4) == 0)
+ ret = FALSE;
+ break;
+ }
+ }
+ close(fd);
+ }
+ }
+ return ret;
+}
+
/* NVIDIA plugin entry point: detects whether we run on dGPU (cuvid path) or
 * Tegra iGPU, then registers the NVIDIA decoder and encoder elements against
 * whichever device node actually exists on this system. */
static gboolean
plugin_init (GstPlugin * plugin)
{
  gboolean ret = TRUE;

  g_setenv ("GST_V4L2_USE_LIBV4L2", "1", FALSE);

  GST_DEBUG_CATEGORY_INIT (v4l2_debug, "v4l2", 0, "V4L2 API calls");

#ifndef USE_V4L2_TARGET_NV_X86
  /* Shell probes for loaded GPU drivers decide the is_cuvid flag:
   * nvgpu => Tegra integrated GPU, nvidia(dkms) => discrete GPU.
   * NOTE(review): system() depends on lsmod/modprobe/grep being on PATH;
   * a -1 here aborts plugin load entirely. */
  int igpu = -1, dgpu = -1;
  igpu = system("lsmod | grep 'nvgpu' > /dev/null");
  dgpu = system("modprobe -D -q nvidia | grep 'dkms' > /dev/null");
  if (igpu == -1 || dgpu == -1)
    return FALSE;
  else if (dgpu == 0)
    is_cuvid = TRUE;
  else
    is_cuvid = FALSE;

  /* Environment variables override the auto-detection above */
  if (getenv("AARCH64_DGPU"))
    is_cuvid = TRUE;
  else if (getenv("AARCH64_IGPU"))
    is_cuvid = FALSE;
#endif

  /* Decoder: pick the device path matching the platform, falling back to
   * the alternate node when the primary one is absent */
  if (is_cuvid == TRUE)
    gst_v4l2_video_dec_register (plugin,
        V4L2_DEVICE_BASENAME_NVDEC,
        V4L2_DEVICE_PATH_NVDEC_MCCOY,
        NULL,
        NULL);
  else if (access (V4L2_DEVICE_PATH_NVDEC, F_OK) == 0)
    gst_v4l2_video_dec_register (plugin,
        V4L2_DEVICE_BASENAME_NVDEC,
        V4L2_DEVICE_PATH_NVDEC,
        NULL,
        NULL);
  else
    gst_v4l2_video_dec_register (plugin,
        V4L2_DEVICE_BASENAME_NVDEC,
        V4L2_DEVICE_PATH_NVDEC_ALT,
        NULL,
        NULL);

  /* H.264/H.265 encoders: primary node if present, otherwise the alternate
   * node — but only when the module actually has a HW encoder */
  if (access (V4L2_DEVICE_PATH_NVENC, F_OK) == 0) {
    gst_v4l2_h264_enc_register(plugin,
        V4L2_DEVICE_BASENAME_NVENC,
        V4L2_DEVICE_PATH_NVENC,
        NULL,
        NULL);
    gst_v4l2_h265_enc_register(plugin,
        V4L2_DEVICE_BASENAME_NVENC,
        V4L2_DEVICE_PATH_NVENC,
        NULL,
        NULL);
  } else {
    if (!gst_v4l2_is_v4l2_nvenc_present()) {
      // Orin Nano does not have HW encoders, so early return here.
      return ret;
    }
    gst_v4l2_h264_enc_register(plugin,
        V4L2_DEVICE_BASENAME_NVENC,
        V4L2_DEVICE_PATH_NVENC_ALT,
        NULL,
        NULL);
    gst_v4l2_h265_enc_register(plugin,
        V4L2_DEVICE_BASENAME_NVENC,
        V4L2_DEVICE_PATH_NVENC_ALT,
        NULL,
        NULL);
  }

  /* VP8/VP9/AV1 encoders exist only on Tegra (iGPU) platforms; VP8 is
   * further restricted to chips reported by gst_v4l2_has_vp8_encoder() */
  if (is_cuvid == FALSE) {
    if (access (V4L2_DEVICE_PATH_NVENC, F_OK) == 0) {
      if (gst_v4l2_has_vp8_encoder()) {
        gst_v4l2_vp8_enc_register (plugin,
            V4L2_DEVICE_BASENAME_NVENC,
            V4L2_DEVICE_PATH_NVENC,
            NULL,
            NULL);
      }
      gst_v4l2_vp9_enc_register (plugin,
          V4L2_DEVICE_BASENAME_NVENC,
          V4L2_DEVICE_PATH_NVENC,
          NULL,
          NULL);
      gst_v4l2_av1_enc_register (plugin,
          V4L2_DEVICE_BASENAME_NVENC,
          V4L2_DEVICE_PATH_NVENC,
          NULL,
          NULL);
    } else {
      /* NOTE(review): this branch skips the VP8-capability check done in
       * the primary-node branch above — confirm whether that is intended */
      gst_v4l2_vp8_enc_register (plugin,
          V4L2_DEVICE_BASENAME_NVENC,
          V4L2_DEVICE_PATH_NVENC_ALT,
          NULL,
          NULL);
      gst_v4l2_vp9_enc_register (plugin,
          V4L2_DEVICE_BASENAME_NVENC,
          V4L2_DEVICE_PATH_NVENC_ALT,
          NULL,
          NULL);
      gst_v4l2_av1_enc_register (plugin,
          V4L2_DEVICE_BASENAME_NVENC,
          V4L2_DEVICE_PATH_NVENC_ALT,
          NULL,
          NULL);
    }
  }

  return ret;
}
+
+#ifndef PACKAGE
+#define PACKAGE "nvvideo4linux2"
+#endif
+
+GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
+ GST_VERSION_MINOR,
+ nvvideo4linux2,
+ "Nvidia elements for Video 4 Linux",
+ plugin_init,
+ "1.14.0",
+ "LGPL",
+ "nvvideo4linux2",
+ "http://nvidia.com/")
+#endif
diff --git a/gst-v4l2/gstv4l2allocator.c b/gst-v4l2/gstv4l2allocator.c
new file mode 100644
index 0000000..3fc4c39
--- /dev/null
+++ b/gst-v4l2/gstv4l2allocator.c
@@ -0,0 +1,1620 @@
+/*
+ * Copyright (C) 2014 Collabora Ltd.
+ * Author: Nicolas Dufresne
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifndef _GNU_SOURCE
+# define _GNU_SOURCE /* O_CLOEXEC */
+#endif
+
+#include "linux/videodev2.h"
+
+#include "gstv4l2object.h"
+#include "gstv4l2allocator.h"
+
+#include <gst/allocators/gstdmabuf.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#define GST_V4L2_MEMORY_TYPE "V4l2Memory"
+
+#define gst_v4l2_allocator_parent_class parent_class
+G_DEFINE_TYPE (GstV4l2Allocator, gst_v4l2_allocator, GST_TYPE_ALLOCATOR);
+
+GST_DEBUG_CATEGORY_STATIC (v4l2allocator_debug);
+#define GST_CAT_DEFAULT v4l2allocator_debug
+
+#define UNSET_QUEUED(buffer) \
+ ((buffer).flags &= ~(V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE))
+
+#define SET_QUEUED(buffer) ((buffer).flags |= V4L2_BUF_FLAG_QUEUED)
+
+#define IS_QUEUED(buffer) \
+ ((buffer).flags & (V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE))
+
+enum
+{
+ GROUP_RELEASED,
+ LAST_SIGNAL
+};
+
+static guint gst_v4l2_allocator_signals[LAST_SIGNAL] = { 0 };
+
+static void gst_v4l2_allocator_release (GstV4l2Allocator * allocator,
+ GstV4l2Memory * mem);
+
+static const gchar *
+memory_type_to_str (guint32 memory)
+{
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ return "mmap";
+ case V4L2_MEMORY_USERPTR:
+ return "userptr";
+ case V4L2_MEMORY_DMABUF:
+ return "dmabuf";
+ default:
+ return "unknown";
+ }
+}
+
+/*************************************/
+/* GstV4lMemory implementation */
+/*************************************/
+
+static gpointer
+_v4l2mem_map (GstV4l2Memory * mem, gsize maxsize, GstMapFlags flags)
+{
+ gpointer data = NULL;
+
+ switch (mem->group->buffer.memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_USERPTR:
+ data = mem->data;
+ break;
+ case V4L2_MEMORY_DMABUF:
+ /* v4l2 dmabuf memory are not shared with downstream */
+ g_assert_not_reached ();
+ break;
+ default:
+ GST_WARNING ("Unknown memory type %i", mem->group->buffer.memory);
+ break;
+ }
+ return data;
+}
+
+static gboolean
+_v4l2mem_unmap (GstV4l2Memory * mem)
+{
+ gboolean ret = FALSE;
+
+ switch (mem->group->buffer.memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_USERPTR:
+ ret = TRUE;
+ break;
+ case V4L2_MEMORY_DMABUF:
+ /* v4l2 dmabuf memory are not share with downstream */
+ g_assert_not_reached ();
+ break;
+ default:
+ GST_WARNING ("Unknown memory type %i", mem->group->buffer.memory);
+ break;
+ }
+ return ret;
+}
+
/* GstMiniObject dispose hook for top-level V4L2 memories.
 * When the group still owns this plane's slot, the memory is "resurrected"
 * (re-reffed) instead of destroyed and handed back to the allocator's free
 * queue; returning FALSE cancels the destruction.  Returning TRUE lets the
 * memory die, taking an extra ref on the allocator that
 * gst_v4l2_allocator_free() will eventually balance. */
static gboolean
_v4l2mem_dispose (GstV4l2Memory * mem)
{
  GstV4l2Allocator *allocator = (GstV4l2Allocator *) mem->mem.allocator;
  GstV4l2MemoryGroup *group = mem->group;
  gboolean ret;

  if (group->mem[mem->plane]) {
    /* We may have a dmabuf, replace it with returned original memory */
    group->mem[mem->plane] = gst_memory_ref ((GstMemory *) mem);
    gst_v4l2_allocator_release (allocator, mem);
    ret = FALSE;
  } else {
    gst_object_ref (allocator);
    ret = TRUE;
  }

  return ret;
}
+
+static inline GstV4l2Memory *
+_v4l2mem_new (GstMemoryFlags flags, GstAllocator * allocator,
+ GstMemory * parent, gsize maxsize, gsize align, gsize offset, gsize size,
+ gint plane, gpointer data, int dmafd, GstV4l2MemoryGroup * group)
+{
+ GstV4l2Memory *mem;
+
+ mem = g_slice_new0 (GstV4l2Memory);
+ gst_memory_init (GST_MEMORY_CAST (mem),
+ flags, allocator, parent, maxsize, align, offset, size);
+
+ if (parent == NULL)
+ mem->mem.mini_object.dispose =
+ (GstMiniObjectDisposeFunction) _v4l2mem_dispose;
+
+ mem->plane = plane;
+ mem->data = data;
+ mem->dmafd = dmafd;
+ mem->group = group;
+
+ return mem;
+}
+
/* GstMemory share vtable entry: create a read-only sub-memory referencing
 * the real top-level parent.  A size of -1 means "to the end of this
 * memory".  The NV build widens `size` to gssize so that -1 comparison is
 * well defined.  The sub-memory carries dmafd -1 so it never closes the
 * parent's descriptor. */
static GstV4l2Memory *
#ifndef USE_V4L2_TARGET_NV
_v4l2mem_share (GstV4l2Memory * mem, gssize offset, gsize size)
#else
_v4l2mem_share (GstV4l2Memory * mem, gssize offset, gssize size)
#endif
{
  GstV4l2Memory *sub;
  GstMemory *parent;

  /* find the real parent */
  if ((parent = mem->mem.parent) == NULL)
    parent = (GstMemory *) mem;

  if (size == -1)
    size = mem->mem.size - offset;

  /* the shared memory is always readonly */
  sub = _v4l2mem_new (GST_MINI_OBJECT_FLAGS (parent) |
      GST_MINI_OBJECT_FLAG_LOCK_READONLY, mem->mem.allocator, parent,
      mem->mem.maxsize, mem->mem.align, offset, size, mem->plane, mem->data,
      -1, mem->group);

  return sub;
}
+
+static gboolean
+_v4l2mem_is_span (GstV4l2Memory * mem1, GstV4l2Memory * mem2, gsize * offset)
+{
+ if (offset)
+ *offset = mem1->mem.offset - mem1->mem.parent->offset;
+
+ /* and memory is contiguous */
+ return mem1->mem.offset + mem1->mem.size == mem2->mem.offset;
+}
+
+gboolean
+gst_is_v4l2_memory (GstMemory * mem)
+{
+ return gst_memory_is_type (mem, GST_V4L2_MEMORY_TYPE);
+}
+
+GQuark
+gst_v4l2_memory_quark (void)
+{
+ static GQuark quark = 0;
+
+ if (quark == 0)
+ quark = g_quark_from_string ("GstV4l2Memory");
+
+ return quark;
+}
+
+
+/*************************************/
+/* GstV4l2MemoryGroup implementation */
+/*************************************/
+
/* Free a memory group and its per-plane memories.
 * On the NV build, plane memories are usually freed directly with
 * g_slice_free because their GstMemory refcount bookkeeping is bypassed;
 * only encoder devices (NVENC paths) on Tegra keep the regular
 * gst_memory_unref path.  The stock build always unrefs. */
static void
gst_v4l2_memory_group_free (GstV4l2MemoryGroup * group, GstV4l2Object *obj)
{
  gint i;

  for (i = 0; i < group->n_mem; i++) {
    GstMemory *mem = group->mem[i];
    group->mem[i] = NULL;

#if defined(USE_V4L2_TARGET_NV)
    if (mem) {
      if (is_cuvid == TRUE) {
        /* dGPU (cuvid) path: raw slice free, no GstMemory unref */
        g_slice_free (GstV4l2Memory, (GstV4l2Memory *)mem);
      } else {
        if (!strcmp (obj->videodev, V4L2_DEVICE_PATH_NVENC)
            || !strcmp (obj->videodev, V4L2_DEVICE_PATH_NVENC_ALT))
          gst_memory_unref (mem);
        else
          g_slice_free (GstV4l2Memory, (GstV4l2Memory *)mem);
      }
    }
#else
    if (mem)
      gst_memory_unref (mem);
#endif
  }

  g_slice_free (GstV4l2MemoryGroup, group);
}
+
/* Create the memory-group bookkeeping for driver buffer `index`:
 * fill in the v4l2_buffer, query it with VIDIOC_QUERYBUF, sanity-check the
 * reported sizes against the negotiated format, and normalize single-planar
 * info into the planes[] array.  Returns NULL (after freeing the group) on
 * any driver inconsistency. */
static GstV4l2MemoryGroup *
gst_v4l2_memory_group_new (GstV4l2Allocator * allocator, guint32 index)
{
  GstV4l2Object *obj = allocator->obj;
  guint32 memory = allocator->memory;
  struct v4l2_format *format = &obj->format;
  GstV4l2MemoryGroup *group;
  gsize img_size, buf_size;

  group = g_slice_new0 (GstV4l2MemoryGroup);

  group->buffer.type = format->type;
  group->buffer.index = index;
  group->buffer.memory = memory;

  if (V4L2_TYPE_IS_MULTIPLANAR (format->type)) {
    group->n_mem = group->buffer.length = format->fmt.pix_mp.num_planes;
    group->buffer.m.planes = group->planes;
#ifdef USE_V4L2_TARGET_NV
    /* Having multiple memories causes buffer copy issues when these buffers are
     * mapped. Also, even with two memories, both memories map to the same NvBufSurface.
     * Need to take similar care in the is_buffer_valid function. */
    if (!V4L2_TYPE_IS_OUTPUT (obj->type) &&
        (((!strcmp (obj->videodev, V4L2_DEVICE_PATH_NVDEC)) && (is_cuvid == FALSE)) ||
        ((!strcmp (obj->videodev, V4L2_DEVICE_PATH_NVDEC_ALT)) && (is_cuvid == FALSE)) ||
        ((!strcmp (obj->videodev, V4L2_DEVICE_PATH_NVDEC_MCCOY)) && (is_cuvid == TRUE))))
      group->n_mem = 1;
#endif
  } else {
    group->n_mem = 1;
  }

  if (obj->ioctl (obj->video_fd, VIDIOC_QUERYBUF, &group->buffer) < 0)
    goto querybuf_failed;

  if (group->buffer.index != index) {
    GST_ERROR_OBJECT (allocator, "Buffer index returned by VIDIOC_QUERYBUF "
        "didn't match, this indicate the presence of a bug in your driver or "
        "libv4l2");
    g_slice_free (GstV4l2MemoryGroup, group);
    return NULL;
  }

  /* Check that the provided size matches the negotiated format. Failing
   * there usually means a driver or libv4l bug. */
  if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
    gint i;

    for (i = 0; i < group->n_mem; i++) {
      img_size = obj->format.fmt.pix_mp.plane_fmt[i].sizeimage;
      buf_size = group->planes[i].length;
      if (buf_size < img_size)
        goto buffer_too_short;
    }
  } else {
    img_size = obj->format.fmt.pix.sizeimage;
    buf_size = group->buffer.length;
    if (buf_size < img_size)
      goto buffer_too_short;
  }

  /* We save non planar buffer information into the multi-planar plane array
   * to avoid duplicating the code later */
  if (!V4L2_TYPE_IS_MULTIPLANAR (format->type)) {
    group->planes[0].bytesused = group->buffer.bytesused;
    group->planes[0].length = group->buffer.length;
    group->planes[0].data_offset = 0;
    g_assert (sizeof (group->planes[0].m) == sizeof (group->buffer.m));
    memcpy (&group->planes[0].m, &group->buffer.m, sizeof (group->buffer.m));
  }

  GST_LOG_OBJECT (allocator, "Got %s buffer", memory_type_to_str (memory));
  GST_LOG_OBJECT (allocator, " index: %u", group->buffer.index);
  GST_LOG_OBJECT (allocator, " type: %d", group->buffer.type);
  GST_LOG_OBJECT (allocator, " flags: %08x", group->buffer.flags);
  GST_LOG_OBJECT (allocator, " field: %d", group->buffer.field);
  GST_LOG_OBJECT (allocator, " memory: %d", group->buffer.memory);
  GST_LOG_OBJECT (allocator, " planes: %d", group->n_mem);

#ifndef GST_DISABLE_GST_DEBUG
  if (memory == V4L2_MEMORY_MMAP) {
    gint i;
    for (i = 0; i < group->n_mem; i++) {
      GST_LOG_OBJECT (allocator,
          " [%u] bytesused: %u, length: %u, offset: %u", i,
          group->planes[i].bytesused, group->planes[i].length,
          group->planes[i].data_offset);
      GST_LOG_OBJECT (allocator, " [%u] MMAP offset: %u", i,
          group->planes[i].m.mem_offset);
    }
  }
#endif

  return group;

querybuf_failed:
  {
    GST_ERROR ("error querying buffer %d: %s", index, g_strerror (errno));
    goto failed;
  }
buffer_too_short:
  {
    GST_ERROR ("buffer size %" G_GSIZE_FORMAT
        " is smaller then negotiated size %" G_GSIZE_FORMAT
        ", this is usually the result of a bug in the v4l2 driver or libv4l.",
        buf_size, img_size);
    goto failed;
  }
failed:
  gst_v4l2_memory_group_free (group, obj);
  return NULL;
}
+
+
+/*************************************/
+/* GstV4lAllocator implementation */
+/*************************************/
+
/* Return one plane's memory to its group.  When the last plane of the group
 * comes back, the whole group is pushed onto the free queue and the
 * "group-released" signal fires.  Balances the allocator ref taken while
 * the memory was out. */
static void
gst_v4l2_allocator_release (GstV4l2Allocator * allocator, GstV4l2Memory * mem)
{
  GstV4l2MemoryGroup *group = mem->group;

  GST_LOG_OBJECT (allocator, "plane %i of buffer %u released",
      mem->plane, group->buffer.index);

  switch (allocator->memory) {
    case V4L2_MEMORY_DMABUF:
      /* NV build keeps the dmabuf fd open — it is managed by libtegrav4l2 */
#ifndef USE_V4L2_TARGET_NV
      close (mem->dmafd);
      mem->dmafd = -1;
#endif
      break;
    case V4L2_MEMORY_USERPTR:
      mem->data = NULL;
      break;
    default:
      break;
  }

  /* When all memory are back, put the group back in the free queue */
  if (g_atomic_int_dec_and_test (&group->mems_allocated)) {
    GST_LOG_OBJECT (allocator, "buffer %u released", group->buffer.index);
    gst_atomic_queue_push (allocator->free_queue, group);
    g_signal_emit (allocator, gst_v4l2_allocator_signals[GROUP_RELEASED], 0);
  }

  /* Keep last, allocator may be freed after this call */
  g_object_unref (allocator);
}
+
/* GstAllocator free vtable entry: final destruction of one plane memory.
 * Unmaps MMAP memory and (stock build only) closes the exported dmabuf fd;
 * sub-memories created by _v4l2mem_share have a parent and skip all of it. */
static void
gst_v4l2_allocator_free (GstAllocator * gallocator, GstMemory * gmem)
{
  GstV4l2Allocator *allocator = (GstV4l2Allocator *) gallocator;
  GstV4l2Object *obj = allocator->obj;
  GstV4l2Memory *mem = (GstV4l2Memory *) gmem;
  GstV4l2MemoryGroup *group = mem->group;

  /* Only free unparented memory */
  if (mem->mem.parent == NULL) {
    GST_LOG_OBJECT (allocator, "freeing plane %i of buffer %u",
        mem->plane, group->buffer.index);

    if (allocator->memory == V4L2_MEMORY_MMAP) {
      if (mem->data) {
#ifdef USE_V4L2_TARGET_NV
        /* NOTE(review): this OR of strcmp() results is always true — a
         * single videodev path can never equal all three decoder paths at
         * once — so munmap runs unconditionally.  The intent was likely to
         * skip munmap for decoder capture planes (i.e. '&&' of the
         * strcmps); confirm against the matching check in
         * gst_v4l2_memory_group_new. */
        if ((V4L2_TYPE_IS_OUTPUT (obj->type)) ||
            (strcmp (obj->videodev, V4L2_DEVICE_PATH_NVDEC)) ||
            (strcmp (obj->videodev, V4L2_DEVICE_PATH_NVDEC_ALT)) ||
            (strcmp (obj->videodev, V4L2_DEVICE_PATH_NVDEC_MCCOY)))
#endif
          obj->munmap (mem->data, group->planes[mem->plane].length);
      }
    }

    /* TODO : Handle this either in libtegrav4l2 or here, currently
     * this is handled here by not closing the FD below */
#ifndef USE_V4L2_TARGET_NV
    /* This apply for both mmap with expbuf, and dmabuf imported memory */
    if (mem->dmafd >= 0)
      close (mem->dmafd);
#endif
  }

  g_slice_free (GstV4l2Memory, mem);
}
+
+static void
+gst_v4l2_allocator_dispose (GObject * obj)
+{
+ GstV4l2Allocator *allocator = (GstV4l2Allocator *) obj;
+#ifndef USE_V4L2_TARGET_NV
+ gint i;
+#else
+ guint i;
+#endif
+
+ GST_LOG_OBJECT (obj, "called");
+
+ for (i = 0; i < allocator->count; i++) {
+ GstV4l2MemoryGroup *group = allocator->groups[i];
+ allocator->groups[i] = NULL;
+ if (group)
+ gst_v4l2_memory_group_free (group, allocator->obj);
+ }
+
+ G_OBJECT_CLASS (parent_class)->dispose (obj);
+}
+
+static void
+gst_v4l2_allocator_finalize (GObject * obj)
+{
+ GstV4l2Allocator *allocator = (GstV4l2Allocator *) obj;
+
+ GST_LOG_OBJECT (obj, "called");
+
+ gst_atomic_queue_unref (allocator->free_queue);
+ gst_object_unref (allocator->obj->element);
+
+ G_OBJECT_CLASS (parent_class)->finalize (obj);
+}
+
+static void
+gst_v4l2_allocator_class_init (GstV4l2AllocatorClass * klass)
+{
+ GObjectClass *object_class;
+ GstAllocatorClass *allocator_class;
+
+ allocator_class = (GstAllocatorClass *) klass;
+ object_class = (GObjectClass *) klass;
+
+ allocator_class->alloc = NULL;
+ allocator_class->free = gst_v4l2_allocator_free;
+
+ object_class->dispose = gst_v4l2_allocator_dispose;
+ object_class->finalize = gst_v4l2_allocator_finalize;
+
+ gst_v4l2_allocator_signals[GROUP_RELEASED] = g_signal_new ("group-released",
+ G_TYPE_FROM_CLASS (object_class), G_SIGNAL_RUN_LAST, 0, NULL, NULL, NULL,
+ G_TYPE_NONE, 0);
+
+ GST_DEBUG_CATEGORY_INIT (v4l2allocator_debug, "v4l2allocator", 0,
+ "V4L2 Allocator");
+}
+
+static void
+gst_v4l2_allocator_init (GstV4l2Allocator * allocator)
+{
+ GstAllocator *alloc = GST_ALLOCATOR_CAST (allocator);
+
+ alloc->mem_type = GST_V4L2_MEMORY_TYPE;
+ alloc->mem_map = (GstMemoryMapFunction) _v4l2mem_map;
+ alloc->mem_unmap = (GstMemoryUnmapFunction) _v4l2mem_unmap;
+ alloc->mem_share = (GstMemoryShareFunction) _v4l2mem_share;
+ alloc->mem_is_span = (GstMemoryIsSpanFunction) _v4l2mem_is_span;
+ /* Use the default, fallback copy function */
+
+#ifdef USE_V4L2_TARGET_NV
+ allocator->free_queue = gst_atomic_queue_new (NV_VIDEO_MAX_FRAME);
+#else
+ allocator->free_queue = gst_atomic_queue_new (VIDEO_MAX_FRAME);
+#endif
+
+ GST_OBJECT_FLAG_SET (allocator, GST_ALLOCATOR_FLAG_CUSTOM_ALLOC);
+}
+
+#define GST_V4L2_ALLOCATOR_PROBE(obj,type) \
+ gst_v4l2_allocator_probe ((obj), V4L2_MEMORY_ ## type, \
+ GST_V4L2_ALLOCATOR_FLAG_ ## type ## _REQBUFS, \
+ GST_V4L2_ALLOCATOR_FLAG_ ## type ## _CREATE_BUFS)
+static guint32
+gst_v4l2_allocator_probe (GstV4l2Allocator * allocator, guint32 memory,
+ guint32 breq_flag, guint32 bcreate_flag)
+{
+ GstV4l2Object *obj = allocator->obj;
+ struct v4l2_requestbuffers breq = { 0 };
+ guint32 flags = 0;
+
+ breq.type = obj->type;
+ breq.count = 0;
+ breq.memory = memory;
+
+ if (obj->ioctl (obj->video_fd, VIDIOC_REQBUFS, &breq) == 0) {
+ struct v4l2_create_buffers bcreate = { 0 };
+
+ flags |= breq_flag;
+
+ bcreate.memory = memory;
+ bcreate.format = obj->format;
+
+ if ((obj->ioctl (obj->video_fd, VIDIOC_CREATE_BUFS, &bcreate) == 0))
+ flags |= bcreate_flag;
+ }
+
+ return flags;
+}
+
/* Ask the driver to create one extra buffer (VIDIOC_CREATE_BUFS) and build
 * its memory group.  Runs under the allocator object lock; returns NULL
 * when inactive, when dynamic allocation is unsupported, or on any driver
 * error. */
static GstV4l2MemoryGroup *
gst_v4l2_allocator_create_buf (GstV4l2Allocator * allocator)
{
  GstV4l2Object *obj = allocator->obj;
  struct v4l2_create_buffers bcreate = { 0 };
  GstV4l2MemoryGroup *group = NULL;

  GST_OBJECT_LOCK (allocator);

  if (!g_atomic_int_get (&allocator->active))
    goto done;

  bcreate.memory = allocator->memory;
  bcreate.format = obj->format;
  bcreate.count = 1;

  if (!allocator->can_allocate)
    goto done;

  if (obj->ioctl (obj->video_fd, VIDIOC_CREATE_BUFS, &bcreate) < 0)
    goto create_bufs_failed;

  /* The driver assigns the new buffer's index; it must be unused */
  if (allocator->groups[bcreate.index] != NULL)
    goto create_bufs_bug;

  group = gst_v4l2_memory_group_new (allocator, bcreate.index);

  if (group) {
    allocator->groups[bcreate.index] = group;
    allocator->count++;
  }

done:
  GST_OBJECT_UNLOCK (allocator);
  return group;

create_bufs_failed:
  {
    GST_WARNING_OBJECT (allocator, "error creating a new buffer: %s",
        g_strerror (errno));
    goto done;
  }
create_bufs_bug:
  {
    GST_ERROR_OBJECT (allocator, "created buffer has already used buffer "
        "index %i, this means there is an bug in your driver or libv4l2",
        bcreate.index);
    goto done;
  }
}
+
/* Pop a free memory group, or — when the queue is empty and the driver (and,
 * on NV, the enable_dynamic_allocation flag) allows it — create a new one
 * on the fly.  Returns NULL when inactive or when nothing is available. */
static GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc (GstV4l2Allocator * allocator)
{
  GstV4l2MemoryGroup *group;

  if (!g_atomic_int_get (&allocator->active))
    return NULL;

  group = gst_atomic_queue_pop (allocator->free_queue);

  if (group == NULL) {
#ifdef USE_V4L2_TARGET_NV
    if (allocator->can_allocate && allocator->enable_dynamic_allocation) {
#else
    if (allocator->can_allocate) {
#endif
      group = gst_v4l2_allocator_create_buf (allocator);

      /* Don't hammer on CREATE_BUFS */
      if (group == NULL)
        allocator->can_allocate = FALSE;
    }
  }

  return group;
}
+
/* Restore each plane memory to cover its full capacity before requeuing.
 * On the NV build the plane length is first re-derived from the memory's
 * maxsize (which makes the following maxsize assignment a no-op there);
 * the stock build takes length as the authority instead. */
static void
gst_v4l2_allocator_reset_size (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  gint i;
  for (i = 0; i < group->n_mem; i++) {
    /* TODO: Fix to handle length/maxsize appropriately as per the spec */
#ifdef USE_V4L2_TARGET_NV
    group->planes[i].length = group->mem[i]->maxsize;
#endif
    group->mem[i]->maxsize = group->planes[i].length;
    group->mem[i]->offset = 0;
    group->mem[i]->size = group->planes[i].length;
  }
}
+
+static void
+_cleanup_failed_alloc (GstV4l2Allocator * allocator, GstV4l2MemoryGroup * group)
+{
+ if (group->mems_allocated > 0) {
+ gint i;
+ /* If one or more mmap worked, we need to unref the memory, otherwise
+ * they will keep a ref on the allocator and leak it. This will put back
+ * the group into the free_queue */
+ for (i = 0; i < group->n_mem; i++)
+ gst_memory_unref (group->mem[i]);
+ } else {
+ /* Otherwise, group has to be on free queue for _stop() to work */
+ gst_atomic_queue_push (allocator->free_queue, group);
+ }
+}
+
+
+
/* Create an allocator bound to a GstV4l2Object, probe which memory types
 * (MMAP/USERPTR/DMABUF) the driver supports and store the results as object
 * flags.  Holds a ref on the owning element until finalize. */
GstV4l2Allocator *
gst_v4l2_allocator_new (GstObject * parent, GstV4l2Object * v4l2object)
{
  GstV4l2Allocator *allocator;
  guint32 flags = 0;
  gchar *name, *parent_name;

  parent_name = gst_object_get_name (parent);
  name = g_strconcat (parent_name, ":allocator", NULL);
  g_free (parent_name);

  allocator = g_object_new (GST_TYPE_V4L2_ALLOCATOR, "name", name, NULL);
  gst_object_ref_sink (allocator);
  g_free (name);

  /* Save everything */
  allocator->obj = v4l2object;

  /* Keep a ref on the element so obj does not disappear */
  gst_object_ref (allocator->obj->element);

  flags |= GST_V4L2_ALLOCATOR_PROBE (allocator, MMAP);
  flags |= GST_V4L2_ALLOCATOR_PROBE (allocator, USERPTR);
  flags |= GST_V4L2_ALLOCATOR_PROBE (allocator, DMABUF);

  if (flags == 0) {
    /* Drivers not ported from videobuf to videbuf2 don't allow freeing buffers
     * using REQBUFS(0). This is a workaround to still support these drivers,
     * which are known to have MMAP support. */
    GST_WARNING_OBJECT (allocator, "Could not probe supported memory type, "
        "assuming MMAP is supported, this is expected for older drivers not "
        " yet ported to videobuf2 framework");
    flags = GST_V4L2_ALLOCATOR_FLAG_MMAP_REQBUFS;
  }

  GST_OBJECT_FLAG_SET (allocator, flags);

  return allocator;
}
+
+/* Activate the allocator: request @count buffers of the given V4L2
+ * @memory type with VIDIOC_REQBUFS and create one GstV4l2MemoryGroup per
+ * buffer granted by the driver, pushing each onto the free queue.
+ * Returns the number of buffers actually allocated (0 on failure).
+ * Note the driver may grant fewer (or more) buffers than requested. */
+guint
+gst_v4l2_allocator_start (GstV4l2Allocator * allocator, guint32 count,
+ guint32 memory)
+{
+ GstV4l2Object *obj = allocator->obj;
+ struct v4l2_requestbuffers breq = { count, obj->type, memory };
+ gboolean can_allocate;
+#ifndef USE_V4L2_TARGET_NV
+ gint i;
+#else
+ guint i;
+#endif
+
+ g_return_val_if_fail (count != 0, 0);
+
+ GST_OBJECT_LOCK (allocator);
+
+ /* Starting twice without an intervening _stop() is a caller bug */
+ if (g_atomic_int_get (&allocator->active))
+ goto already_active;
+
+ if (obj->ioctl (obj->video_fd, VIDIOC_REQBUFS, &breq) < 0)
+ goto reqbufs_failed;
+
+ if (breq.count < 1)
+ goto out_of_memory;
+
+ /* Record whether CREATE_BUFS is usable for this memory type, as probed
+ * in gst_v4l2_allocator_new() */
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, MMAP);
+ break;
+ case V4L2_MEMORY_USERPTR:
+ can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, USERPTR);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, DMABUF);
+ break;
+ default:
+ can_allocate = FALSE;
+ break;
+ }
+
+ GST_DEBUG_OBJECT (allocator, "allocated %u %s buffers out of %u requested",
+ breq.count, memory_type_to_str (memory), count);
+
+ allocator->can_allocate = can_allocate;
+ allocator->count = breq.count;
+ allocator->memory = memory;
+
+ /* Create memory groups */
+ for (i = 0; i < allocator->count; i++) {
+ allocator->groups[i] = gst_v4l2_memory_group_new (allocator, i);
+ if (allocator->groups[i] == NULL)
+ goto error;
+
+ gst_atomic_queue_push (allocator->free_queue, allocator->groups[i]);
+ }
+
+ g_atomic_int_set (&allocator->active, TRUE);
+
+done:
+ GST_OBJECT_UNLOCK (allocator);
+ return breq.count;
+
+already_active:
+ {
+ GST_ERROR_OBJECT (allocator, "allocator already active");
+ goto error;
+ }
+reqbufs_failed:
+ {
+ GST_ERROR_OBJECT (allocator,
+ "error requesting %d buffers: %s", count, g_strerror (errno));
+ goto error;
+ }
+out_of_memory:
+ {
+ GST_ERROR_OBJECT (allocator, "Not enough memory to allocate buffers");
+ goto error;
+ }
+error:
+ {
+ breq.count = 0;
+ goto done;
+ }
+}
+
+/**
+ * gst_v4l2_allocator_stop:
+ * @allocator: the allocator to deactivate
+ *
+ * Free every memory group and release the driver buffers with REQBUFS(0).
+ * Only succeeds when all groups are back on the free queue; otherwise the
+ * allocator is still in use by someone.
+ *
+ * Returns: GST_V4L2_OK on success (or if already stopped),
+ *          GST_V4L2_BUSY when buffers are still outstanding.
+ */
+GstV4l2Return
+gst_v4l2_allocator_stop (GstV4l2Allocator * allocator)
+{
+  GstV4l2Object *obj = allocator->obj;
+  struct v4l2_requestbuffers breq = { 0, obj->type, allocator->memory };
+#ifndef USE_V4L2_TARGET_NV
+  gint i = 0;
+#else
+  guint i = 0;
+#endif
+  GstV4l2Return ret = GST_V4L2_OK;
+
+  GST_DEBUG_OBJECT (allocator, "stop allocator");
+
+  GST_OBJECT_LOCK (allocator);
+
+  if (!g_atomic_int_get (&allocator->active))
+    goto done;
+
+  /* Every group must have been returned before we may free them */
+  if (gst_atomic_queue_length (allocator->free_queue) != allocator->count) {
+    GST_DEBUG_OBJECT (allocator, "allocator is still in use");
+    ret = GST_V4L2_BUSY;
+    goto done;
+  }
+
+  /* Drain the queue; the groups themselves are freed via groups[] below */
+  while (gst_atomic_queue_pop (allocator->free_queue)) {
+    /* nothing */
+  };
+
+  for (i = 0; i < allocator->count; i++) {
+    GstV4l2MemoryGroup *group = allocator->groups[i];
+    allocator->groups[i] = NULL;
+    if (group)
+      gst_v4l2_memory_group_free (group, allocator->obj);
+  }
+
+  /* Not all drivers support REQBUFS(0), so warn only */
+  if (obj->ioctl (obj->video_fd, VIDIOC_REQBUFS, &breq) < 0)
+    GST_WARNING_OBJECT (allocator,
+        "error releasing buffers: %s", g_strerror (errno));
+
+  allocator->count = 0;
+
+  g_atomic_int_set (&allocator->active, FALSE);
+
+done:
+  GST_OBJECT_UNLOCK (allocator);
+  return ret;
+}
+
+/* Allocate one MMAP memory group. On stock GStreamer this mmap()s each
+ * plane; on NV targets each plane is exported with VIDIOC_EXPBUF and
+ * either resolved to an NvBufSurface (decoder capture) or mmap'ed /
+ * resolved to a device pointer depending on the target (is_cuvid).
+ * Returns NULL on failure (group returned to the free queue). */
+GstV4l2MemoryGroup *
+gst_v4l2_allocator_alloc_mmap (GstV4l2Allocator * allocator)
+{
+ GstV4l2Object *obj = allocator->obj;
+ GstV4l2MemoryGroup *group;
+ gint i;
+#ifdef USE_V4L2_TARGET_NV
+ gint retval = 0;
+ NvBufSurface *nvbuf_surf = 0;
+#endif
+
+ g_return_val_if_fail (allocator->memory == V4L2_MEMORY_MMAP, NULL);
+
+ group = gst_v4l2_allocator_alloc (allocator);
+
+ if (group == NULL)
+ return NULL;
+
+ for (i = 0; i < group->n_mem; i++) {
+ if (group->mem[i] == NULL) {
+ gpointer data = NULL;
+#ifndef USE_V4L2_TARGET_NV
+ data = obj->mmap (NULL, group->planes[i].length, PROT_READ | PROT_WRITE,
+ MAP_SHARED, obj->video_fd, group->planes[i].m.mem_offset);
+#else
+ struct v4l2_exportbuffer expbuf = { 0 };
+
+ expbuf.type = obj->type;
+ expbuf.index = group->buffer.index;
+ expbuf.plane = i;
+ expbuf.flags = O_CLOEXEC | O_RDWR;
+
+ /* NOTE(review): on EXPBUF failure only an error is logged and
+ * expbuf.fd (still 0) is used below — confirm whether this should
+ * instead jump to mmap_failed */
+ if (v4l2_ioctl (obj->video_fd, VIDIOC_EXPBUF, &expbuf) < 0)
+ GST_ERROR_OBJECT (allocator, "expbuf_failed");
+
+ /* Decoder capture planes carry an NvBufSurface rather than raw
+ * pixels: resolve the exported fd to the surface pointer */
+ if ((!V4L2_TYPE_IS_OUTPUT (obj->type)) &&
+ ((!strcmp (obj->videodev, V4L2_DEVICE_PATH_NVDEC) && (is_cuvid == FALSE)) ||
+ (!strcmp (obj->videodev, V4L2_DEVICE_PATH_NVDEC_ALT) && (is_cuvid == FALSE)) ||
+ (!strcmp (obj->videodev, V4L2_DEVICE_PATH_NVDEC_MCCOY) && (is_cuvid == TRUE))))
+ {
+ retval = NvBufSurfaceFromFd(expbuf.fd, (void**)(&nvbuf_surf));
+ if (retval != 0) {
+ g_print ("Failed to get surface from fd = %d\n", expbuf.fd);
+ data = MAP_FAILED;
+ } else {
+ data = nvbuf_surf;
+ }
+ } else {
+ if (is_cuvid == TRUE) {
+ /* dGPU: use the surface's device data pointer directly */
+ retval = NvBufSurfaceFromFd(expbuf.fd, (void**)(&nvbuf_surf));
+ data = nvbuf_surf->surfaceList->dataPtr;
+ } else if (is_cuvid == FALSE) {
+ /* Tegra: mmap the exported dmabuf fd */
+ data = obj->mmap (NULL, group->planes[i].length, PROT_READ | PROT_WRITE,
+ MAP_SHARED, expbuf.fd, group->planes[i].m.mem_offset);
+ }
+ }
+
+ /* NOTE(review): index 0 inside a per-plane loop looks suspicious —
+ * for multi-plane buffers every iteration overwrites planes[0].m.fd
+ * and planes[i>0].m.fd is never set; confirm whether planes[i] was
+ * intended here */
+ group->planes[0].m.fd = expbuf.fd;
+#endif
+ if (data == MAP_FAILED)
+ goto mmap_failed;
+
+ GST_LOG_OBJECT (allocator,
+ "mmap buffer length %d, data offset %d, plane %d",
+ group->planes[i].length, group->planes[i].data_offset, i);
+
+#ifndef USE_V4L2_TARGET_NV
+ group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
+ NULL, group->planes[i].length, 0, 0, group->planes[i].length, i, data,
+ -1, group);
+#else
+ /* NV target also records the exported dmabuf fd in the memory */
+ group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
+ NULL, group->planes[i].length, 0, 0, group->planes[i].length, i, data,
+ expbuf.fd, group);
+#endif
+ } else {
+ /* Take back the allocator reference */
+ gst_object_ref (allocator);
+ }
+
+ group->mems_allocated++;
+ }
+
+ /* Ensure group size. Unlike GST, v4l2 have size (bytesused) initially set
+ * to 0. As length might be bigger then the expected size exposed in the
+ * format, we simply set bytesused initially and reset it here for
+ * simplicity */
+ gst_v4l2_allocator_reset_size (allocator, group);
+
+ return group;
+
+mmap_failed:
+ {
+ GST_ERROR_OBJECT (allocator, "Failed to mmap buffer: %s",
+ g_strerror (errno));
+ _cleanup_failed_alloc (allocator, group);
+ return NULL;
+ }
+}
+
+/* Allocate an MMAP group and expose each plane as a DMABUF GstMemory.
+ * Every plane is exported with VIDIOC_EXPBUF; the resulting fd is dup()ed
+ * into a memory from @dmabuf_allocator, and the underlying GstV4l2Memory
+ * is attached via qdata so it is released together with the dmabuf memory.
+ * Returns NULL on failure (group cleaned up / requeued). */
+GstV4l2MemoryGroup *
+gst_v4l2_allocator_alloc_dmabuf (GstV4l2Allocator * allocator,
+ GstAllocator * dmabuf_allocator)
+{
+ GstV4l2Object *obj = allocator->obj;
+ GstV4l2MemoryGroup *group;
+ gint i;
+
+ /* Export requires driver-owned (MMAP) buffers */
+ g_return_val_if_fail (allocator->memory == V4L2_MEMORY_MMAP, NULL);
+
+ group = gst_v4l2_allocator_alloc (allocator);
+
+ if (group == NULL)
+ return NULL;
+
+ for (i = 0; i < group->n_mem; i++) {
+ GstV4l2Memory *mem;
+ GstMemory *dma_mem;
+ gint dmafd;
+
+ if (group->mem[i] == NULL) {
+ struct v4l2_exportbuffer expbuf = { 0 };
+
+ expbuf.type = obj->type;
+ expbuf.index = group->buffer.index;
+ expbuf.plane = i;
+ expbuf.flags = O_CLOEXEC | O_RDWR;
+
+ if (obj->ioctl (obj->video_fd, VIDIOC_EXPBUF, &expbuf) < 0)
+ goto expbuf_failed;
+
+ GST_LOG_OBJECT (allocator, "exported DMABUF as fd %i plane %d",
+ expbuf.fd, i);
+
+ group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
+ NULL, group->planes[i].length, 0, group->planes[i].data_offset,
+ group->planes[i].length - group->planes[i].data_offset, i, NULL,
+ expbuf.fd, group);
+ } else {
+ /* Take back the allocator reference */
+ gst_object_ref (allocator);
+ }
+
+ group->mems_allocated++;
+
+ g_assert (gst_is_v4l2_memory (group->mem[i]));
+ mem = (GstV4l2Memory *) group->mem[i];
+
+ /* dup so the dmabuf memory owns its own fd lifetime */
+ if ((dmafd = dup (mem->dmafd)) < 0)
+ goto dup_failed;
+
+ dma_mem = gst_dmabuf_allocator_alloc (dmabuf_allocator, dmafd,
+ group->planes[i].length);
+ gst_memory_resize (dma_mem, group->planes[i].data_offset,
+ group->planes[i].length - group->planes[i].data_offset);
+
+ /* Tie the v4l2 memory's lifetime to the dmabuf memory */
+ gst_mini_object_set_qdata (GST_MINI_OBJECT (dma_mem),
+ GST_V4L2_MEMORY_QUARK, mem, (GDestroyNotify) gst_memory_unref);
+
+ group->mem[i] = dma_mem;
+ }
+
+ gst_v4l2_allocator_reset_size (allocator, group);
+
+ return group;
+
+expbuf_failed:
+ {
+ GST_ERROR_OBJECT (allocator, "Failed to export DMABUF: %s",
+ g_strerror (errno));
+ goto cleanup;
+ }
+dup_failed:
+ {
+ GST_ERROR_OBJECT (allocator, "Failed to dup DMABUF descriptor: %s",
+ g_strerror (errno));
+ goto cleanup;
+ }
+cleanup:
+ {
+ _cleanup_failed_alloc (allocator, group);
+ return NULL;
+ }
+}
+
+/* Reset a DMABUF-import group to the "empty" state: close (non-NV only)
+ * and forget the imported fds, and zero the sizes in both the GstMemory
+ * and the v4l2 plane/buffer structures so the group can be re-imported. */
+static void
+gst_v4l2_allocator_clear_dmabufin (GstV4l2Allocator * allocator,
+ GstV4l2MemoryGroup * group)
+{
+ GstV4l2Object *obj = allocator->obj;
+ GstV4l2Memory *mem;
+ gint i;
+
+ g_return_if_fail (allocator->memory == V4L2_MEMORY_DMABUF);
+
+ for (i = 0; i < group->n_mem; i++) {
+
+ mem = (GstV4l2Memory *) group->mem[i];
+
+ GST_LOG_OBJECT (allocator, "clearing DMABUF import, fd %i plane %d",
+ mem->dmafd, i);
+
+#ifndef USE_V4L2_TARGET_NV
+ /* NOTE(review): on NV targets the fd is deliberately not closed here —
+ * presumably it is owned elsewhere; confirm against the NV buffer pool */
+ if (mem->dmafd >= 0)
+ close (mem->dmafd);
+#endif
+
+ /* Update memory */
+ mem->mem.maxsize = 0;
+ mem->mem.offset = 0;
+ mem->mem.size = 0;
+ mem->dmafd = -1;
+
+ /* Update v4l2 structure */
+ group->planes[i].length = 0;
+ group->planes[i].bytesused = 0;
+ group->planes[i].m.fd = -1;
+ group->planes[i].data_offset = 0;
+ }
+
+ /* Single-planar API mirrors plane 0 in the buffer itself */
+ if (!V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
+ group->buffer.bytesused = 0;
+ group->buffer.length = 0;
+ group->buffer.m.fd = -1;
+ }
+}
+
+/* Create a group of empty memories to be filled later through
+ * gst_v4l2_allocator_import_dmabuf(). Returns NULL when no free group
+ * is available. */
+GstV4l2MemoryGroup *
+gst_v4l2_allocator_alloc_dmabufin (GstV4l2Allocator * allocator)
+{
+  GstV4l2MemoryGroup *group;
+  gint plane;
+
+  g_return_val_if_fail (allocator->memory == V4L2_MEMORY_DMABUF, NULL);
+
+  group = gst_v4l2_allocator_alloc (allocator);
+  if (!group)
+    return NULL;
+
+  GST_LOG_OBJECT (allocator, "allocating empty DMABUF import group");
+
+  for (plane = 0; plane < group->n_mem; plane++) {
+    if (group->mem[plane] != NULL) {
+      /* Memory kept from a previous cycle; take back the allocator ref */
+      gst_object_ref (allocator);
+    } else {
+      group->mem[plane] = (GstMemory *)
+          _v4l2mem_new (0, GST_ALLOCATOR (allocator), NULL, 0, 0, 0, 0,
+          plane, NULL, -1, group);
+    }
+
+    group->mems_allocated++;
+  }
+
+  gst_v4l2_allocator_clear_dmabufin (allocator, group);
+
+  return group;
+}
+
+/* Reset a USERPTR group to the "empty" state: forget the user pointers
+ * and zero the sizes in both the GstMemory and the v4l2 plane/buffer
+ * structures so the group can be re-imported. */
+static void
+gst_v4l2_allocator_clear_userptr (GstV4l2Allocator * allocator,
+ GstV4l2MemoryGroup * group)
+{
+ GstV4l2Object *obj = allocator->obj;
+ GstV4l2Memory *mem;
+ gint i;
+
+ g_return_if_fail (allocator->memory == V4L2_MEMORY_USERPTR);
+
+ for (i = 0; i < group->n_mem; i++) {
+ mem = (GstV4l2Memory *) group->mem[i];
+
+ GST_LOG_OBJECT (allocator, "clearing USERPTR %p plane %d size %"
+ G_GSIZE_FORMAT, mem->data, i, mem->mem.size);
+
+ mem->mem.maxsize = 0;
+ mem->mem.size = 0;
+ mem->data = NULL;
+
+ group->planes[i].length = 0;
+ group->planes[i].bytesused = 0;
+ group->planes[i].m.userptr = 0;
+ }
+
+ /* Single-planar API mirrors plane 0 in the buffer itself */
+ if (!V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
+ group->buffer.bytesused = 0;
+ group->buffer.length = 0;
+ group->buffer.m.userptr = 0;
+ }
+}
+
+/* Create a group of empty memories to be filled later through
+ * gst_v4l2_allocator_import_userptr(). Returns NULL when no free group
+ * is available. */
+GstV4l2MemoryGroup *
+gst_v4l2_allocator_alloc_userptr (GstV4l2Allocator * allocator)
+{
+  GstV4l2MemoryGroup *group;
+  gint plane;
+
+  g_return_val_if_fail (allocator->memory == V4L2_MEMORY_USERPTR, NULL);
+
+  group = gst_v4l2_allocator_alloc (allocator);
+  if (!group)
+    return NULL;
+
+  GST_LOG_OBJECT (allocator, "allocating empty USERPTR group");
+
+  for (plane = 0; plane < group->n_mem; plane++) {
+    if (group->mem[plane] != NULL) {
+      /* Memory kept from a previous cycle; take back the allocator ref */
+      gst_object_ref (allocator);
+    } else {
+      group->mem[plane] = (GstMemory *)
+          _v4l2mem_new (0, GST_ALLOCATOR (allocator), NULL, 0, 0, 0, 0,
+          plane, NULL, -1, group);
+    }
+
+    group->mems_allocated++;
+  }
+
+  gst_v4l2_allocator_clear_userptr (allocator, group);
+
+  return group;
+}
+
+/**
+ * gst_v4l2_allocator_import_dmabuf:
+ * @allocator: an allocator in DMABUF import mode
+ * @group: the memory group to fill
+ * @n_mem: number of entries in @dma_mem
+ * @dma_mem: array of DMABUF GstMemory to import, one per plane
+ *
+ * Import one DMABUF per plane into @group. Each fd is dup()ed so the
+ * caller keeps ownership of the original memories; the GstV4l2Memory and
+ * the v4l2 plane/buffer fields are updated to describe the import.
+ *
+ * Returns: TRUE on success; FALSE if the plane count mismatches, a memory
+ * is not a DMABUF, or dup() fails.
+ */
+gboolean
+gst_v4l2_allocator_import_dmabuf (GstV4l2Allocator * allocator,
+    GstV4l2MemoryGroup * group, gint n_mem, GstMemory ** dma_mem)
+{
+  GstV4l2Object *obj = allocator->obj;
+  GstV4l2Memory *mem;
+  gint i;
+
+  g_return_val_if_fail (allocator->memory == V4L2_MEMORY_DMABUF, FALSE);
+
+  if (group->n_mem != n_mem)
+    goto n_mem_missmatch;
+
+  for (i = 0; i < group->n_mem; i++) {
+    gint dmafd;
+    gsize size, offset, maxsize;
+
+    if (!gst_is_dmabuf_memory (dma_mem[i]))
+      goto not_dmabuf;
+
+    size = gst_memory_get_sizes (dma_mem[i], &offset, &maxsize);
+
+    /* dup so our copy of the fd outlives the caller's memory */
+    if ((dmafd = dup (gst_dmabuf_memory_get_fd (dma_mem[i]))) < 0)
+      goto dup_failed;
+
+    GST_LOG_OBJECT (allocator, "imported DMABUF as fd %i plane %d", dmafd, i);
+
+    mem = (GstV4l2Memory *) group->mem[i];
+
+    /* Update memory */
+    mem->mem.maxsize = maxsize;
+    mem->mem.offset = offset;
+    mem->mem.size = size;
+    mem->dmafd = dmafd;
+
+    /* Update v4l2 structure */
+    group->planes[i].length = maxsize;
+    group->planes[i].bytesused = size + offset;
+    group->planes[i].m.fd = dmafd;
+    group->planes[i].data_offset = offset;
+  }
+
+  /* Copy into buffer structure if not using planes */
+  if (!V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
+    group->buffer.bytesused = group->planes[0].bytesused;
+    group->buffer.length = group->planes[0].length;
+    /* Fix: read the fd union member that was set above, not m.userptr */
+    group->buffer.m.fd = group->planes[0].m.fd;
+
+    /* FIXME Check if data_offset > 0 and fail for non-multi-planar */
+    g_assert (group->planes[0].data_offset == 0);
+  } else {
+    group->buffer.length = group->n_mem;
+  }
+
+  return TRUE;
+
+n_mem_missmatch:
+  {
+    GST_ERROR_OBJECT (allocator, "Got %i dmabuf but needed %i", n_mem,
+        group->n_mem);
+    return FALSE;
+  }
+not_dmabuf:
+  {
+    GST_ERROR_OBJECT (allocator, "Memory %i is not of DMABUF", i);
+    return FALSE;
+  }
+dup_failed:
+  {
+    GST_ERROR_OBJECT (allocator, "Failed to dup DMABUF descriptor: %s",
+        g_strerror (errno));
+    return FALSE;
+  }
+}
+
+/* Import user-space pointers into a USERPTR group: one pointer per plane
+ * for multi-planar formats, or a single pointer of @img_size for
+ * single-planar ones. Updates the GstMemory sizes and the v4l2
+ * plane/buffer structures. Returns FALSE on plane-count mismatch. */
+gboolean
+gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
+ GstV4l2MemoryGroup * group, gsize img_size, int n_planes,
+ gpointer * data, gsize * size)
+{
+ GstV4l2Object *obj = allocator->obj;
+ GstV4l2Memory *mem;
+ gint i;
+
+ g_return_val_if_fail (allocator->memory == V4L2_MEMORY_USERPTR, FALSE);
+
+ /* TODO Support passing N plane from 1 memory to MPLANE v4l2 format */
+ if (V4L2_TYPE_IS_MULTIPLANAR (obj->type) && n_planes != group->n_mem)
+ goto n_mem_missmatch;
+
+ for (i = 0; i < group->n_mem; i++) {
+ gsize maxsize, psize;
+
+ /* maxsize comes from the negotiated format's sizeimage */
+ if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
+ struct v4l2_pix_format_mplane *pix = &obj->format.fmt.pix_mp;
+ maxsize = pix->plane_fmt[i].sizeimage;
+ psize = size[i];
+ } else {
+ maxsize = obj->format.fmt.pix.sizeimage;
+ psize = img_size;
+ }
+
+ /* A plane cannot be larger than the whole image */
+ g_assert (psize <= img_size);
+
+ GST_LOG_OBJECT (allocator, "imported USERPTR %p plane %d size %"
+ G_GSIZE_FORMAT, data[i], i, psize);
+
+ mem = (GstV4l2Memory *) group->mem[i];
+
+ mem->mem.maxsize = maxsize;
+ mem->mem.size = psize;
+ mem->data = data[i];
+
+ group->planes[i].length = maxsize;
+ group->planes[i].bytesused = psize;
+ group->planes[i].m.userptr = (unsigned long) data[i];
+ group->planes[i].data_offset = 0;
+ }
+
+ /* Copy into buffer structure if not using planes */
+ if (!V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
+ group->buffer.bytesused = group->planes[0].bytesused;
+ group->buffer.length = group->planes[0].length;
+ group->buffer.m.userptr = group->planes[0].m.userptr;
+ } else {
+ group->buffer.length = group->n_mem;
+ }
+
+ return TRUE;
+
+n_mem_missmatch:
+ {
+ GST_ERROR_OBJECT (allocator, "Got %i userptr plane while driver need %i",
+ n_planes, group->n_mem);
+ return FALSE;
+ }
+}
+
+/* Drop the driver's claim on all queued buffers after streamoff: for each
+ * group still marked QUEUED, clear the flag, reset the group and release
+ * the extra memory refs that gst_v4l2_allocator_qbuf() took. */
+void
+gst_v4l2_allocator_flush (GstV4l2Allocator * allocator)
+{
+#ifndef USE_V4L2_TARGET_NV
+ gint i;
+#else
+ guint i;
+#endif
+ GST_OBJECT_LOCK (allocator);
+
+ if (!g_atomic_int_get (&allocator->active))
+ goto done;
+
+ for (i = 0; i < allocator->count; i++) {
+ GstV4l2MemoryGroup *group = allocator->groups[i];
+ gint n;
+
+ if (IS_QUEUED (group->buffer)) {
+ UNSET_QUEUED (group->buffer);
+
+ gst_v4l2_allocator_reset_group (allocator, group);
+
+ /* Drop the refs taken when the buffer was queued */
+ for (n = 0; n < group->n_mem; n++)
+ gst_memory_unref (group->mem[n]);
+ }
+ }
+
+done:
+ GST_OBJECT_UNLOCK (allocator);
+}
+
+/**
+ * gst_v4l2_allocator_qbuf:
+ * @allocator: an active allocator
+ * @group: the memory group (one v4l2_buffer) to queue
+ *
+ * Refresh the bytesused fields from the current GstMemory sizes, take a
+ * reference on each memory for the duration of driver ownership, then
+ * queue the buffer with VIDIOC_QBUF. On failure the references are
+ * dropped again and FALSE is returned.
+ *
+ * Returns: TRUE on success, FALSE if VIDIOC_QBUF failed.
+ */
+gboolean
+gst_v4l2_allocator_qbuf (GstV4l2Allocator * allocator,
+    GstV4l2MemoryGroup * group)
+{
+  GstV4l2Object *obj = allocator->obj;
+  gboolean ret = TRUE;
+  gint i;
+
+  g_return_val_if_fail (g_atomic_int_get (&allocator->active), FALSE);
+
+  /* update sizes */
+  if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
+    for (i = 0; i < group->n_mem; i++)
+      group->planes[i].bytesused =
+          gst_memory_get_sizes (group->mem[i], NULL, NULL);
+  } else {
+    group->buffer.bytesused = gst_memory_get_sizes (group->mem[0], NULL, NULL);
+  }
+
+#ifdef USE_V4L2_TARGET_NV
+  /* NOTE(review): assumes OUTPUT buffers are multi-planar here (m.planes
+   * dereference) — holds for the NV encoder/decoder devices */
+  if (V4L2_TYPE_IS_OUTPUT (obj->type))
+    group->buffer.m.planes[0].bytesused =
+        gst_memory_get_sizes (group->mem[0], NULL, NULL);
+
+  /* On dGPU, hand any pending deepstream SEI payload to the encoder via an
+   * extended control before queueing the frame. Moved inside the NV guard
+   * since is_cuvid and the control id only exist on NV targets. */
+  if (obj->is_encode) {
+    if ((is_cuvid == true) && (obj->sei_payload != NULL)) {
+      gint err;
+      struct v4l2_ext_control ctl;
+      struct v4l2_ext_controls ctrls;
+
+      /* Zero-initialize so reserved fields don't carry stack garbage */
+      memset (&ctl, 0, sizeof (ctl));
+      memset (&ctrls, 0, sizeof (ctrls));
+
+      ctl.id = V4L2_CID_MPEG_VIDEOENC_DS_SEI_DATA;
+      ctl.ptr = obj->sei_payload;
+      ctl.size = obj->sei_payload_size;
+      ctrls.count = 1;
+      ctrls.controls = &ctl;
+
+      /* err no longer shadows the function-level 'ret' */
+      err = obj->ioctl (obj->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+      if (err)
+        GST_ERROR_OBJECT (allocator,
+            "Passing DS SEI data in ext ctrl failed");
+    }
+  }
+#endif
+
+  /* Ensure the memory will stay around and is RO */
+  for (i = 0; i < group->n_mem; i++)
+    gst_memory_ref (group->mem[i]);
+
+  if (obj->ioctl (obj->video_fd, VIDIOC_QBUF, &group->buffer) < 0) {
+    GST_ERROR_OBJECT (allocator, "failed queueing buffer %i: %s",
+        group->buffer.index, g_strerror (errno));
+
+    /* Release the memory, possibly making it RW again */
+    for (i = 0; i < group->n_mem; i++)
+      gst_memory_unref (group->mem[i]);
+
+    ret = FALSE;
+    if (IS_QUEUED (group->buffer)) {
+      GST_DEBUG_OBJECT (allocator,
+          "driver pretends buffer is queued even if queue failed");
+      UNSET_QUEUED (group->buffer);
+    }
+    goto done;
+  }
+
+  GST_LOG_OBJECT (allocator, "queued buffer %i (flags 0x%X)",
+      group->buffer.index, group->buffer.flags);
+
+  if (!IS_QUEUED (group->buffer)) {
+    GST_DEBUG_OBJECT (allocator,
+        "driver pretends buffer is not queued even if queue succeeded");
+    SET_QUEUED (group->buffer);
+  }
+
+done:
+  return ret;
+}
+
+/* Dequeue one buffer with VIDIOC_DQBUF, locate its memory group by index,
+ * refresh the group's v4l2 state and GstMemory sizes, and drop the refs
+ * taken at queue time. Returns GST_FLOW_OK with *group_out set,
+ * GST_FLOW_EOS on EPIPE (last buffer), or GST_FLOW_ERROR. */
+GstFlowReturn
+gst_v4l2_allocator_dqbuf (GstV4l2Allocator * allocator,
+ GstV4l2MemoryGroup ** group_out)
+{
+ GstV4l2Object *obj = allocator->obj;
+ struct v4l2_buffer buffer = { 0 };
+ struct v4l2_plane planes[VIDEO_MAX_PLANES] = { {0} };
+ gint i;
+
+ GstV4l2MemoryGroup *group = NULL;
+
+ g_return_val_if_fail (g_atomic_int_get (&allocator->active), GST_FLOW_ERROR);
+
+ buffer.type = obj->type;
+ buffer.memory = allocator->memory;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
+ buffer.length = obj->format.fmt.pix_mp.num_planes;
+ buffer.m.planes = planes;
+ }
+
+#ifndef USE_V4L2_TARGET_NV
+ if (obj->ioctl (obj->video_fd, VIDIOC_DQBUF, &buffer) < 0)
+ goto error;
+#else
+ /* TODO: This could a possible bug in library */
+ /* NOTE(review): this retries on every errno except EPIPE — it can spin
+ * indefinitely on e.g. EAGAIN/EINVAL; confirm the library behaviour that
+ * motivates this workaround */
+ while (1)
+ {
+ if (v4l2_ioctl (obj->video_fd, VIDIOC_DQBUF, &buffer) == 0)
+ break;
+ else if (errno == EPIPE)
+ goto error;
+ }
+#endif
+
+ /* The driver identifies the buffer by index */
+ group = allocator->groups[buffer.index];
+
+ if (!IS_QUEUED (group->buffer)) {
+ GST_ERROR_OBJECT (allocator,
+ "buffer %i was not queued, this indicate a driver bug.", buffer.index);
+ return GST_FLOW_ERROR;
+ }
+
+ group->buffer = buffer;
+
+ GST_LOG_OBJECT (allocator, "dequeued buffer %i (flags 0x%X)", buffer.index,
+ buffer.flags);
+
+ if (IS_QUEUED (group->buffer)) {
+ GST_DEBUG_OBJECT (allocator,
+ "driver pretends buffer is queued even if dequeue succeeded");
+ UNSET_QUEUED (group->buffer);
+ }
+
+ /* TODO: Need to resolve below WAR */
+#ifdef USE_V4L2_TARGET_NV
+ /* Decoder capture planes carry an NvBufSurface, so report its size as
+ * the payload instead of what the driver returned */
+ if (V4L2_TYPE_IS_MULTIPLANAR (obj->type) &&
+ ((!strcmp(obj->videodev, V4L2_DEVICE_PATH_NVDEC) && (is_cuvid == FALSE)) ||
+ (!strcmp(obj->videodev, V4L2_DEVICE_PATH_NVDEC_ALT) && (is_cuvid == FALSE)) ||
+ (!strcmp(obj->videodev, V4L2_DEVICE_PATH_NVDEC_MCCOY) && (is_cuvid == TRUE)))) {
+ buffer.m.planes[0].bytesused = sizeof(NvBufSurface);
+ }
+#endif
+
+ if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
+ /* Re-point the plane array at the group's own storage and copy the
+ * plane data the driver filled into our local array */
+ group->buffer.m.planes = group->planes;
+ memcpy (group->planes, buffer.m.planes, sizeof (planes));
+ } else {
+ /* Single-planar: mirror the buffer fields into plane 0 */
+ group->planes[0].bytesused = group->buffer.bytesused;
+ group->planes[0].length = group->buffer.length;
+ g_assert (sizeof (group->planes[0].m) == sizeof (group->buffer.m));
+ memcpy (&group->planes[0].m, &group->buffer.m, sizeof (group->buffer.m));
+ }
+
+ /* And update memory size */
+ if (V4L2_TYPE_IS_OUTPUT (obj->type)) {
+ gst_v4l2_allocator_reset_size (allocator, group);
+ } else {
+ /* for capture, simply read the size */
+ for (i = 0; i < group->n_mem; i++) {
+ gsize size, offset;
+
+ GST_LOG_OBJECT (allocator,
+ "Dequeued capture buffer, length: %u bytesused: %u data_offset: %u",
+ group->planes[i].length, group->planes[i].bytesused,
+ group->planes[i].data_offset);
+
+ offset = group->planes[i].data_offset;
+
+ if (group->planes[i].bytesused > group->planes[i].data_offset) {
+ size = group->planes[i].bytesused - group->planes[i].data_offset;
+ } else {
+ GST_WARNING_OBJECT (allocator, "V4L2 provided buffer has bytesused %"
+ G_GUINT32_FORMAT " which is too small to include data_offset %"
+ G_GUINT32_FORMAT, group->planes[i].bytesused,
+ group->planes[i].data_offset);
+ size = group->planes[i].bytesused;
+ }
+
+ if (G_LIKELY (size + offset <= group->mem[i]->maxsize))
+ gst_memory_resize (group->mem[i], offset, size);
+ else {
+ GST_WARNING_OBJECT (allocator,
+ "v4l2 provided buffer that is too big for the memory it was "
+ "writing into. v4l2 claims %" G_GSIZE_FORMAT " bytes used but "
+ "memory is only %" G_GSIZE_FORMAT "B. This is probably a driver "
+ "bug.", size, group->mem[i]->maxsize);
+ gst_memory_resize (group->mem[i], 0, group->mem[i]->maxsize);
+ }
+ }
+ }
+
+ /* Release the memory, possibly making it RW again */
+ for (i = 0; i < group->n_mem; i++)
+ gst_memory_unref (group->mem[i]);
+
+ *group_out = group;
+ return GST_FLOW_OK;
+
+error:
+ /* EPIPE from DQBUF signals end-of-stream, not a failure */
+ if (errno == EPIPE) {
+ GST_DEBUG_OBJECT (allocator, "broken pipe signals last buffer");
+ return GST_FLOW_EOS;
+ }
+
+ GST_ERROR_OBJECT (allocator, "failed dequeuing a %s buffer: %s",
+ memory_type_to_str (allocator->memory), g_strerror (errno));
+
+ switch (errno) {
+ case EAGAIN:
+ GST_WARNING_OBJECT (allocator,
+ "Non-blocking I/O has been selected using O_NONBLOCK and"
+ " no buffer was in the outgoing queue.");
+ break;
+ case EINVAL:
+ GST_ERROR_OBJECT (allocator,
+ "The buffer type is not supported, or the index is out of bounds, "
+ "or no buffers have been allocated yet, or the userptr "
+ "or length are invalid.");
+ break;
+ case ENOMEM:
+ GST_ERROR_OBJECT (allocator,
+ "insufficient memory to enqueue a user pointer buffer");
+ break;
+ case EIO:
+ GST_INFO_OBJECT (allocator,
+ "VIDIOC_DQBUF failed due to an internal error."
+ " Can also indicate temporary problems like signal loss."
+ " Note the driver might dequeue an (empty) buffer despite"
+ " returning an error, or even stop capturing.");
+ /* have we de-queued a buffer ? */
+ if (!IS_QUEUED (buffer)) {
+ GST_DEBUG_OBJECT (allocator, "reenqueueing buffer");
+ /* FIXME ... should we do something here? */
+ }
+ break;
+ case EINTR:
+ GST_WARNING_OBJECT (allocator, "could not sync on a buffer on device");
+ break;
+ default:
+ GST_WARNING_OBJECT (allocator,
+ "Grabbing frame got interrupted unexpectedly. %d: %s.", errno,
+ g_strerror (errno));
+ break;
+ }
+
+ return GST_FLOW_ERROR;
+}
+
+/* Return a group to its pristine state for the allocator's I/O mode:
+ * USERPTR and DMABUF imports are cleared, MMAP groups need no clearing,
+ * and finally the memory sizes are reset to the full plane lengths. */
+void
+gst_v4l2_allocator_reset_group (GstV4l2Allocator * allocator,
+    GstV4l2MemoryGroup * group)
+{
+  guint32 io_mode = allocator->memory;
+
+  if (io_mode == V4L2_MEMORY_USERPTR) {
+    gst_v4l2_allocator_clear_userptr (allocator, group);
+  } else if (io_mode == V4L2_MEMORY_DMABUF) {
+    gst_v4l2_allocator_clear_dmabufin (allocator, group);
+  } else if (io_mode != V4L2_MEMORY_MMAP) {
+    /* Any other memory mode is a programming error */
+    g_assert_not_reached ();
+  }
+
+  gst_v4l2_allocator_reset_size (allocator, group);
+}
+
+#ifdef USE_V4L2_TARGET_NV
+/* NV-specific: toggle the dynamic-allocation flag on the allocator.
+ * Taken under the object lock so readers holding the same lock observe a
+ * consistent value. */
+void
+gst_v4l2_allocator_enable_dynamic_allocation (GstV4l2Allocator * allocator,
+    gboolean enable_dynamic_allocation)
+{
+  GST_DEBUG_OBJECT (allocator, "dynamic allocation enable %d",
+      enable_dynamic_allocation);
+
+  GST_OBJECT_LOCK (allocator);
+  allocator->enable_dynamic_allocation = enable_dynamic_allocation;
+  GST_OBJECT_UNLOCK (allocator);
+}
+#endif
diff --git a/gst-v4l2/gstv4l2allocator.h b/gst-v4l2/gstv4l2allocator.h
new file mode 100644
index 0000000..ff6c9b4
--- /dev/null
+++ b/gst-v4l2/gstv4l2allocator.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2014 Collabora Ltd.
+ * Author: Nicolas Dufresne
+ * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+#ifndef __GST_V4L2_ALLOCATOR_H__
+#define __GST_V4L2_ALLOCATOR_H__
+
+#include "linux/videodev2.h"
+#include <gst/gst.h>
+#include <gst/gstatomicqueue.h>
+
+G_BEGIN_DECLS
+
+#define GST_TYPE_V4L2_ALLOCATOR (gst_v4l2_allocator_get_type())
+#define GST_IS_V4L2_ALLOCATOR(obj) (G_TYPE_CHECK_INSTANCE_TYPE ((obj), GST_TYPE_V4L2_ALLOCATOR))
+#define GST_IS_V4L2_ALLOCATOR_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE ((klass), GST_TYPE_V4L2_ALLOCATOR))
+#define GST_V4L2_ALLOCATOR_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), GST_TYPE_V4L2_ALLOCATOR, GstV4l2AllocatorClass))
+#define GST_V4L2_ALLOCATOR(obj) (G_TYPE_CHECK_INSTANCE_CAST ((obj), GST_TYPE_V4L2_ALLOCATOR, GstV4l2Allocator))
+#define GST_V4L2_ALLOCATOR_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST ((klass), GST_TYPE_V4L2_ALLOCATOR, GstV4l2AllocatorClass))
+#define GST_V4L2_ALLOCATOR_CAST(obj) ((GstV4l2Allocator *)(obj))
+
+#define GST_V4L2_ALLOCATOR_CAN_REQUEST(obj,type) \
+ (GST_OBJECT_FLAG_IS_SET (obj, GST_V4L2_ALLOCATOR_FLAG_ ## type ## _REQBUFS))
+#define GST_V4L2_ALLOCATOR_CAN_ALLOCATE(obj,type) \
+ (GST_OBJECT_FLAG_IS_SET (obj, GST_V4L2_ALLOCATOR_FLAG_ ## type ## _CREATE_BUFS))
+
+#define GST_V4L2_MEMORY_QUARK gst_v4l2_memory_quark ()
+
+/* The structures are renamed as the name conflicts with the
+ * OSS v4l2 library structures. */
+#ifdef USE_V4L2_TARGET_NV
+#define GstV4l2Allocator GstNvV4l2Allocator
+#define GstV4l2AllocatorClass GstNvV4l2AllocatorClass
+#endif
+
+#ifdef USE_V4L2_TARGET_NV
+#define NV_VIDEO_MAX_FRAME 64
+#endif
+
+typedef struct _GstV4l2Allocator GstV4l2Allocator;
+typedef struct _GstV4l2AllocatorClass GstV4l2AllocatorClass;
+typedef struct _GstV4l2MemoryGroup GstV4l2MemoryGroup;
+typedef struct _GstV4l2Memory GstV4l2Memory;
+typedef enum _GstV4l2Capabilities GstV4l2Capabilities;
+typedef enum _GstV4l2Return GstV4l2Return;
+typedef struct _GstV4l2Object GstV4l2Object;
+
+enum _GstV4l2AllocatorFlags
+{
+ GST_V4L2_ALLOCATOR_FLAG_MMAP_REQBUFS = (GST_ALLOCATOR_FLAG_LAST << 0),
+ GST_V4L2_ALLOCATOR_FLAG_MMAP_CREATE_BUFS = (GST_ALLOCATOR_FLAG_LAST << 1),
+ GST_V4L2_ALLOCATOR_FLAG_USERPTR_REQBUFS = (GST_ALLOCATOR_FLAG_LAST << 2),
+ GST_V4L2_ALLOCATOR_FLAG_USERPTR_CREATE_BUFS = (GST_ALLOCATOR_FLAG_LAST << 3),
+ GST_V4L2_ALLOCATOR_FLAG_DMABUF_REQBUFS = (GST_ALLOCATOR_FLAG_LAST << 4),
+ GST_V4L2_ALLOCATOR_FLAG_DMABUF_CREATE_BUFS = (GST_ALLOCATOR_FLAG_LAST << 5),
+};
+
+/* Result codes for allocator operations (see gst_v4l2_allocator_stop()). */
+enum _GstV4l2Return
+{
+ GST_V4L2_OK = 0,
+ GST_V4L2_ERROR = -1,
+ GST_V4L2_BUSY = -2 /* buffers still outstanding; try again later */
+};
+
+/* One plane of a V4L2 buffer wrapped as a GstMemory. */
+struct _GstV4l2Memory
+{
+ GstMemory mem; /* parent GstMemory; must be first */
+ gint plane; /* plane index within the owning group */
+ GstV4l2MemoryGroup *group; /* the v4l2_buffer group this plane belongs to */
+ gpointer data; /* mapped or user pointer; NULL for DMABUF */
+ gint dmafd; /* DMABUF fd, -1 when none is attached */
+};
+
+/* A complete v4l2_buffer with its per-plane GstMemory objects. */
+struct _GstV4l2MemoryGroup
+{
+ gint n_mem; /* number of planes in use */
+ GstMemory * mem[VIDEO_MAX_PLANES]; /* one memory per plane */
+ gint mems_allocated; /* planes successfully allocated so far */
+ struct v4l2_buffer buffer; /* driver-side buffer state */
+ struct v4l2_plane planes[VIDEO_MAX_PLANES]; /* backing for buffer.m.planes */
+};
+
+/* Allocator managing the memory groups of one V4L2 queue. */
+struct _GstV4l2Allocator
+{
+ GstAllocator parent;
+ GstV4l2Object *obj; /* owning v4l2 object (ref held on its element) */
+ guint32 count; /* number of buffers granted by REQBUFS */
+ guint32 memory; /* active V4L2_MEMORY_* mode */
+ gboolean can_allocate; /* CREATE_BUFS usable for the active mode */
+ gboolean active; /* set between _start() and _stop() */
+
+#ifdef USE_V4L2_TARGET_NV
+ GstV4l2MemoryGroup * groups[NV_VIDEO_MAX_FRAME]; /* group per buffer index */
+#else
+ GstV4l2MemoryGroup * groups[VIDEO_MAX_FRAME];
+#endif
+ GstAtomicQueue *free_queue; /* groups not currently in use */
+ GstAtomicQueue *pending_queue;
+
+#ifdef USE_V4L2_TARGET_NV
+ gboolean enable_dynamic_allocation; /* If dynamic_allocation should be set */
+#endif
+};
+
+struct _GstV4l2AllocatorClass {
+ GstAllocatorClass parent_class;
+};
+
+GType gst_v4l2_allocator_get_type(void);
+
+gboolean gst_is_v4l2_memory (GstMemory * mem);
+
+GQuark gst_v4l2_memory_quark (void);
+
+gboolean gst_v4l2_allocator_is_active (GstV4l2Allocator * allocator);
+
+guint gst_v4l2_allocator_get_size (GstV4l2Allocator * allocator);
+
+GstV4l2Allocator* gst_v4l2_allocator_new (GstObject *parent, GstV4l2Object * obj);
+
+guint gst_v4l2_allocator_start (GstV4l2Allocator * allocator,
+ guint32 count, guint32 memory);
+
+GstV4l2Return gst_v4l2_allocator_stop (GstV4l2Allocator * allocator);
+
+GstV4l2MemoryGroup* gst_v4l2_allocator_alloc_mmap (GstV4l2Allocator * allocator);
+
+GstV4l2MemoryGroup* gst_v4l2_allocator_alloc_dmabuf (GstV4l2Allocator * allocator,
+ GstAllocator * dmabuf_allocator);
+
+GstV4l2MemoryGroup * gst_v4l2_allocator_alloc_dmabufin (GstV4l2Allocator * allocator);
+
+GstV4l2MemoryGroup * gst_v4l2_allocator_alloc_userptr (GstV4l2Allocator * allocator);
+
+gboolean gst_v4l2_allocator_import_dmabuf (GstV4l2Allocator * allocator,
+ GstV4l2MemoryGroup *group,
+ gint n_mem, GstMemory ** dma_mem);
+
+gboolean gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
+ GstV4l2MemoryGroup *group,
+ gsize img_size, int n_planes,
+ gpointer * data, gsize * size);
+
+void gst_v4l2_allocator_flush (GstV4l2Allocator * allocator);
+
+gboolean gst_v4l2_allocator_qbuf (GstV4l2Allocator * allocator,
+ GstV4l2MemoryGroup * group);
+
+GstFlowReturn gst_v4l2_allocator_dqbuf (GstV4l2Allocator * allocator,
+ GstV4l2MemoryGroup ** group);
+
+void gst_v4l2_allocator_reset_group (GstV4l2Allocator * allocator,
+ GstV4l2MemoryGroup * group);
+#ifdef USE_V4L2_TARGET_NV
+void
+gst_v4l2_allocator_enable_dynamic_allocation (GstV4l2Allocator * allocator,
+ gboolean enable_dynamic_allocation);
+#endif
+
+G_END_DECLS
+
+#endif /* __GST_V4L2_ALLOCATOR_H__ */
diff --git a/gst-v4l2/gstv4l2av1enc.c b/gst-v4l2/gstv4l2av1enc.c
new file mode 100644
index 0000000..8fc2ec9
--- /dev/null
+++ b/gst-v4l2/gstv4l2av1enc.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "gstv4l2object.h"
+#include "gstv4l2av1enc.h"
+
+#include
+#include
+
+GST_DEBUG_CATEGORY_STATIC (gst_v4l2_av1_enc_debug);
+#define GST_CAT_DEFAULT gst_v4l2_av1_enc_debug
+
+static GstStaticCaps src_template_caps =
+GST_STATIC_CAPS ("video/x-av1");
+
+/* prototypes */
+gboolean gst_v4l2_av1_enc_tile_configuration (GstV4l2Object * v4l2object,
+ gboolean enable_tile, guint32 log2_tile_rows, guint32 log2_tile_cols);
+static gboolean gst_v4l2_video_enc_parse_tile_configuration (GstV4l2Av1Enc * self,
+ const gchar * arr);
+gboolean set_v4l2_av1_encoder_properties (GstVideoEncoder * encoder);
+
+enum
+{
+ PROP_0,
+ V4L2_STD_OBJECT_PROPS,
+ PROP_ENABLE_HEADER,
+ PROP_ENABLE_TILE_CONFIG,
+ PROP_DISABLE_CDF,
+ PROP_ENABLE_SSIMRDO,
+ PROP_NUM_REFERENCE_FRAMES,
+};
+
+#define DEFAULT_NUM_REFERENCE_FRAMES 0
+#define MAX_NUM_REFERENCE_FRAMES 4
+
+#define gst_v4l2_av1_enc_parent_class parent_class
+G_DEFINE_TYPE (GstV4l2Av1Enc, gst_v4l2_av1_enc, GST_TYPE_V4L2_VIDEO_ENC);
+
+/* GObject property setter for v4l2av1enc.
+ * All installed properties are GST_PARAM_MUTABLE_READY, so writes land
+ * before streaming starts; most values are pushed to the V4L2 device later
+ * by set_v4l2_av1_encoder_properties(). */
+static void
+gst_v4l2_av1_enc_set_property (GObject * object,
+    guint prop_id, const GValue * value, GParamSpec * pspec)
+{
+  GstV4l2Av1Enc *self = GST_V4L2_AV1_ENC (object);
+  GstV4l2VideoEnc *video_enc = GST_V4L2_VIDEO_ENC (object);
+
+  switch (prop_id) {
+    case PROP_ENABLE_HEADER:
+      /* Mirrored onto the capture-side v4l2 object so the base class can
+       * act on it as well. */
+      self->EnableHeaders = g_value_get_boolean (value);
+      video_enc->v4l2capture->Enable_headers = g_value_get_boolean (value);
+      break;
+    case PROP_ENABLE_TILE_CONFIG:
+      /* Parses "Log2Rows,Log2Cols" into self; setting the property at all
+       * implies tiling is enabled. */
+      gst_v4l2_video_enc_parse_tile_configuration (self,
+          g_value_get_string (value));
+      self->EnableTileConfig = TRUE;
+      break;
+    case PROP_DISABLE_CDF:
+      self->DisableCDFUpdate = g_value_get_boolean (value);
+      break;
+    case PROP_ENABLE_SSIMRDO:
+      self->EnableSsimRdo = g_value_get_boolean (value);
+      break;
+    case PROP_NUM_REFERENCE_FRAMES:
+      self->nRefFrames = g_value_get_uint (value);
+      break;
+    default:
+      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+      break;
+  }
+}
+
+/* GObject property getter for v4l2av1enc.
+ * Fix: the PROP_ENABLE_TILE_CONFIG case previously returned without
+ * touching the GValue, so reading the "tiles" property yielded an empty
+ * value.  It now reports the stored configuration in the same
+ * "Log2Rows,Log2Cols" format the setter accepts. */
+static void
+gst_v4l2_av1_enc_get_property (GObject * object,
+    guint prop_id, GValue * value, GParamSpec * pspec)
+{
+  GstV4l2Av1Enc *self = GST_V4L2_AV1_ENC (object);
+
+  switch (prop_id) {
+    case PROP_ENABLE_HEADER:
+      g_value_set_boolean (value, self->EnableHeaders);
+      break;
+    case PROP_ENABLE_TILE_CONFIG:
+      /* g_value_take_string() hands ownership of the allocation to the
+       * GValue, so no explicit free is needed here. */
+      g_value_take_string (value,
+          g_strdup_printf ("%u,%u", self->Log2TileRows, self->Log2TileCols));
+      break;
+    case PROP_DISABLE_CDF:
+      g_value_set_boolean (value, self->DisableCDFUpdate);
+      break;
+    case PROP_ENABLE_SSIMRDO:
+      g_value_set_boolean (value, self->EnableSsimRdo);
+      break;
+    case PROP_NUM_REFERENCE_FRAMES:
+      g_value_set_uint (value, self->nRefFrames);
+      break;
+    default:
+      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+      break;
+  }
+}
+
+/* Map an AV1 profile string ("0".."3") to its numeric V4L2 profile id.
+ * Returns -1 and logs a warning for any other input. */
+static gint
+v4l2_profile_from_string (const gchar * profile)
+{
+  static const gchar *known_profiles[] = { "0", "1", "2", "3" };
+  guint idx;
+
+  for (idx = 0; idx < G_N_ELEMENTS (known_profiles); idx++) {
+    if (g_str_equal (profile, known_profiles[idx]))
+      return (gint) idx;
+  }
+
+  GST_WARNING ("Unsupported profile string '%s'", profile);
+  return -1;
+}
+
+/* Map a numeric V4L2 AV1 profile id (0..3) back to its string form.
+ * Returns NULL and logs a warning for out-of-range values. */
+static const gchar *
+v4l2_profile_to_string (gint v4l2_profile)
+{
+  static const gchar *profile_names[] = { "0", "1", "2", "3" };
+
+  if (v4l2_profile >= 0 && v4l2_profile < (gint) G_N_ELEMENTS (profile_names))
+    return profile_names[v4l2_profile];
+
+  GST_WARNING ("Unsupported V4L2 profile %i", v4l2_profile);
+  return NULL;
+}
+
+/* Parse a "Log2Rows,Log2Cols" tile configuration string into @self.
+ * Fix: the original dereferenced the result of g_strstr_len() without a
+ * NULL check, crashing on input with no ',' — and also crashed when the
+ * property was set to NULL (g_value_get_string() may return NULL).
+ * Missing parts now fall back to 0; returns TRUE only when both fields
+ * were parsed (callers currently ignore the return value, so this is
+ * backward compatible). */
+static gboolean
+gst_v4l2_video_enc_parse_tile_configuration (GstV4l2Av1Enc * self,
+    const gchar * arr)
+{
+  gchar *str;
+
+  self->Log2TileRows = 0;
+  self->Log2TileCols = 0;
+
+  if (arr == NULL)
+    return FALSE;
+
+  self->Log2TileRows = atoi (arr);
+
+  str = g_strstr_len (arr, -1, ",");
+  if (str == NULL)
+    return FALSE;
+
+  self->Log2TileCols = atoi (str + 1);
+  return TRUE;
+}
+
+/* Push the AV1 tile configuration to the encoder via VIDIOC_S_EXT_CTRLS.
+ * Packs (enable, log2 rows, log2 cols) into the NV-specific
+ * v4l2_enc_av1_tile_config payload.  Returns TRUE on success. */
+gboolean
+gst_v4l2_av1_enc_tile_configuration (GstV4l2Object * v4l2object,
+    gboolean enable_tile, guint32 log2_tile_rows, guint32 log2_tile_cols)
+{
+  struct v4l2_ext_control control;
+  struct v4l2_ext_controls ctrls;
+  gint ret;
+
+  v4l2_enc_av1_tile_config param =
+      {enable_tile, log2_tile_rows, log2_tile_cols};
+
+  memset (&control, 0, sizeof (control));
+  memset (&ctrls, 0, sizeof (ctrls));
+
+  ctrls.count = 1;
+  ctrls.controls = &control;
+  ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+
+  control.id = V4L2_CID_MPEG_VIDEOENC_AV1_TILE_CONFIGURATION;
+  /* NOTE(review): the struct is passed through the 'string' pointer of the
+   * ext control — presumably the NV driver convention for compound
+   * payloads; confirm against v4l2_nv_extensions.h. */
+  control.string = (gchar *) &param;
+
+  ret = v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+  if (ret < 0) {
+    g_print ("Error while setting tile configuration\n");
+    return FALSE;
+  }
+
+  return TRUE;
+}
+
+/* Apply all cached AV1 property values to the open V4L2 output device.
+ * Installed as the base class's set_encoder_properties hook; called once
+ * the device is open, before streaming.  Returns FALSE on the first
+ * control that fails. */
+gboolean
+set_v4l2_av1_encoder_properties (GstVideoEncoder * encoder)
+{
+  GstV4l2Av1Enc *self = GST_V4L2_AV1_ENC (encoder);
+  GstV4l2VideoEnc *video_enc = GST_V4L2_VIDEO_ENC (encoder);
+
+  if (!GST_V4L2_IS_OPEN (video_enc->v4l2output)) {
+    g_print ("V4L2 device is not open\n");
+    return FALSE;
+  }
+
+  /* Tile configuration is only sent when the "tiles" property was set. */
+  if (self->EnableTileConfig) {
+    if (!gst_v4l2_av1_enc_tile_configuration (video_enc->v4l2output,
+        self->EnableTileConfig, self->Log2TileRows, self->Log2TileCols)) {
+      g_print ("S_EXT_CTRLS for Tile Configuration failed\n");
+      return FALSE;
+    }
+  }
+
+  /* DisableCDFUpdate is sent unconditionally (defaults to TRUE). */
+  if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+      V4L2_CID_MPEG_VIDEOENC_AV1_DISABLE_CDF_UPDATE, self->DisableCDFUpdate)) {
+    g_print ("S_EXT_CTRLS for DisableCDF Update failed\n");
+    return FALSE;
+  }
+
+  if (self->EnableSsimRdo) {
+    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+        V4L2_CID_MPEG_VIDEOENC_AV1_ENABLE_SSIMRDO, self->EnableSsimRdo)) {
+      g_print ("S_EXT_CTRLS for SSIM RDO failed\n");
+      return FALSE;
+    }
+  }
+
+  /* 0 means "let the encoder choose", so only non-zero values are sent. */
+  if (self->nRefFrames) {
+    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+        V4L2_CID_MPEG_VIDEOENC_NUM_REFERENCE_FRAMES,
+        self->nRefFrames)) {
+      g_print ("S_EXT_CTRLS for NUM_REFERENCE_FRAMES failed\n");
+      return FALSE;
+    }
+  }
+
+  return TRUE;
+}
+
+/* Instance init: establish property defaults.
+ * GObject zero-fills the instance memory, so the behavior is unchanged;
+ * every field is now set explicitly so the defaults visibly match the
+ * param specs installed in class_init. */
+static void
+gst_v4l2_av1_enc_init (GstV4l2Av1Enc * self)
+{
+  self->EnableHeaders = FALSE;
+  self->EnableTileConfig = FALSE;
+  self->DisableCDFUpdate = TRUE;
+  self->EnableSsimRdo = FALSE;
+  self->Log2TileRows = 0;
+  self->Log2TileCols = 0;
+  self->nRefFrames = DEFAULT_NUM_REFERENCE_FRAMES;
+}
+
+/* Class init: element metadata, property installation, and wiring of the
+ * AV1-specific hooks into the GstV4l2VideoEnc base class. */
+static void
+gst_v4l2_av1_enc_class_init (GstV4l2Av1EncClass * klass)
+{
+  GstElementClass *element_class;
+  GObjectClass *gobject_class;
+  GstV4l2VideoEncClass *baseclass;
+
+  parent_class = g_type_class_peek_parent (klass);
+
+  element_class = (GstElementClass *) klass;
+  gobject_class = (GObjectClass *) klass;
+  baseclass = (GstV4l2VideoEncClass *) (klass);
+
+  GST_DEBUG_CATEGORY_INIT (gst_v4l2_av1_enc_debug, "v4l2av1enc", 0,
+      "V4L2 AV1 Encoder");
+
+  gst_element_class_set_static_metadata (element_class,
+      "V4L2 AV1 Encoder",
+      "Codec/Encoder/Video",
+      "Encode AV1 video streams via V4L2 API",
+      "Anuma Rathore ");
+
+  gobject_class->set_property =
+      GST_DEBUG_FUNCPTR (gst_v4l2_av1_enc_set_property);
+  gobject_class->get_property =
+      GST_DEBUG_FUNCPTR (gst_v4l2_av1_enc_get_property);
+
+  /* All properties are MUTABLE_READY: changeable only before streaming. */
+  g_object_class_install_property (gobject_class, PROP_ENABLE_HEADER,
+      g_param_spec_boolean ("enable-headers", "Enable AV1 headers",
+          "Enable AV1 file and frame headers, if enabled, dump elementary stream",
+          FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+          GST_PARAM_MUTABLE_READY));
+  g_object_class_install_property (gobject_class, PROP_ENABLE_TILE_CONFIG,
+      g_param_spec_string ("tiles", "AV1 Log2 Tile Configuration",
+          "Use string with values of Tile Configuration"
+          "in Log2Rows:Log2Cols. Eg: \"1,0\"",
+          "0,0", G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+          GST_PARAM_MUTABLE_READY));
+  g_object_class_install_property (gobject_class, PROP_DISABLE_CDF,
+      g_param_spec_boolean ("disable-cdf", "Disable CDF Update",
+          "Flag to control Disable CDF Update, enabled by default",
+          TRUE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+          GST_PARAM_MUTABLE_READY));
+  g_object_class_install_property (gobject_class, PROP_ENABLE_SSIMRDO,
+      g_param_spec_boolean ("enable-srdo", "Enable SSIM RDO",
+          "Enable SSIM RDO",
+          FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+          GST_PARAM_MUTABLE_READY));
+  /* NOTE(review): "num-Ref-Frames" breaks the all-lowercase GObject
+   * property naming convention, but renaming would break existing
+   * pipelines — left as-is deliberately. */
+  g_object_class_install_property (gobject_class, PROP_NUM_REFERENCE_FRAMES,
+      g_param_spec_uint ("num-Ref-Frames",
+          "Sets the number of reference frames for encoder",
+          "Number of Reference Frames for encoder, default set by encoder",
+          0, MAX_NUM_REFERENCE_FRAMES, DEFAULT_NUM_REFERENCE_FRAMES,
+          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+          GST_PARAM_MUTABLE_READY));
+
+  /* AV1 exposes a single profile, so no profile control id is wired. */
+  baseclass->codec_name = "AV1";
+  baseclass->profile_cid = 0; /* Only single profile supported */
+  baseclass->profile_to_string = v4l2_profile_to_string;
+  baseclass->profile_from_string = v4l2_profile_from_string;
+  baseclass->set_encoder_properties = set_v4l2_av1_encoder_properties;
+}
+
+/* Probing functions */
+
+/* Check whether a probed V4L2 device with the given sink/src caps is an
+ * AV1 encoder (src caps must intersect "video/x-av1"). */
+gboolean
+gst_v4l2_is_av1_enc (GstCaps * sink_caps, GstCaps * src_caps)
+{
+  return gst_v4l2_is_video_enc (sink_caps, src_caps,
+      gst_static_caps_get (&src_template_caps));
+}
+
+/* Register the v4l2av1enc element for the probed device with the plugin,
+ * constraining the source pad template to the AV1 caps. */
+void
+gst_v4l2_av1_enc_register (GstPlugin * plugin, const gchar * basename,
+    const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps)
+{
+  gst_v4l2_video_enc_register (plugin, GST_TYPE_V4L2_AV1_ENC,
+      "av1", basename, device_path, sink_caps,
+      gst_static_caps_get (&src_template_caps), src_caps);
+}
diff --git a/gst-v4l2/gstv4l2av1enc.h b/gst-v4l2/gstv4l2av1enc.h
new file mode 100644
index 0000000..5333375
--- /dev/null
+++ b/gst-v4l2/gstv4l2av1enc.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __GST_V4L2_AV1_ENC_H__
+#define __GST_V4L2_AV1_ENC_H__
+
+#include
+#include "gstv4l2videoenc.h"
+
+G_BEGIN_DECLS
+#define GST_TYPE_V4L2_AV1_ENC \
+  (gst_v4l2_av1_enc_get_type())
+#define GST_V4L2_AV1_ENC(obj) \
+  (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_V4L2_AV1_ENC,GstV4l2Av1Enc))
+#define GST_V4L2_AV1_ENC_CLASS(klass) \
+  (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_V4L2_AV1_ENC,GstV4l2Av1EncClass))
+#define GST_IS_V4L2_AV1_ENC(obj) \
+  (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_V4L2_AV1_ENC))
+/* Fix: the parameter was previously named 'obj' while the expansion used
+ * 'klass', so any use of this macro failed to compile. */
+#define GST_IS_V4L2_AV1_ENC_CLASS(klass) \
+  (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_V4L2_AV1_ENC))
+typedef struct _GstV4l2Av1Enc GstV4l2Av1Enc;
+typedef struct _GstV4l2Av1EncClass GstV4l2Av1EncClass;
+
+/* Instance structure: base encoder plus cached AV1 property values that
+ * are applied to the device in set_v4l2_av1_encoder_properties(). */
+struct _GstV4l2Av1Enc
+{
+  GstV4l2VideoEnc parent;
+  gboolean EnableHeaders;      /* "enable-headers" property */
+  gboolean EnableTileConfig;   /* set once "tiles" has been parsed */
+  gboolean DisableCDFUpdate;   /* "disable-cdf" property */
+  gboolean EnableSsimRdo;      /* "enable-srdo" property */
+  guint32 Log2TileRows;        /* parsed from "tiles" */
+  guint32 Log2TileCols;        /* parsed from "tiles" */
+  guint32 nRefFrames;          /* "num-Ref-Frames" property, 0 = default */
+};
+
+struct _GstV4l2Av1EncClass
+{
+  GstV4l2VideoEncClass parent_class;
+};
+
+GType gst_v4l2_av1_enc_get_type (void);
+
+gboolean gst_v4l2_is_av1_enc (GstCaps * sink_caps, GstCaps * src_caps);
+
+void gst_v4l2_av1_enc_register (GstPlugin * plugin, const gchar * basename,
+    const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps);
+
+G_END_DECLS
+#endif /* __GST_V4L2_AV1_ENC_H__ */
diff --git a/gst-v4l2/gstv4l2bufferpool.c b/gst-v4l2/gstv4l2bufferpool.c
new file mode 100644
index 0000000..35cbb91
--- /dev/null
+++ b/gst-v4l2/gstv4l2bufferpool.c
@@ -0,0 +1,2565 @@
+/* GStreamer
+ *
+ * Copyright (C) 2001-2002 Ronald Bultje
+ * 2006 Edgard Lima
+ * 2009 Texas Instruments, Inc - http://www.ti.com/
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * gstv4l2bufferpool.c V4L2 buffer pool class
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include
+#endif
+
+#ifndef _GNU_SOURCE
+# define _GNU_SOURCE /* O_CLOEXEC */
+#endif
+#include
+
+#include
+#include
+#include
+#include "gst/video/video.h"
+#include "gst/video/gstvideometa.h"
+#include "gst/video/gstvideopool.h"
+#include "gst/allocators/gstdmabuf.h"
+
+#include
+
+#include "gstv4l2object.h"
+#include "nvbufsurftransform.h"
+#include "gst/gst-i18n-plugin.h"
+#include
+#ifdef USE_V4L2_TARGET_NV
+#include
+#endif
+
+GST_DEBUG_CATEGORY_STATIC (v4l2bufferpool_debug);
+GST_DEBUG_CATEGORY_STATIC (CAT_PERFORMANCE);
+#define GST_CAT_DEFAULT v4l2bufferpool_debug
+
+#define GST_V4L2_IMPORT_QUARK gst_v4l2_buffer_pool_import_quark ()
+
+
+/*
+ * GstV4l2BufferPool:
+ */
+#define gst_v4l2_buffer_pool_parent_class parent_class
+G_DEFINE_TYPE (GstV4l2BufferPool, gst_v4l2_buffer_pool, GST_TYPE_BUFFER_POOL);
+
+enum _GstV4l2BufferPoolAcquireFlags
+{
+ GST_V4L2_BUFFER_POOL_ACQUIRE_FLAG_RESURRECT =
+ GST_BUFFER_POOL_ACQUIRE_FLAG_LAST,
+ GST_V4L2_BUFFER_POOL_ACQUIRE_FLAG_LAST
+};
+
+static void gst_v4l2_buffer_pool_release_buffer (GstBufferPool * bpool,
+ GstBuffer * buffer);
+#ifdef USE_V4L2_TARGET_NV
+#define VPx_FRAME_HEADER_SIZE 12
+static void
+report_metadata (GstV4l2Object * obj, guint32 buffer_index,
+ v4l2_ctrl_videodec_outputbuf_metadata * metadata);
+static void
+v4l2_video_dec_get_enable_frame_type_reporting (GstV4l2Object * obj,
+ guint32 buffer_index, v4l2_ctrl_videodec_outputbuf_metadata * dec_metadata);
+#endif
+
+/* Check that @buffer still wraps the V4L2 memory group it was created
+ * with (same memories, still writable, no foreign memory tagged in).
+ * On success optionally returns the group via @out_group.
+ * NV build: decode buffers carry only one appended memory, so only one
+ * is expected regardless of the group's plane count. */
+static gboolean
+#ifdef USE_V4L2_TARGET_NV
+gst_v4l2_is_buffer_valid (GstBuffer * buffer, GstV4l2MemoryGroup ** out_group, gboolean is_encode)
+#else
+gst_v4l2_is_buffer_valid (GstBuffer * buffer, GstV4l2MemoryGroup ** out_group)
+#endif
+{
+  GstMemory *mem = gst_buffer_peek_memory (buffer, 0);
+  gboolean valid = FALSE;
+
+  /* TAG_MEMORY means somebody replaced memory in this buffer. */
+  if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_TAG_MEMORY))
+    goto done;
+
+  /* DMABuf-wrapped V4L2 memory keeps the real GstV4l2Memory as qdata. */
+  if (gst_is_dmabuf_memory (mem))
+    mem = gst_mini_object_get_qdata (GST_MINI_OBJECT (mem),
+        GST_V4L2_MEMORY_QUARK);
+
+  if (mem && gst_is_v4l2_memory (mem)) {
+    GstV4l2Memory *vmem = (GstV4l2Memory *) mem;
+    GstV4l2MemoryGroup *group = vmem->group;
+    gint i, n_mem;
+
+#ifndef USE_V4L2_TARGET_NV
+    n_mem = group->n_mem;
+#else
+    /* We appended just one memory in the alloc_buffer function. */
+    if (is_encode)
+      n_mem = group->n_mem;
+    else
+      n_mem = 1;
+#endif
+
+    if (n_mem != (gint)gst_buffer_n_memory (buffer))
+      goto done;
+
+    for (i = 0; i < n_mem; i++) {
+      if (group->mem[i] != gst_buffer_peek_memory (buffer, i))
+        goto done;
+
+      if (!gst_memory_is_writable (group->mem[i]))
+        goto done;
+    }
+
+    valid = TRUE;
+    if (out_group)
+      *out_group = group;
+  }
+
+done:
+  return valid;
+}
+
+/* Full-frame copy between two NvBufSurfaces through the default transform
+ * session, using nearest-neighbour filtering (scales if sizes differ).
+ * Returns NvBufSurfTransformError_Success on success. */
+static NvBufSurfTransform_Error CopySurfTransform(NvBufSurface* src, NvBufSurface* dest)
+{
+  NvBufSurfTransform_Error status;
+  NvBufSurfTransformParams transformParams;
+  NvBufSurfTransformRect srcRect;
+  NvBufSurfTransformRect destRect;
+
+  /* Fix: zero the params struct before selectively filling it; the
+   * original left unset members (flip mode fields, rect flags) with
+   * indeterminate values that NvBufSurfTransform() may read. */
+  memset (&transformParams, 0, sizeof (transformParams));
+
+  status = NvBufSurfTransformSetDefaultSession();
+  if (status != NvBufSurfTransformError_Success)
+  {
+    return status;
+  }
+
+  srcRect.top = srcRect.left = 0;
+  destRect.top = destRect.left = 0;
+  srcRect.width = src->surfaceList[0].width;
+  srcRect.height = src->surfaceList[0].height;
+  destRect.width = dest->surfaceList[0].width;
+  destRect.height = dest->surfaceList[0].height;
+
+  transformParams.src_rect = &srcRect;
+  transformParams.dst_rect = &destRect;
+  transformParams.transform_flag = NVBUFSURF_TRANSFORM_FILTER;
+  transformParams.transform_flip = NvBufSurfTransform_None;
+  transformParams.transform_filter = NvBufSurfTransformInter_Nearest;
+
+  status = NvBufSurfTransform(src, dest, &transformParams);
+  return status;
+}
+
+/* Copy @src into @dest for MMAP/DMABUF io-modes.
+ * On the NV build this also handles the encoder-specific paths: capture
+ * side copies the encoded NvBufSurface contents out via CPU mapping,
+ * output side blits the raw NvBufSurface into the device buffer.
+ * Fix: three error paths returned FALSE from this GstFlowReturn function;
+ * FALSE == 0 == GST_FLOW_OK, so failures were silently reported as
+ * success.  They now return GST_FLOW_ERROR. */
+static GstFlowReturn
+gst_v4l2_buffer_pool_copy_buffer (GstV4l2BufferPool * pool, GstBuffer * dest,
+    GstBuffer * src)
+{
+  const GstVideoFormatInfo *finfo = pool->caps_info.finfo;
+
+  GST_LOG_OBJECT (pool, "copying buffer");
+
+#ifdef USE_V4L2_TARGET_NV
+  gboolean ret;
+  gint retn = 0;
+#endif
+
+  if (finfo && (finfo->format != GST_VIDEO_FORMAT_UNKNOWN &&
+          finfo->format != GST_VIDEO_FORMAT_ENCODED)) {
+#ifndef USE_V4L2_TARGET_NV
+    GstVideoFrame src_frame, dest_frame;
+
+    GST_DEBUG_OBJECT (pool, "copy video frame");
+
+    /* we have raw video, use videoframe copy to get strides right */
+    if (!gst_video_frame_map (&src_frame, &pool->caps_info, src, GST_MAP_READ))
+      goto invalid_buffer;
+
+    if (!gst_video_frame_map (&dest_frame, &pool->caps_info, dest,
+            GST_MAP_WRITE)) {
+      gst_video_frame_unmap (&src_frame);
+      goto invalid_buffer;
+    }
+
+    gst_video_frame_copy (&dest_frame, &src_frame);
+
+    gst_video_frame_unmap (&src_frame);
+    gst_video_frame_unmap (&dest_frame);
+#endif
+  } else {
+    GstMapInfo map;
+
+    GST_DEBUG_OBJECT (pool, "copy raw bytes");
+
+    if (!gst_buffer_map (src, &map, GST_MAP_READ))
+      goto invalid_buffer;
+
+    gst_buffer_fill (dest, 0, map.data, gst_buffer_get_size (src));
+
+    gst_buffer_unmap (src, &map);
+    gst_buffer_resize (dest, 0, gst_buffer_get_size (src));
+  }
+
+#ifndef USE_V4L2_TARGET_NV
+  gst_buffer_copy_into (dest, src,
+      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
+#else
+  /* Propagate flags/timestamps for decoder buffers and encoder capture
+   * buffers on the matching platform (is_cuvid selects dGPU vs Tegra). */
+  if (((!strcmp (pool->obj->videodev, V4L2_DEVICE_PATH_NVDEC)) && (is_cuvid == FALSE)) ||
+      ((!strcmp (pool->obj->videodev, V4L2_DEVICE_PATH_NVDEC_ALT)) && (is_cuvid == FALSE)) ||
+      ((!strcmp (pool->obj->videodev, V4L2_DEVICE_PATH_NVDEC_MCCOY)) && (is_cuvid == TRUE)) ||
+      ((!strcmp (pool->obj->videodev, V4L2_DEVICE_PATH_NVENC)
+              || !strcmp (pool->obj->videodev, V4L2_DEVICE_PATH_NVENC_ALT)) && (!V4L2_TYPE_IS_OUTPUT (pool->obj->type))))
+  {
+    ret = gst_buffer_copy_into (dest, src,
+        GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
+    if (ret == FALSE)
+      GST_ERROR_OBJECT (src,"Copy Failed");
+  }
+
+  /* Encoder capture side: CPU-copy the encoded bitstream out of the
+   * NvBufSurface backing the v4l2 memory. */
+  if ((!strcmp (pool->obj->videodev, V4L2_DEVICE_PATH_NVENC)
+          || !strcmp (pool->obj->videodev, V4L2_DEVICE_PATH_NVENC_ALT)) && !V4L2_TYPE_IS_OUTPUT (pool->obj->type))
+  {
+    GstMapInfo outmap = { NULL, (GstMapFlags) 0, NULL, 0, 0, };
+    void *sBaseAddr = NULL;
+    GstV4l2Memory *outmemory = NULL;
+    outmemory = (GstV4l2Memory *)gst_buffer_peek_memory (src, 0);
+    NvBufSurface *nvbuf_surf = NULL;
+    gboolean already_mapped = 0;
+
+    retn = NvBufSurfaceFromFd(outmemory->dmafd, (void**)(&nvbuf_surf));
+    if (retn != 0) {
+      GST_ERROR_OBJECT (src,"NvBufSurfaceFromFd Failed for fd = %d", outmemory->dmafd);
+      return GST_FLOW_ERROR;
+    }
+
+    if (!nvbuf_surf->surfaceList[0].mappedAddr.addr[0])
+      retn = NvBufSurfaceMap(nvbuf_surf, 0, 0, NVBUF_MAP_READ_WRITE);
+    else
+      /* Already mapped elsewhere: reuse the mapping and don't unmap it. */
+      already_mapped = TRUE;
+    if (retn != 0) {
+      GST_ERROR_OBJECT (src,"NvBufSurfaceMap Failed for fd = %d", outmemory->dmafd);
+      return GST_FLOW_ERROR;
+    }
+    sBaseAddr = (void*)nvbuf_surf->surfaceList[0].mappedAddr.addr[0];
+    if (!gst_buffer_map (dest, &outmap, GST_MAP_WRITE))
+      goto invalid_buffer;
+
+    memcpy (outmap.data, sBaseAddr, gst_buffer_get_size (src));
+    /* Unmap only if we created the mapping above. */
+    if (already_mapped == FALSE)
+      retn = NvBufSurfaceUnMap(nvbuf_surf, 0, 0);
+    if (retn != 0) {
+      GST_ERROR_OBJECT (src,"NvBufSurfaceUnMap Failed for fd = %d", outmemory->dmafd);
+      gst_buffer_unmap (dest, &outmap);
+      return GST_FLOW_ERROR;
+    }
+
+    gst_buffer_unmap (dest, &outmap);
+  }
+
+  /* Encoder output side: hardware blit of the incoming raw NvBufSurface
+   * into the surface backing the device buffer. */
+  if ((!strcmp (pool->obj->videodev, V4L2_DEVICE_PATH_NVENC)
+          || !strcmp (pool->obj->videodev, V4L2_DEVICE_PATH_NVENC_ALT))
+      && V4L2_TYPE_IS_OUTPUT (pool->obj->type))
+  {
+    GstV4l2Memory *inmemory = NULL;
+    GstMapInfo inmap = { NULL, (GstMapFlags) 0, NULL, 0, 0, };
+    if (is_cuvid == FALSE) {
+#ifndef USE_V4L2_TARGET_NV_X86
+      NvBufSurfTransformParams transform_params;
+
+      memset(&transform_params, 0, sizeof(NvBufSurfTransformParams));
+
+      if (!gst_buffer_map (src, &inmap, GST_MAP_READ))
+        goto invalid_buffer;
+
+      NvBufSurface *src_buf = (NvBufSurface *)inmap.data;
+
+      inmemory = (GstV4l2Memory *)gst_buffer_peek_memory (dest, 0);
+
+      NvBufSurface *nvbuf_surf = 0;
+      NvBufSurfaceFromFd(inmemory->dmafd, (void**)(&nvbuf_surf));
+      retn = NvBufSurfTransform (src_buf, nvbuf_surf, &transform_params);
+      if (retn != 0) {
+        GST_ERROR_OBJECT(src, "NvBufSurfTransform Failed");
+        gst_buffer_unmap(src, &inmap);
+        return GST_FLOW_ERROR;
+      }
+
+      gst_buffer_unmap(src, &inmap);
+#endif
+    }
+    if (is_cuvid == TRUE){
+      NvBufSurface *src_bufsurf = NULL;
+      NvBufSurface *dst_bufsurf = NULL;
+
+      if (!gst_buffer_map (src, &inmap, GST_MAP_READ))
+        goto invalid_buffer;
+      src_bufsurf = (NvBufSurface*)inmap.data;
+
+      inmemory = (GstV4l2Memory *)gst_buffer_peek_memory (dest, 0);
+
+      retn = NvBufSurfaceFromFd(inmemory->dmafd, (void**)(&dst_bufsurf));
+      if (retn != 0) {
+        GST_ERROR_OBJECT(src, "NvBufSurfaceFromFd Failed");
+        gst_buffer_unmap(src, &inmap);
+        return GST_FLOW_ERROR;
+      }
+
+      if (CopySurfTransform(src_bufsurf, dst_bufsurf) != NvBufSurfTransformError_Success)
+      {
+        GST_ERROR_OBJECT(src, "ERROR in BufSurfacecopy \n");
+        gst_buffer_unmap(src, &inmap);
+        return GST_FLOW_ERROR;
+      }
+      gst_buffer_unmap(src, &inmap);
+    }
+  }
+#endif
+
+  GST_CAT_LOG_OBJECT (CAT_PERFORMANCE, pool, "slow copy into buffer %p", dest);
+
+  return GST_FLOW_OK;
+
+invalid_buffer:
+  {
+    GST_ERROR_OBJECT (pool, "could not map buffer");
+    return GST_FLOW_ERROR;
+  }
+}
+
+/* Bookkeeping attached as qdata to a pool buffer that imported a foreign
+ * USERPTR buffer; keeps the source buffer (and its mapping) alive until
+ * _unmap_userptr_frame() releases it. */
+struct UserPtrData
+{
+  GstBuffer *buffer;     /* ref on the imported source buffer */
+  gboolean is_frame;     /* TRUE: 'frame' is mapped; FALSE: 'map' is */
+  GstVideoFrame frame;   /* video-frame mapping for raw formats */
+  GstMapInfo map;        /* plain mapping for encoded/unknown formats */
+};
+
+/* Return the (lazily created) quark under which imported-buffer
+ * bookkeeping is attached to pool buffers as qdata. */
+static GQuark
+gst_v4l2_buffer_pool_import_quark (void)
+{
+  static GQuark import_quark = 0;
+
+  if (G_UNLIKELY (import_quark == 0))
+    import_quark = g_quark_from_string ("GstV4l2BufferPoolUsePtrData");
+
+  return import_quark;
+}
+
+/* GDestroyNotify for UserPtrData: unmap whichever mapping was taken,
+ * drop the ref on the imported source buffer, free the struct. */
+static void
+_unmap_userptr_frame (struct UserPtrData *data)
+{
+  if (data->is_frame)
+    gst_video_frame_unmap (&data->frame);
+  else
+    gst_buffer_unmap (data->buffer, &data->map);
+
+  if (data->buffer)
+    gst_buffer_unref (data->buffer);
+
+  g_slice_free (struct UserPtrData, data);
+}
+
+/* Import the memory of foreign buffer @src into pool buffer @dest for
+ * USERPTR io-mode.  Raw video is mapped as a GstVideoFrame (per-plane
+ * pointers and sizes); encoded/unknown data is mapped as one flat region.
+ * The mapping plus a ref on @src are attached to @dest as qdata so they
+ * survive until the driver is done with the buffer. */
+static GstFlowReturn
+gst_v4l2_buffer_pool_import_userptr (GstV4l2BufferPool * pool,
+    GstBuffer * dest, GstBuffer * src)
+{
+  GstFlowReturn ret = GST_FLOW_OK;
+  GstV4l2MemoryGroup *group = NULL;
+  GstMapFlags flags;
+  const GstVideoFormatInfo *finfo = pool->caps_info.finfo;
+  struct UserPtrData *data = NULL;
+
+  GST_LOG_OBJECT (pool, "importing userptr");
+
+  /* get the group */
+#ifdef USE_V4L2_TARGET_NV
+  if (!gst_v4l2_is_buffer_valid (dest, &group, pool->obj->is_encode))
+    goto not_our_buffer;
+#else
+  if (!gst_v4l2_is_buffer_valid (dest, &group))
+    goto not_our_buffer;
+#endif
+
+  /* OUTPUT queue feeds the device (we read), CAPTURE is written by it. */
+  if (V4L2_TYPE_IS_OUTPUT (pool->obj->type))
+    flags = GST_MAP_READ;
+  else
+    flags = GST_MAP_WRITE;
+
+  data = g_slice_new0 (struct UserPtrData);
+
+  if (finfo && (finfo->format != GST_VIDEO_FORMAT_UNKNOWN &&
+          finfo->format != GST_VIDEO_FORMAT_ENCODED)) {
+    gsize size[GST_VIDEO_MAX_PLANES] = { 0, };
+#ifndef USE_V4L2_TARGET_NV
+    gint i;
+#else
+    guint i;
+#endif
+
+    data->is_frame = TRUE;
+
+    if (!gst_video_frame_map (&data->frame, &pool->caps_info, src, flags))
+      goto invalid_buffer;
+
+    /* Compute the byte size of each plane; tiled formats encode the tile
+     * grid in the stride value. */
+    for (i = 0; i < GST_VIDEO_FORMAT_INFO_N_PLANES (finfo); i++) {
+      if (GST_VIDEO_FORMAT_INFO_IS_TILED (finfo)) {
+        gint tinfo = GST_VIDEO_FRAME_PLANE_STRIDE (&data->frame, i);
+        gint pstride;
+        guint pheight;
+
+        pstride = GST_VIDEO_TILE_X_TILES (tinfo) <<
+            GST_VIDEO_FORMAT_INFO_TILE_WS (finfo);
+
+        pheight = GST_VIDEO_TILE_Y_TILES (tinfo) <<
+            GST_VIDEO_FORMAT_INFO_TILE_HS (finfo);
+
+        size[i] = pstride * pheight;
+      } else {
+        size[i] = GST_VIDEO_FRAME_PLANE_STRIDE (&data->frame, i) *
+            GST_VIDEO_FRAME_COMP_HEIGHT (&data->frame, i);
+      }
+    }
+
+    /* In the single planar API, planes must be contiguous in memory and
+     * therefore they must have expected size. ie: no padding.
+     * To check these conditions, we check that plane 'i' start address
+     * + plane 'i' size equals to plane 'i+1' start address */
+    if (!V4L2_TYPE_IS_MULTIPLANAR (pool->obj->type)) {
+      for (i = 0; i < (GST_VIDEO_FORMAT_INFO_N_PLANES (finfo) - 1); i++) {
+        const struct v4l2_pix_format *pix_fmt = &pool->obj->format.fmt.pix;
+        gpointer tmp;
+        gint estride = gst_v4l2_object_extrapolate_stride (finfo, i,
+            pix_fmt->bytesperline);
+        guint eheight = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (finfo, i,
+            pix_fmt->height);
+
+        tmp = ((guint8 *) data->frame.data[i]) + estride * eheight;
+        if (tmp != data->frame.data[i + 1])
+          goto non_contiguous_mem;
+      }
+    }
+
+    if (!gst_v4l2_allocator_import_userptr (pool->vallocator, group,
+            data->frame.info.size, finfo->n_planes, data->frame.data, size))
+      goto import_failed;
+  } else {
+    gpointer ptr[1];
+    gsize size[1];
+
+    data->is_frame = FALSE;
+
+    if (!gst_buffer_map (src, &data->map, flags))
+      goto invalid_buffer;
+
+    ptr[0] = data->map.data;
+    size[0] = data->map.size;
+
+    if (!gst_v4l2_allocator_import_userptr (pool->vallocator, group,
+            data->map.size, 1, ptr, size))
+      goto import_failed;
+  }
+
+  /* Keep @src (and its mapping) alive for as long as @dest references it;
+   * released automatically via _unmap_userptr_frame(). */
+  data->buffer = gst_buffer_ref (src);
+
+  gst_mini_object_set_qdata (GST_MINI_OBJECT (dest), GST_V4L2_IMPORT_QUARK,
+      data, (GDestroyNotify) _unmap_userptr_frame);
+
+  gst_buffer_copy_into (dest, src,
+      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
+
+  return ret;
+
+not_our_buffer:
+  {
+    GST_ERROR_OBJECT (pool, "destination buffer invalid or not from our pool");
+    return GST_FLOW_ERROR;
+  }
+invalid_buffer:
+  {
+    GST_ERROR_OBJECT (pool, "could not map buffer");
+    g_slice_free (struct UserPtrData, data);
+    return GST_FLOW_ERROR;
+  }
+non_contiguous_mem:
+  {
+    GST_ERROR_OBJECT (pool, "memory is not contiguous or plane size mismatch");
+    _unmap_userptr_frame (data);
+    return GST_FLOW_ERROR;
+  }
+import_failed:
+  {
+    GST_ERROR_OBJECT (pool, "failed to import data");
+    _unmap_userptr_frame (data);
+    return GST_FLOW_ERROR;
+  }
+}
+
+/* Import the dmabuf fd(s) backing foreign buffer @src into pool buffer
+ * @dest for DMABUF_IMPORT io-mode.
+ * NV build: only the encoder OUTPUT queue is supported; the fd is taken
+ * from the NvBufSurface wrapped by @src and patched into every plane of
+ * the destination's memory group. */
+static GstFlowReturn
+gst_v4l2_buffer_pool_import_dmabuf (GstV4l2BufferPool * pool,
+    GstBuffer * dest, GstBuffer * src)
+{
+  GstV4l2MemoryGroup *group = NULL;
+  guint n_mem = gst_buffer_n_memory (src);
+#ifndef USE_V4L2_TARGET_NV
+  gint i;
+  GstMemory *dma_mem[GST_VIDEO_MAX_PLANES] = { 0 };
+#else
+  guint i;
+#endif
+
+  GST_LOG_OBJECT (pool, "importing dmabuf");
+
+#ifdef USE_V4L2_TARGET_NV
+  if (!gst_v4l2_is_buffer_valid (dest, &group, pool->obj->is_encode))
+    goto not_our_buffer;
+#else
+  if (!gst_v4l2_is_buffer_valid (dest, &group))
+    goto not_our_buffer;
+#endif
+
+  if (n_mem > GST_VIDEO_MAX_PLANES)
+    goto too_many_mems;
+
+#ifndef USE_V4L2_TARGET_NV
+  for (i = 0; i < n_mem; i++)
+    dma_mem[i] = gst_buffer_peek_memory (src, i);
+
+  if (!gst_v4l2_allocator_import_dmabuf (pool->vallocator, group, n_mem,
+          dma_mem))
+    goto import_failed;
+#else
+  g_return_val_if_fail (pool->vallocator->memory == V4L2_MEMORY_DMABUF, FALSE);
+
+  if ((!strcmp (pool->obj->videodev, V4L2_DEVICE_PATH_NVENC)
+          || !strcmp (pool->obj->videodev, V4L2_DEVICE_PATH_NVENC_ALT))
+      && V4L2_TYPE_IS_OUTPUT (pool->obj->type))
+  {
+    gint dmafd = -1;
+    GstV4l2Memory *mem = NULL;
+    GstMemory *inmemory = NULL;
+    GstMapInfo inmap = { NULL, (GstMapFlags) 0, NULL, 0, 0, };
+
+    if (!gst_buffer_map (src, &inmap, GST_MAP_READ))
+    {
+      GST_ERROR_OBJECT (pool, "could not map input buffer");
+      return GST_FLOW_ERROR;
+    }
+
+    /* The mapped data is itself an NvBufSurface descriptor, not pixels. */
+    NvBufSurface *src_bufsurf = (NvBufSurface*)inmap.data;
+    if ((is_cuvid == FALSE) && ((src_bufsurf->memType == NVBUF_MEM_CUDA_PINNED) ||
+            (src_bufsurf->memType == NVBUF_MEM_CUDA_DEVICE) ||
+            (src_bufsurf->memType == NVBUF_MEM_CUDA_UNIFIED)))
+    {
+      GST_ERROR_OBJECT (pool, "Input CUDA Memory not supported on Jeston for output-io-mode=dmabuf-import,"
+          "element = %s", GST_ELEMENT_NAME(src));
+      gst_buffer_unmap (src, &inmap);
+      goto invalid_buffer;
+    }
+    dmafd = src_bufsurf->surfaceList->bufferDesc;
+    /* NOTE: gst-memory with input buffer for nvidia proprietary plugins mostly will be 1,
+       though this may not always be the case as can have per plane separate gst-memory */
+    inmemory = (GstMemory *)gst_buffer_peek_memory (src, 0);
+
+    /* Same fd and sizes are recorded for every plane of the group. */
+    for (i = 0; i < (guint)group->n_mem; i++) {
+      gsize size, offset, maxsize;
+
+      size = gst_memory_get_sizes (inmemory, &offset, &maxsize);
+
+      mem = (GstV4l2Memory *) group->mem[i];
+
+      /* Update memory */
+      mem->mem.maxsize = maxsize;
+      mem->mem.offset = offset;
+      mem->mem.size = size;
+      mem->dmafd = dmafd;
+
+      /* Update v4l2 structure */
+      group->planes[i].length = maxsize;
+      group->planes[i].bytesused = size + offset;
+      group->planes[i].m.fd = dmafd;
+      group->planes[i].data_offset = offset;
+    }
+
+    /* Copy into buffer structure if not using planes */
+    if (!V4L2_TYPE_IS_MULTIPLANAR (pool->obj->type)) {
+      group->buffer.bytesused = group->planes[0].bytesused;
+      group->buffer.length = group->planes[0].length;
+      /* NOTE(review): this reads the 'userptr' member of the plane union
+       * rather than 'm.fd'; both alias the same storage so the fd value
+       * comes through, but upstream uses m.fd here — confirm intent. */
+      group->buffer.m.fd = group->planes[0].m.userptr;
+
+      /* Check if data_offset > 0 and fail for non-multi-planar */
+      g_assert (group->planes[0].data_offset == 0);
+    } else {
+      group->buffer.length = group->n_mem;
+    }
+    gst_buffer_unmap(src, &inmap);
+  } else {
+    GST_INFO_OBJECT (pool, "DMABUF_IMPORT io mode not supported for device %s ",
+        pool->obj->videodev);
+    return GST_FLOW_ERROR;
+  }
+#endif
+
+  /* Keep a ref on @src attached to @dest until the driver releases it. */
+  gst_mini_object_set_qdata (GST_MINI_OBJECT (dest), GST_V4L2_IMPORT_QUARK,
+      gst_buffer_ref (src), (GDestroyNotify) gst_buffer_unref);
+
+  gst_buffer_copy_into (dest, src,
+      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
+
+  return GST_FLOW_OK;
+
+invalid_buffer:
+  {
+    return GST_FLOW_ERROR;
+  }
+not_our_buffer:
+  {
+    GST_ERROR_OBJECT (pool, "destination buffer invalid or not from our pool");
+    return GST_FLOW_ERROR;
+  }
+too_many_mems:
+  {
+    GST_ERROR_OBJECT (pool, "could not map buffer");
+    return GST_FLOW_ERROR;
+  }
+#ifndef USE_V4L2_TARGET_NV
+import_failed:
+  {
+    GST_ERROR_OBJECT (pool, "failed to import dmabuf");
+    return GST_FLOW_ERROR;
+  }
+#endif
+}
+
+/* Fill @dest from @src before queueing to the device.  When @src is NULL
+ * a buffer is pulled from the downstream pool (other_pool) and released
+ * again afterwards.  The copy/import strategy follows the configured
+ * io-mode. */
+static GstFlowReturn
+gst_v4l2_buffer_pool_prepare_buffer (GstV4l2BufferPool * pool,
+    GstBuffer * dest, GstBuffer * src)
+{
+  GstFlowReturn flow = GST_FLOW_OK;
+  GstBuffer *acquired = NULL;
+
+  if (src == NULL) {
+    if (pool->other_pool == NULL) {
+      GST_ERROR_OBJECT (pool, "can't prepare buffer, source buffer missing");
+      return GST_FLOW_ERROR;
+    }
+
+    flow = gst_buffer_pool_acquire_buffer (pool->other_pool, &acquired, NULL);
+    if (flow != GST_FLOW_OK) {
+      GST_ERROR_OBJECT (pool, "failed to acquire buffer from downstream pool");
+      return flow;
+    }
+
+    src = acquired;
+  }
+
+  switch (pool->obj->mode) {
+    case GST_V4L2_IO_MMAP:
+    case GST_V4L2_IO_DMABUF:
+      flow = gst_v4l2_buffer_pool_copy_buffer (pool, dest, src);
+      break;
+    case GST_V4L2_IO_USERPTR:
+      flow = gst_v4l2_buffer_pool_import_userptr (pool, dest, src);
+      break;
+    case GST_V4L2_IO_DMABUF_IMPORT:
+      flow = gst_v4l2_buffer_pool_import_dmabuf (pool, dest, src);
+      break;
+    default:
+      break;
+  }
+
+  /* Release the buffer we pulled from the downstream pool, if any. */
+  if (acquired != NULL)
+    gst_buffer_unref (acquired);
+
+  return flow;
+}
+
+/* GstBufferPool::alloc_buffer vfunc.  Allocates a memory group matching
+ * the configured io-mode, wraps its memories in a new GstBuffer, and
+ * optionally attaches GstVideoMeta for raw video. */
+static GstFlowReturn
+gst_v4l2_buffer_pool_alloc_buffer (GstBufferPool * bpool, GstBuffer ** buffer,
+    GstBufferPoolAcquireParams * params)
+{
+  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
+  GstV4l2MemoryGroup *group = NULL;
+  GstBuffer *newbuf = NULL;
+  GstV4l2Object *obj;
+  GstVideoInfo *info;
+
+  obj = pool->obj;
+  info = &obj->info;
+
+  switch (obj->mode) {
+    case GST_V4L2_IO_RW:
+      /* read/write mode: plain system memory, no driver-side group. */
+      newbuf =
+          gst_buffer_new_allocate (pool->allocator, pool->size, &pool->params);
+      break;
+    case GST_V4L2_IO_MMAP:
+      group = gst_v4l2_allocator_alloc_mmap (pool->vallocator);
+      break;
+    case GST_V4L2_IO_DMABUF:
+      group = gst_v4l2_allocator_alloc_dmabuf (pool->vallocator,
+          pool->allocator);
+      break;
+    case GST_V4L2_IO_USERPTR:
+      group = gst_v4l2_allocator_alloc_userptr (pool->vallocator);
+      break;
+    case GST_V4L2_IO_DMABUF_IMPORT:
+      group = gst_v4l2_allocator_alloc_dmabufin (pool->vallocator);
+      break;
+    default:
+      newbuf = NULL;
+      g_assert_not_reached ();
+      break;
+  }
+
+  if (group != NULL) {
+    gint i;
+    newbuf = gst_buffer_new ();
+
+    /* The buffer takes ownership of each memory in the group. */
+    for (i = 0; i < group->n_mem; i++)
+      gst_buffer_append_memory (newbuf, group->mem[i]);
+  } else if (newbuf == NULL) {
+    goto allocation_failed;
+  }
+
+  /* add metadata to raw video buffers */
+  if (pool->add_videometa)
+    gst_buffer_add_video_meta_full (newbuf, GST_VIDEO_FRAME_FLAG_NONE,
+        GST_VIDEO_INFO_FORMAT (info), GST_VIDEO_INFO_WIDTH (info),
+        GST_VIDEO_INFO_HEIGHT (info), GST_VIDEO_INFO_N_PLANES (info),
+        info->offset, info->stride);
+
+  *buffer = newbuf;
+
+  return GST_FLOW_OK;
+
+  /* ERRORS */
+allocation_failed:
+  {
+    GST_ERROR_OBJECT (pool, "failed to allocate buffer");
+    return GST_FLOW_ERROR;
+  }
+}
+
+/* GstBufferPool::set_config implementation.
+ *
+ * Validates the proposed configuration: picks an allocator matching the
+ * I/O mode, disables CREATE_BUFS for emulated (libv4l2) formats, clamps
+ * the buffer counts to driver and (NV_)VIDEO_MAX_FRAME limits, forces
+ * video meta when the driver needs it, and updates the configured size.
+ * Per GstBufferPool convention this returns FALSE whenever any value was
+ * adjusted so that the caller fetches and re-applies the updated config.
+ *
+ * Fix: the third argument of gst_buffer_pool_config_get_allocator() had
+ * been corrupted to the mojibake "¶ms" (HTML-entity garbling of
+ * "&params"); restored to &params so the file compiles. */
+static gboolean
+gst_v4l2_buffer_pool_set_config (GstBufferPool * bpool, GstStructure * config)
+{
+  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
+  GstV4l2Object *obj = pool->obj;
+  GstCaps *caps;
+  guint size, min_buffers, max_buffers;
+  GstAllocator *allocator;
+  GstAllocationParams params;
+  gboolean can_allocate = FALSE;
+  gboolean updated = FALSE;
+  gboolean ret;
+
+  pool->add_videometa =
+      gst_buffer_pool_config_has_option (config,
+      GST_BUFFER_POOL_OPTION_VIDEO_META);
+
+  /* parse the config and keep around */
+  if (!gst_buffer_pool_config_get_params (config, &caps, &size, &min_buffers,
+          &max_buffers))
+    goto wrong_config;
+
+  if (!gst_buffer_pool_config_get_allocator (config, &allocator, &params))
+    goto wrong_config;
+
+  GST_DEBUG_OBJECT (pool, "config %" GST_PTR_FORMAT, config);
+
+  if (pool->allocator)
+    gst_object_unref (pool->allocator);
+  pool->allocator = NULL;
+
+  switch (obj->mode) {
+    case GST_V4L2_IO_DMABUF:
+      pool->allocator = gst_dmabuf_allocator_new ();
+      can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, MMAP);
+      break;
+    case GST_V4L2_IO_MMAP:
+      can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, MMAP);
+      break;
+    case GST_V4L2_IO_USERPTR:
+      can_allocate =
+          GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, USERPTR);
+      break;
+    case GST_V4L2_IO_DMABUF_IMPORT:
+      can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, DMABUF);
+      break;
+    case GST_V4L2_IO_RW:
+      if (allocator)
+        pool->allocator = g_object_ref (allocator);
+      pool->params = params;
+      /* No need to change the configuration */
+      goto done;
+      break;
+    default:
+      g_assert_not_reached ();
+      break;
+  }
+
+  /* libv4l2 conversion code does not handle CREATE_BUFS, and may lead to
+   * instability and crash, disable it for now */
+  if (can_allocate && obj->fmtdesc->flags & V4L2_FMT_FLAG_EMULATED) {
+    GST_WARNING_OBJECT (pool,
+        "libv4l2 converter detected, disabling CREATE_BUFS");
+    can_allocate = FALSE;
+    GST_OBJECT_FLAG_UNSET (pool->vallocator,
+        GST_V4L2_ALLOCATOR_FLAG_MMAP_CREATE_BUFS
+        | GST_V4L2_ALLOCATOR_FLAG_USERPTR_CREATE_BUFS
+        | GST_V4L2_ALLOCATOR_FLAG_DMABUF_CREATE_BUFS);
+  }
+
+  if (min_buffers < GST_V4L2_MIN_BUFFERS) {
+    updated = TRUE;
+    min_buffers = GST_V4L2_MIN_BUFFERS;
+    GST_INFO_OBJECT (pool, "increasing minimum buffers to %u", min_buffers);
+  }
+
+  /* respect driver requirements */
+  if (min_buffers < obj->min_buffers) {
+    updated = TRUE;
+    min_buffers = obj->min_buffers;
+    GST_INFO_OBJECT (pool, "increasing minimum buffers to %u", min_buffers);
+  }
+
+#ifdef USE_V4L2_TARGET_NV
+  if (max_buffers > NV_VIDEO_MAX_FRAME || max_buffers == 0) {
+    updated = TRUE;
+    max_buffers = NV_VIDEO_MAX_FRAME;
+    GST_INFO_OBJECT (pool, "reducing maximum buffers to %u", max_buffers);
+  }
+#else
+  if (max_buffers > VIDEO_MAX_FRAME || max_buffers == 0) {
+    updated = TRUE;
+    max_buffers = VIDEO_MAX_FRAME;
+    GST_INFO_OBJECT (pool, "reducing maximum buffers to %u", max_buffers);
+  }
+#endif
+
+  if (min_buffers > max_buffers) {
+    updated = TRUE;
+    min_buffers = max_buffers;
+    GST_INFO_OBJECT (pool, "reducing minimum buffers to %u", min_buffers);
+  } else if (min_buffers != max_buffers) {
+    if (!can_allocate) {
+      /* Without CREATE_BUFS support the pool cannot grow past REQBUFS. */
+      updated = TRUE;
+      max_buffers = min_buffers;
+      GST_INFO_OBJECT (pool, "can't allocate, setting maximum to minimum");
+    }
+  }
+
+  if (!pool->add_videometa && obj->need_video_meta) {
+    GST_INFO_OBJECT (pool, "adding needed video meta");
+    updated = TRUE;
+    gst_buffer_pool_config_add_option (config,
+        GST_BUFFER_POOL_OPTION_VIDEO_META);
+  }
+
+  /* Always update the config to ensure the configured size matches */
+  if ((!strcmp (obj->videodev, V4L2_DEVICE_PATH_NVENC) || !strcmp (obj->videodev, V4L2_DEVICE_PATH_NVENC_ALT)) &&
+      (obj->mode == GST_V4L2_IO_DMABUF_IMPORT)) {
+    /* NVENC with imported dmabufs carries NvBufSurface descriptors only. */
+    gst_buffer_pool_config_set_params (config, caps, sizeof (NvBufSurface), min_buffers,
+        max_buffers);
+  }
+  else
+    gst_buffer_pool_config_set_params (config, caps, obj->info.size, min_buffers,
+        max_buffers);
+#ifdef USE_V4L2_TARGET_NV
+  /* Need to adjust the size to 0th plane's size since we will only output
+     v4l2 memory associated with 0th plane. */
+  if (!V4L2_TYPE_IS_OUTPUT(obj->type))
+    gst_buffer_pool_config_set_params (config, caps, obj->info.width * obj->info.height,
+        min_buffers, max_buffers);
+#endif
+
+  /* keep a GstVideoInfo with defaults for the when we need to copy */
+  gst_video_info_from_caps (&pool->caps_info, caps);
+
+done:
+  ret = GST_BUFFER_POOL_CLASS (parent_class)->set_config (bpool, config);
+
+  /* If anything was changed, documentation recommends returning FALSE */
+  return !updated && ret;
+
+  /* ERRORS */
+wrong_config:
+  {
+    GST_ERROR_OBJECT (pool, "invalid config %" GST_PTR_FORMAT, config);
+    return FALSE;
+  }
+}
+
+/* Re-allocate ("resurrect") a buffer that was lost from the pool, by
+ * acquiring with the RESURRECT flag so the base class allocates a fresh
+ * one; immediately released so it lands back in the free queue.  The
+ * group-released handler is blocked to avoid recursing into ourselves.
+ *
+ * Fix: the last argument of gst_buffer_pool_acquire_buffer() had been
+ * corrupted to the mojibake "¶ms" (HTML-entity garbling of "&params");
+ * restored to &params so the file compiles.  The misspelled function
+ * name is kept because in-file callers reference it. */
+static GstFlowReturn
+gst_v4l2_buffer_pool_resurect_buffer (GstV4l2BufferPool * pool)
+{
+  GstBufferPoolAcquireParams params = { 0 };
+  GstBuffer *buffer = NULL;
+  GstFlowReturn ret;
+
+  GST_DEBUG_OBJECT (pool, "A buffer was lost, reallocating it");
+
+  /* block recursive calls to this function */
+  g_signal_handler_block (pool->vallocator, pool->group_released_handler);
+
+  params.flags =
+      (GstBufferPoolAcquireFlags) GST_V4L2_BUFFER_POOL_ACQUIRE_FLAG_RESURRECT |
+      GST_BUFFER_POOL_ACQUIRE_FLAG_DONTWAIT;
+  ret =
+      gst_buffer_pool_acquire_buffer (GST_BUFFER_POOL (pool), &buffer, &params);
+
+  if (ret == GST_FLOW_OK)
+    gst_buffer_unref (buffer);
+
+  g_signal_handler_unblock (pool->vallocator, pool->group_released_handler);
+
+  return ret;
+}
+
+/* Start streaming on the V4L2 device (VIDIOC_STREAMON).
+ *
+ * For capture queues, any allocated-but-unqueued buffers are first
+ * resurrected so they get queued into the driver before streaming starts.
+ * Idempotent: returns TRUE immediately when already streaming.
+ * Returns FALSE when the STREAMON ioctl fails. */
+static gboolean
+gst_v4l2_buffer_pool_streamon (GstV4l2BufferPool * pool)
+{
+  GstV4l2Object *obj = pool->obj;
+
+  if (pool->streaming)
+    return TRUE;
+
+  switch (obj->mode) {
+    case GST_V4L2_IO_MMAP:
+    case GST_V4L2_IO_USERPTR:
+    case GST_V4L2_IO_DMABUF:
+    case GST_V4L2_IO_DMABUF_IMPORT:
+      if (!V4L2_TYPE_IS_OUTPUT (pool->obj->type)) {
+        guint num_queued;
+        guint i, n = 0;
+
+        num_queued = g_atomic_int_get (&pool->num_queued);
+#ifdef USE_V4L2_TARGET_NV
+        GST_DEBUG_OBJECT (pool, "num_allocated %d num_queued %d allocator count %d \
+            dynamic_allocation %d\n", pool->num_allocated, num_queued,
+            pool->vallocator->count, pool->enable_dynamic_allocation);
+#endif
+        /* n = buffers that are allocated but not yet queued in the driver */
+        if (num_queued < pool->num_allocated)
+          n = pool->num_allocated - num_queued;
+
+        /* For captures, we need to enqueue buffers before we start streaming,
+         * so the driver doesn't underflow immediately. As we have put them
+         * back into the base class queue, resurrect them; releasing will then
+         * queue them back. */
+        for (i = 0; i < n; i++)
+          gst_v4l2_buffer_pool_resurect_buffer (pool);
+      }
+
+      if (obj->ioctl (pool->video_fd, VIDIOC_STREAMON, &obj->type) < 0)
+        goto streamon_failed;
+
+      pool->streaming = TRUE;
+
+      GST_DEBUG_OBJECT (pool, "Started streaming");
+      break;
+    default:
+      /* GST_V4L2_IO_RW has no queue, nothing to start */
+      break;
+  }
+
+  return TRUE;
+
+streamon_failed:
+  {
+    GST_ERROR_OBJECT (pool, "error with STREAMON %d (%s)", errno,
+        g_strerror (errno));
+    return FALSE;
+  }
+}
+
+/* Call with streamlock held, or when streaming threads are down.
+ *
+ * Stop streaming (VIDIOC_STREAMOFF), flush the allocator, and hand every
+ * still-queued buffer back: output buffers go through our own
+ * release_buffer (so they can be re-queued later), capture buffers go
+ * straight to the base class so they are not re-enqueued on stop.
+ * The buffer array is drained under the object lock, then the buffers are
+ * released outside the lock to avoid re-entrancy. */
+static void
+gst_v4l2_buffer_pool_streamoff (GstV4l2BufferPool * pool)
+{
+  GstBufferPoolClass *pclass = GST_BUFFER_POOL_CLASS (parent_class);
+  GstV4l2Object *obj = pool->obj;
+#ifdef USE_V4L2_TARGET_NV
+  GstBuffer *buffers[NV_VIDEO_MAX_FRAME] = {NULL};
+#else
+  GstBuffer *buffers[VIDEO_MAX_FRAME] = {NULL};
+#endif
+  gint i;
+
+  if (!pool->streaming)
+    return;
+
+  GST_OBJECT_LOCK (pool);
+
+  switch (obj->mode) {
+    case GST_V4L2_IO_MMAP:
+    case GST_V4L2_IO_USERPTR:
+    case GST_V4L2_IO_DMABUF:
+    case GST_V4L2_IO_DMABUF_IMPORT:
+
+      if (obj->ioctl (pool->video_fd, VIDIOC_STREAMOFF, &obj->type) < 0)
+        GST_WARNING_OBJECT (pool, "STREAMOFF failed with errno %d (%s)",
+            errno, g_strerror (errno));
+
+      pool->streaming = FALSE;
+
+      GST_DEBUG_OBJECT (pool, "Stopped streaming");
+
+      if (pool->vallocator)
+        gst_v4l2_allocator_flush (pool->vallocator);
+      break;
+    default:
+      break;
+  }
+
+  /* Collect all queued buffers locally; they are released below, after
+   * dropping the lock. */
+#ifdef USE_V4L2_TARGET_NV
+  for (i = 0; i < NV_VIDEO_MAX_FRAME; i++) {
+#else
+  for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+#endif
+    if (pool->buffers[i]) {
+      buffers[i] = pool->buffers[i];
+      pool->buffers[i] = NULL;
+      g_atomic_int_add (&pool->num_queued, -1);
+    }
+  }
+
+  GST_OBJECT_UNLOCK (pool);
+
+#ifdef USE_V4L2_TARGET_NV
+  for (i = 0; i < NV_VIDEO_MAX_FRAME; i++) {
+#else
+  for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+#endif
+    if (buffers[i]) {
+      GstBuffer *buffer = buffers[i];
+      GstBufferPool *bpool = GST_BUFFER_POOL (pool);
+
+      if (V4L2_TYPE_IS_OUTPUT (pool->obj->type))
+        gst_v4l2_buffer_pool_release_buffer (bpool, buffer);
+      else /* Don't re-enqueue capture buffer on stop */
+        pclass->release_buffer (bpool, buffer);
+    }
+  }
+}
+
+/* GstBufferPool::start implementation.
+ *
+ * Activates the pool: requests buffers from the driver (REQBUFS) for the
+ * configured memory mode, derives latency and copy-threshold values from
+ * the count actually granted, re-applies the (possibly adjusted) config,
+ * activates the downstream pool if any, pre-allocates via the base class,
+ * and for capture queues connects the group-released handler and starts
+ * streaming.  Returns FALSE on any failure. */
+static gboolean
+gst_v4l2_buffer_pool_start (GstBufferPool * bpool)
+{
+  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
+  GstBufferPoolClass *pclass = GST_BUFFER_POOL_CLASS (parent_class);
+  GstV4l2Object *obj = pool->obj;
+  GstStructure *config;
+  GstCaps *caps;
+  guint size, min_buffers, max_buffers;
+  guint max_latency, min_latency, copy_threshold = 0;
+  gboolean can_allocate = FALSE, ret = TRUE;
+
+  GST_DEBUG_OBJECT (pool, "activating pool");
+
+  config = gst_buffer_pool_get_config (bpool);
+  if (!gst_buffer_pool_config_get_params (config, &caps, &size, &min_buffers,
+          &max_buffers))
+    goto wrong_config;
+
+  min_latency = MAX (GST_V4L2_MIN_BUFFERS, obj->min_buffers);
+
+  switch (obj->mode) {
+    case GST_V4L2_IO_RW:
+      can_allocate = TRUE;
+#ifdef HAVE_LIBV4L2
+      /* This works around an unfixable bug in libv4l2 when RW is emulated on
+       * top of MMAP. In this case, the first read initializes the queues, but
+       * the poll before that will always fail. Doing an empty read forces the
+       * queue to be initialized now. We only do this if we have a streaming
+       * driver. */
+      if (obj->device_caps & V4L2_CAP_STREAMING)
+        obj->read (obj->video_fd, NULL, 0);
+#endif
+      break;
+    case GST_V4L2_IO_DMABUF:
+    case GST_V4L2_IO_MMAP:
+    {
+      guint count;
+
+      can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, MMAP);
+
+      /* first, lets request buffers, and see how many we can get: */
+      GST_DEBUG_OBJECT (pool, "requesting %d MMAP buffers", min_buffers);
+
+      count = gst_v4l2_allocator_start (pool->vallocator, min_buffers,
+          V4L2_MEMORY_MMAP);
+      pool->num_allocated = count;
+
+      if (count < GST_V4L2_MIN_BUFFERS) {
+        min_buffers = count;
+        goto no_buffers;
+      }
+
+      /* V4L2 buffer pools are often very limited in the amount of buffers
+       * they can offer. The copy_threshold will work around this limitation
+       * by falling back to copy if the pipeline needed more buffers. This
+       * also prevents having to do REQBUFS(N)/REQBUFS(0) every time
+       * configure is called. */
+      if (count != min_buffers || pool->enable_copy_threshold) {
+        GST_WARNING_OBJECT (pool,
+            "Uncertain or not enough buffers, enabling copy threshold");
+        min_buffers = count;
+        copy_threshold = min_latency;
+      }
+
+      break;
+    }
+    case GST_V4L2_IO_USERPTR:
+    {
+      guint count;
+
+      can_allocate =
+          GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, USERPTR);
+
+      GST_DEBUG_OBJECT (pool, "requesting %d USERPTR buffers", min_buffers);
+
+      count = gst_v4l2_allocator_start (pool->vallocator, min_buffers,
+          V4L2_MEMORY_USERPTR);
+      pool->num_allocated = count;
+
+      /* There is no rationale for not getting what we asked */
+      if (count < min_buffers) {
+        min_buffers = count;
+        goto no_buffers;
+      }
+
+      min_buffers = count;
+      break;
+    }
+    case GST_V4L2_IO_DMABUF_IMPORT:
+    {
+      guint count;
+
+      can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, DMABUF);
+
+      GST_DEBUG_OBJECT (pool, "requesting %d DMABUF buffers", min_buffers);
+
+      count = gst_v4l2_allocator_start (pool->vallocator, min_buffers,
+          V4L2_MEMORY_DMABUF);
+      pool->num_allocated = count;
+
+      /* There is no rationale for not getting what we asked */
+      if (count < min_buffers) {
+        min_buffers = count;
+        goto no_buffers;
+      }
+
+      min_buffers = count;
+      break;
+    }
+    default:
+      min_buffers = 0;
+      copy_threshold = 0;
+      g_assert_not_reached ();
+      break;
+  }
+
+  /* Without allocation support, latency is bounded by what we already have */
+  if (can_allocate)
+    max_latency = max_buffers;
+  else
+    max_latency = min_buffers;
+
+  pool->size = size;
+  pool->copy_threshold = copy_threshold;
+  pool->max_latency = max_latency;
+  pool->min_latency = min_latency;
+  pool->num_queued = 0;
+
+  if (max_buffers != 0 && max_buffers < min_buffers)
+    max_buffers = min_buffers;
+
+  gst_buffer_pool_config_set_params (config, caps, size, min_buffers,
+      max_buffers);
+  pclass->set_config (bpool, config);
+  gst_structure_free (config);
+
+  if (pool->other_pool)
+    if (!gst_buffer_pool_set_active (pool->other_pool, TRUE))
+      goto other_pool_failed;
+
+  /* now, allocate the buffers: */
+  if (!pclass->start (bpool))
+    goto start_failed;
+
+  if (!V4L2_TYPE_IS_OUTPUT (obj->type)) {
+    /* Capture side: resurrect buffer groups the allocator releases, and
+     * start streaming right away. */
+    pool->group_released_handler =
+        g_signal_connect_swapped (pool->vallocator, "group-released",
+        G_CALLBACK (gst_v4l2_buffer_pool_resurect_buffer), pool);
+    ret = gst_v4l2_buffer_pool_streamon (pool);
+  }
+
+  return ret;
+
+  /* ERRORS */
+wrong_config:
+  {
+    GST_ERROR_OBJECT (pool, "invalid config %" GST_PTR_FORMAT, config);
+    gst_structure_free (config);
+    return FALSE;
+  }
+no_buffers:
+  {
+    GST_ERROR_OBJECT (pool,
+        "we received %d buffer from device '%s', we want at least %d",
+        min_buffers, obj->videodev, GST_V4L2_MIN_BUFFERS);
+    gst_structure_free (config);
+    return FALSE;
+  }
+start_failed:
+  {
+    GST_ERROR_OBJECT (pool, "allocate failed");
+    return FALSE;
+  }
+other_pool_failed:
+  {
+    GST_ERROR_OBJECT (pool, "failed to active the other pool %"
+        GST_PTR_FORMAT, pool->other_pool);
+    return FALSE;
+  }
+}
+
+/* GstBufferPool::stop implementation.
+ *
+ * Deactivates the pool: disconnects the group-released handler,
+ * deactivates and drops the downstream pool, stops streaming, lets the
+ * base class free the buffers, and finally stops the v4l2 allocator
+ * (REQBUFS(0)).  On the NV target, capture-plane teardown additionally
+ * signals cplane_stopped_cond so a pending DRC reset can proceed. */
+static gboolean
+gst_v4l2_buffer_pool_stop (GstBufferPool * bpool)
+{
+  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
+  gboolean ret;
+
+  GST_DEBUG_OBJECT (pool, "stopping pool");
+
+  if (pool->group_released_handler > 0) {
+    g_signal_handler_disconnect (pool->vallocator,
+        pool->group_released_handler);
+    pool->group_released_handler = 0;
+  }
+
+  if (pool->other_pool) {
+    gst_buffer_pool_set_active (pool->other_pool, FALSE);
+    gst_object_unref (pool->other_pool);
+    pool->other_pool = NULL;
+  }
+
+  gst_v4l2_buffer_pool_streamoff (pool);
+
+  ret = GST_BUFFER_POOL_CLASS (parent_class)->stop (bpool);
+
+  if (ret && pool->vallocator) {
+    GstV4l2Return vret;
+
+    vret = gst_v4l2_allocator_stop (pool->vallocator);
+
+    /* GST_V4L2_BUSY: some memory is still held downstream; not fatal but
+     * the pool is not considered cleanly stopped. */
+    if (vret == GST_V4L2_BUSY)
+      GST_WARNING_OBJECT (pool, "some buffers are still outstanding");
+
+    ret = (vret == GST_V4L2_OK);
+  }
+
+#ifdef USE_V4L2_TARGET_NV
+  /* Make sure there are no outstanding buffers to be unreffed by the
+   * downstream component. In case of DRC, wait before resetting the decoder
+   * until the last buffer on the capture plane is unreffed.
+   */
+  if (!V4L2_TYPE_IS_OUTPUT(pool->obj->type))
+  {
+    g_mutex_lock (&pool->obj->cplane_stopped_lock);
+    pool->obj->capture_plane_stopped = TRUE;
+    g_cond_signal (&pool->obj->cplane_stopped_cond);
+    g_mutex_unlock (&pool->obj->cplane_stopped_lock);
+  }
+#endif
+
+  return ret;
+}
+
+/* GstBufferPool::flush_start implementation.
+ *
+ * Unblocks any thread waiting in gst_v4l2_buffer_pool_poll(): sets the
+ * poll object flushing (non-NV builds), clears the "empty" flag and
+ * broadcasts empty_cond, then propagates flushing to the downstream
+ * pool. */
+static void
+gst_v4l2_buffer_pool_flush_start (GstBufferPool * bpool)
+{
+  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
+
+  GST_DEBUG_OBJECT (pool, "start flushing");
+
+#ifndef USE_V4L2_TARGET_NV
+  gst_poll_set_flushing (pool->poll, TRUE);
+#endif
+
+  GST_OBJECT_LOCK (pool);
+  /* Wake up waiters even though nothing was queued. */
+  pool->empty = FALSE;
+  g_cond_broadcast (&pool->empty_cond);
+  GST_OBJECT_UNLOCK (pool);
+
+  if (pool->other_pool)
+    gst_buffer_pool_set_flushing (pool->other_pool, TRUE);
+}
+
+/* GstBufferPool::flush_stop implementation.
+ *
+ * Reverses flush_start: clears flushing on the downstream pool first,
+ * then on the poll object (non-NV builds). */
+static void
+gst_v4l2_buffer_pool_flush_stop (GstBufferPool * bpool)
+{
+  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
+
+  GST_DEBUG_OBJECT (pool, "stop flushing");
+
+  if (pool->other_pool)
+    gst_buffer_pool_set_flushing (pool->other_pool, FALSE);
+
+#ifndef USE_V4L2_TARGET_NV
+  gst_poll_set_flushing (pool->poll, FALSE);
+#endif
+}
+
+/* Wait until the device has a buffer ready to dequeue.
+ *
+ * First blocks on empty_cond while nothing is queued (except in RW mode,
+ * which has no queue), then polls the device fd if polling is supported.
+ * EAGAIN/EINTR retry the poll; EBUSY means we were stopped (flushing);
+ * ENXIO disables device polling for the rest of the session.
+ * Returns GST_FLOW_OK, GST_FLOW_FLUSHING when stopped, or
+ * GST_FLOW_ERROR on a poll error. */
+static GstFlowReturn
+gst_v4l2_buffer_pool_poll (GstV4l2BufferPool * pool)
+{
+  gint ret;
+
+  /* In RW mode there is no queue, hence no need to wait while the queue is
+   * empty */
+  if (pool->obj->mode != GST_V4L2_IO_RW) {
+    GST_OBJECT_LOCK (pool);
+    while (pool->empty)
+      g_cond_wait (&pool->empty_cond, GST_OBJECT_GET_LOCK (pool));
+    GST_OBJECT_UNLOCK (pool);
+  }
+
+  if (!pool->can_poll_device)
+    goto done;
+
+  GST_LOG_OBJECT (pool, "polling device");
+
+again:
+  ret = gst_poll_wait (pool->poll, GST_CLOCK_TIME_NONE);
+  if (G_UNLIKELY (ret < 0)) {
+    switch (errno) {
+      case EBUSY:
+        /* gst_poll_set_flushing() was called: we are stopping */
+        goto stopped;
+      case EAGAIN:
+      case EINTR:
+        goto again;
+      case ENXIO:
+        GST_WARNING_OBJECT (pool,
+            "v4l2 device doesn't support polling. Disabling"
+            " using libv4l2 in this case may cause deadlocks");
+        pool->can_poll_device = FALSE;
+        goto done;
+      default:
+        goto select_error;
+    }
+  }
+
+  if (gst_poll_fd_has_error (pool->poll, &pool->pollfd))
+    goto select_error;
+
+done:
+  return GST_FLOW_OK;
+
+  /* ERRORS */
+stopped:
+  {
+    GST_DEBUG_OBJECT (pool, "stop called");
+    return GST_FLOW_FLUSHING;
+  }
+select_error:
+  {
+    GST_ELEMENT_ERROR (pool->obj->element, RESOURCE, READ, (NULL),
+        ("poll error %d: %s (%d)", ret, g_strerror (errno), errno));
+    return GST_FLOW_ERROR;
+  }
+}
+
+/* Queue @buf into the V4L2 driver (VIDIOC_QBUF via the allocator).
+ *
+ * Validates the buffer, records it in pool->buffers[] under the object
+ * lock, forwards field and timestamp information for output queues, and
+ * signals empty_cond once queued.  On queue failure the buffer is tagged
+ * TAG_MEMORY so the allocator reclaims it, and the bookkeeping is rolled
+ * back.  Note: the object lock is held from before qbuf until either the
+ * success path or the queue_failed label unlocks it. */
+static GstFlowReturn
+gst_v4l2_buffer_pool_qbuf (GstV4l2BufferPool * pool, GstBuffer * buf)
+{
+  GstV4l2MemoryGroup *group = NULL;
+  const GstV4l2Object *obj = pool->obj;
+  GstClockTime timestamp;
+  gint index;
+
+#ifdef USE_V4L2_TARGET_NV
+  if (!gst_v4l2_is_buffer_valid (buf, &group, pool->obj->is_encode)) {
+    GST_ERROR_OBJECT (pool, "invalid buffer %p", buf);
+    return GST_FLOW_ERROR;
+  }
+#else
+  if (!gst_v4l2_is_buffer_valid (buf, &group)) {
+    GST_ERROR_OBJECT (pool, "invalid buffer %p", buf);
+    return GST_FLOW_ERROR;
+  }
+#endif
+
+  index = group->buffer.index;
+
+  if (pool->buffers[index] != NULL)
+    goto already_queued;
+
+  GST_LOG_OBJECT (pool, "queuing buffer %i", index);
+
+  if (V4L2_TYPE_IS_OUTPUT (obj->type)) {
+    enum v4l2_field field;
+
+    /* Except when field is set to alternate, buffer field is the same as
+     * the one defined in format */
+    if (V4L2_TYPE_IS_MULTIPLANAR (obj->type))
+      field = obj->format.fmt.pix_mp.field;
+    else
+      field = obj->format.fmt.pix.field;
+
+    /* NB: At this moment, we can't have alternate mode because it is not
+     * handled yet */
+    if (field == V4L2_FIELD_ALTERNATE) {
+      if (GST_BUFFER_FLAG_IS_SET (buf, GST_VIDEO_FRAME_FLAG_TFF))
+        field = V4L2_FIELD_TOP;
+      else
+        field = V4L2_FIELD_BOTTOM;
+    }
+
+    group->buffer.field = field;
+  }
+
+  /* Propagate the GStreamer timestamp into the v4l2_buffer */
+  if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
+    timestamp = GST_BUFFER_TIMESTAMP (buf);
+    GST_TIME_TO_TIMEVAL (timestamp, group->buffer.timestamp);
+  }
+
+  GST_OBJECT_LOCK (pool);
+  g_atomic_int_inc (&pool->num_queued);
+  pool->buffers[index] = buf;
+
+  if (!gst_v4l2_allocator_qbuf (pool->vallocator, group))
+    goto queue_failed;
+
+  /* Wake up any dequeuer waiting for the queue to become non-empty */
+  pool->empty = FALSE;
+  g_cond_signal (&pool->empty_cond);
+  GST_OBJECT_UNLOCK (pool);
+
+  return GST_FLOW_OK;
+
+already_queued:
+  {
+    GST_ERROR_OBJECT (pool, "the buffer %i was already queued", index);
+    return GST_FLOW_ERROR;
+  }
+queue_failed:
+  {
+    GST_ERROR_OBJECT (pool, "could not queue a buffer %i", index);
+    /* Mark broken buffer to the allocator */
+    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_TAG_MEMORY);
+    g_atomic_int_add (&pool->num_queued, -1);
+    pool->buffers[index] = NULL;
+    GST_OBJECT_UNLOCK (pool);
+    return GST_FLOW_ERROR;
+  }
+}
+
+/* Dequeue one buffer from the V4L2 driver (VIDIOC_DQBUF via allocator).
+ *
+ * Waits via gst_v4l2_buffer_pool_poll(), dequeues a memory group, maps
+ * it back to the GstBuffer recorded in pool->buffers[], marks the pool
+ * empty when the last queued buffer leaves, updates video meta offsets,
+ * and for capture queues translates v4l2 field/flags/timestamp into
+ * GStreamer buffer flags and timestamps (with workarounds for drivers
+ * reporting V4L2_FIELD_ANY).  On the NV target it optionally prints
+ * encoder motion vectors and decoder frame-type/error reports.
+ * Returns GST_FLOW_OK, GST_FLOW_EOS on a zero-sized last buffer,
+ * GST_FLOW_FLUSHING when poll was interrupted, or GST_FLOW_ERROR. */
+static GstFlowReturn
+gst_v4l2_buffer_pool_dqbuf (GstV4l2BufferPool * pool, GstBuffer ** buffer)
+{
+  GstFlowReturn res;
+  GstBuffer *outbuf;
+  GstV4l2Object *obj = pool->obj;
+  GstClockTime timestamp;
+  GstV4l2MemoryGroup *group;
+  GstVideoMeta *vmeta;
+  gsize size;
+  gint i;
+
+  if ((res = gst_v4l2_buffer_pool_poll (pool)) != GST_FLOW_OK)
+    goto poll_failed;
+
+  GST_LOG_OBJECT (pool, "dequeueing a buffer");
+
+  res = gst_v4l2_allocator_dqbuf (pool->vallocator, &group);
+  if (res == GST_FLOW_EOS)
+    goto eos;
+  if (res != GST_FLOW_OK)
+    goto dqbuf_failed;
+
+  /* get our GstBuffer with that index from the pool, if the buffer was
+   * outstanding we have a serious problem.
+   */
+  outbuf = pool->buffers[group->buffer.index];
+  if (outbuf == NULL)
+    goto no_buffer;
+
+  /* mark the buffer outstanding */
+  pool->buffers[group->buffer.index] = NULL;
+  if (g_atomic_int_dec_and_test (&pool->num_queued)) {
+    GST_OBJECT_LOCK (pool);
+    pool->empty = TRUE;
+    GST_OBJECT_UNLOCK (pool);
+  }
+
+#ifdef USE_V4L2_TARGET_NV
+  /* NVENC capture plane: optionally dump motion-vector metadata */
+  if (pool->obj->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
+      && obj->enableMVBufferMeta
+      && (!strcmp (obj->videodev, V4L2_DEVICE_PATH_NVENC)
+          || !strcmp (obj->videodev, V4L2_DEVICE_PATH_NVENC_ALT)))
+  {
+    v4l2_ctrl_videoenc_outputbuf_metadata_MV enc_mv_metadata;
+    memset ((void *) &enc_mv_metadata, 0, sizeof (enc_mv_metadata));
+
+    if (get_motion_vectors (obj, group->buffer.index, &enc_mv_metadata) == 0)
+    {
+      guint32 numMVs = enc_mv_metadata.bufSize / sizeof (MVInfo), i;
+      MVInfo *pInfo = enc_mv_metadata.pMVInfo;
+      g_print ("Num MVs = %d \n", numMVs);
+
+      for (i = 0; i < numMVs; i++, pInfo++)
+        g_print ("%d: mv_x=%d mv_y=%d weight=%d\n ", i, pInfo->mv_x,
+            pInfo->mv_y, pInfo->weight);
+    }
+  }
+
+  /* NVDEC capture plane: optionally report frame type / error metadata */
+  if (pool->obj->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
+      && (((!strcmp(obj->videodev, V4L2_DEVICE_PATH_NVDEC)) && (is_cuvid == FALSE))
+      || ((!strcmp(obj->videodev, V4L2_DEVICE_PATH_NVDEC_ALT)) && (is_cuvid == FALSE))
+      || ((!strcmp(obj->videodev, V4L2_DEVICE_PATH_NVDEC_MCCOY)) && (is_cuvid == TRUE)))
+      && (obj->Enable_frame_type_reporting || obj->Enable_error_check)) {
+    v4l2_ctrl_videodec_outputbuf_metadata dec_metadata;
+    memset ((void *) &dec_metadata, 0, sizeof (dec_metadata));
+    v4l2_video_dec_get_enable_frame_type_reporting (obj, group->buffer.index,
+        &dec_metadata);
+    report_metadata (obj, group->buffer.index, &dec_metadata);
+  }
+#endif
+  timestamp = GST_TIMEVAL_TO_TIME (group->buffer.timestamp);
+
+  /* Update the video meta plane offsets from the actual memory sizes */
+  size = 0;
+  vmeta = gst_buffer_get_video_meta (outbuf);
+  for (i = 0; i < group->n_mem; i++) {
+    GST_LOG_OBJECT (pool,
+        "dequeued buffer %p seq:%d (ix=%d), mem %p used %d, plane=%d, flags %08x, ts %"
+        GST_TIME_FORMAT ", pool-queued=%d, buffer=%p", outbuf,
+        group->buffer.sequence, group->buffer.index, group->mem[i],
+        group->planes[i].bytesused, i, group->buffer.flags,
+        GST_TIME_ARGS (timestamp), pool->num_queued, outbuf);
+
+    if (vmeta) {
+      vmeta->offset[i] = size;
+      size += gst_memory_get_sizes (group->mem[i], NULL, NULL);
+    }
+  }
+
+  /* Ignore timestamp and field for OUTPUT device */
+  if (V4L2_TYPE_IS_OUTPUT (obj->type))
+    goto done;
+
+  /* Check for driver bug in reporting field */
+  if (group->buffer.field == V4L2_FIELD_ANY) {
+    /* Only warn once to avoid the spamming */
+#ifndef GST_DISABLE_GST_DEBUG
+    if (!pool->has_warned_on_buggy_field) {
+      pool->has_warned_on_buggy_field = TRUE;
+      GST_WARNING_OBJECT (pool,
+          "Driver should never set v4l2_buffer.field to ANY");
+    }
+#endif
+
+    /* Use the value from the format (works for UVC bug) */
+    group->buffer.field = obj->format.fmt.pix.field;
+
+    /* If driver also has buggy S_FMT, assume progressive */
+    if (group->buffer.field == V4L2_FIELD_ANY) {
+#ifndef GST_DISABLE_GST_DEBUG
+      if (!pool->has_warned_on_buggy_field) {
+        pool->has_warned_on_buggy_field = TRUE;
+        GST_WARNING_OBJECT (pool,
+            "Driver should never set v4l2_format.pix.field to ANY");
+      }
+#endif
+
+      group->buffer.field = V4L2_FIELD_NONE;
+    }
+  }
+
+  /* set top/bottom field first if v4l2_buffer has the information */
+  switch (group->buffer.field) {
+    case V4L2_FIELD_NONE:
+      GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_INTERLACED);
+      GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
+      break;
+    case V4L2_FIELD_INTERLACED_TB:
+      GST_BUFFER_FLAG_SET (outbuf, GST_VIDEO_BUFFER_FLAG_INTERLACED);
+      GST_BUFFER_FLAG_SET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
+      break;
+    case V4L2_FIELD_INTERLACED_BT:
+      GST_BUFFER_FLAG_SET (outbuf, GST_VIDEO_BUFFER_FLAG_INTERLACED);
+      GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
+      break;
+    case V4L2_FIELD_INTERLACED:
+      GST_BUFFER_FLAG_SET (outbuf, GST_VIDEO_BUFFER_FLAG_INTERLACED);
+      /* NTSC variants are bottom-field-first by convention */
+      if (obj->tv_norm == V4L2_STD_NTSC_M ||
+          obj->tv_norm == V4L2_STD_NTSC_M_JP ||
+          obj->tv_norm == V4L2_STD_NTSC_M_KR) {
+        GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
+      } else {
+        GST_BUFFER_FLAG_SET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
+      }
+      break;
+    default:
+      GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_INTERLACED);
+      GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
+      GST_FIXME_OBJECT (pool,
+          "Unhandled enum v4l2_field %d - treating as progressive",
+          group->buffer.field);
+      break;
+  }
+
+  /* Encoded streams: translate keyframe flag into DELTA_UNIT; JPEG variants
+   * are always intra-coded. */
+  if (GST_VIDEO_INFO_FORMAT (&obj->info) == GST_VIDEO_FORMAT_ENCODED) {
+    if ((group->buffer.flags & V4L2_BUF_FLAG_KEYFRAME) ||
+        GST_V4L2_PIXELFORMAT (obj) == V4L2_PIX_FMT_MJPEG ||
+        GST_V4L2_PIXELFORMAT (obj) == V4L2_PIX_FMT_JPEG ||
+        GST_V4L2_PIXELFORMAT (obj) == V4L2_PIX_FMT_PJPG)
+      GST_BUFFER_FLAG_UNSET (outbuf, GST_BUFFER_FLAG_DELTA_UNIT);
+    else
+      GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DELTA_UNIT);
+  }
+
+  if (group->buffer.flags & V4L2_BUF_FLAG_ERROR)
+    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_CORRUPTED);
+
+  GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
+  GST_BUFFER_OFFSET (outbuf) = group->buffer.sequence;
+  GST_BUFFER_OFFSET_END (outbuf) = group->buffer.sequence + 1;
+
+done:
+  *buffer = outbuf;
+
+  return GST_FLOW_OK;
+
+  /* ERRORS */
+poll_failed:
+  {
+    GST_DEBUG_OBJECT (pool, "poll error %s", gst_flow_get_name (res));
+    return res;
+  }
+eos:
+  {
+    return GST_FLOW_EOS;
+  }
+dqbuf_failed:
+  {
+    return GST_FLOW_ERROR;
+  }
+no_buffer:
+  {
+    GST_ERROR_OBJECT (pool, "No free buffer found in the pool at index %d.",
+        group->buffer.index);
+    return GST_FLOW_ERROR;
+  }
+}
+
+/* GstBufferPool::acquire_buffer implementation.
+ *
+ * Capture queues hand back a buffer freshly dequeued from the driver
+ * (streaming modes) or an empty one from the base class (RW mode);
+ * output queues always return a free unqueued buffer from the base
+ * class.  Resurrection requests bypass this dispatch entirely. */
+static GstFlowReturn
+gst_v4l2_buffer_pool_acquire_buffer (GstBufferPool * bpool, GstBuffer ** buffer,
+    GstBufferPoolAcquireParams * params)
+{
+  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
+  GstBufferPoolClass *pclass = GST_BUFFER_POOL_CLASS (parent_class);
+  GstV4l2Object *obj = pool->obj;
+  GstFlowReturn ret;
+
+  GST_DEBUG_OBJECT (pool, "acquire");
+
+  /* If this is being called to resurrect a lost buffer, go straight to
+   * the base class. */
+  if (params && (params->flags & GST_V4L2_BUFFER_POOL_ACQUIRE_FLAG_RESURRECT))
+    return pclass->acquire_buffer (bpool, buffer, params);
+
+  switch (obj->type) {
+    case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+    case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+      /* Capture: this function must return a buffer with new captured
+       * data. */
+      switch (obj->mode) {
+        case GST_V4L2_IO_RW:
+          /* No driver queue in RW mode: take an empty buffer from the
+           * pool. */
+          ret = pclass->acquire_buffer (bpool, buffer, params);
+          break;
+
+        case GST_V4L2_IO_DMABUF:
+        case GST_V4L2_IO_MMAP:
+        case GST_V4L2_IO_USERPTR:
+        case GST_V4L2_IO_DMABUF_IMPORT:
+          /* The v4l2 queue is our buffer storage: dequeue one.  dqbuf
+           * polls first, so this can be interrupted cleanly. */
+          ret = gst_v4l2_buffer_pool_dqbuf (pool, buffer);
+          break;
+
+        default:
+          ret = GST_FLOW_ERROR;
+          g_assert_not_reached ();
+          break;
+      }
+      break;
+
+    case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+    case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+      /* Playback: every supported mode hands out a free unqueued buffer
+       * from the base class. */
+      switch (obj->mode) {
+        case GST_V4L2_IO_RW:
+        case GST_V4L2_IO_MMAP:
+        case GST_V4L2_IO_DMABUF:
+        case GST_V4L2_IO_USERPTR:
+        case GST_V4L2_IO_DMABUF_IMPORT:
+          ret = pclass->acquire_buffer (bpool, buffer, params);
+          break;
+
+        default:
+          ret = GST_FLOW_ERROR;
+          g_assert_not_reached ();
+          break;
+      }
+      break;
+
+    default:
+      ret = GST_FLOW_ERROR;
+      g_assert_not_reached ();
+      break;
+  }
+
+  return ret;
+}
+
+/* GstBufferPool::release_buffer implementation.
+ *
+ * Capture queues: a released buffer is immediately re-queued into the
+ * driver (after an optional refill from the downstream pool); if the
+ * buffer is invalid or qbuf fails it is tagged and handed back to the
+ * base class.  Output queues: an unqueued buffer goes back on the free
+ * list (dropping any import qdata); a buffer still queued in the driver
+ * is simply left there until dequeued. */
+static void
+gst_v4l2_buffer_pool_release_buffer (GstBufferPool * bpool, GstBuffer * buffer)
+{
+  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
+  GstBufferPoolClass *pclass = GST_BUFFER_POOL_CLASS (parent_class);
+  GstV4l2Object *obj = pool->obj;
+
+  GST_DEBUG_OBJECT (pool, "release buffer %p", buffer);
+
+  switch (obj->type) {
+    case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+    case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+      /* capture, put the buffer back in the queue so that we can refill it
+       * later. */
+      switch (obj->mode) {
+        case GST_V4L2_IO_RW:
+          /* release back in the pool */
+          pclass->release_buffer (bpool, buffer);
+          break;
+
+        case GST_V4L2_IO_DMABUF:
+        case GST_V4L2_IO_MMAP:
+        case GST_V4L2_IO_USERPTR:
+        case GST_V4L2_IO_DMABUF_IMPORT:
+        {
+          GstV4l2MemoryGroup *group;
+#ifdef USE_V4L2_TARGET_NV
+          if (gst_v4l2_is_buffer_valid (buffer, &group, pool->obj->is_encode)) {
+#else
+          if (gst_v4l2_is_buffer_valid (buffer, &group)) {
+#endif
+            gst_v4l2_allocator_reset_group (pool->vallocator, group);
+            /* queue back in the device */
+            if (pool->other_pool)
+              gst_v4l2_buffer_pool_prepare_buffer (pool, buffer, NULL);
+            if (gst_v4l2_buffer_pool_qbuf (pool, buffer) != GST_FLOW_OK)
+              pclass->release_buffer (bpool, buffer);
+          } else {
+            /* Simply release invalid/modified buffer, the allocator will
+             * give it back later */
+            GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_TAG_MEMORY);
+            pclass->release_buffer (bpool, buffer);
+          }
+          break;
+        }
+        default:
+          g_assert_not_reached ();
+          break;
+      }
+      break;
+
+    case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+    case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+      switch (obj->mode) {
+        case GST_V4L2_IO_RW:
+          /* release back in the pool */
+          pclass->release_buffer (bpool, buffer);
+          break;
+
+        case GST_V4L2_IO_MMAP:
+        case GST_V4L2_IO_DMABUF:
+        case GST_V4L2_IO_USERPTR:
+        case GST_V4L2_IO_DMABUF_IMPORT:
+        {
+          GstV4l2MemoryGroup *group;
+          guint index;
+
+#ifdef USE_V4L2_TARGET_NV
+          if (!gst_v4l2_is_buffer_valid (buffer, &group, pool->obj->is_encode)) {
+#else
+          if (!gst_v4l2_is_buffer_valid (buffer, &group)) {
+#endif
+            /* Simply release invalid/modified buffer, the allocator will
+             * give it back later */
+            GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_TAG_MEMORY);
+            pclass->release_buffer (bpool, buffer);
+            break;
+          }
+
+          index = group->buffer.index;
+
+          if (pool->buffers[index] == NULL) {
+            GST_LOG_OBJECT (pool, "buffer %u not queued, putting on free list",
+                index);
+
+            /* Remove qdata, this will unmap any map data in userptr */
+            gst_mini_object_set_qdata (GST_MINI_OBJECT (buffer),
+                GST_V4L2_IMPORT_QUARK, NULL, NULL);
+
+            /* reset to default size */
+            gst_v4l2_allocator_reset_group (pool->vallocator, group);
+
+            /* playback, put the buffer back in the queue to refill later. */
+            pclass->release_buffer (bpool, buffer);
+          } else {
+            /* the buffer is queued in the device but maybe not played yet. We just
+             * leave it there and not make it available for future calls to acquire
+             * for now. The buffer will be dequeued and reused later. */
+            GST_LOG_OBJECT (pool, "buffer %u is queued", index);
+          }
+          break;
+        }
+
+        default:
+          g_assert_not_reached ();
+          break;
+      }
+      break;
+
+    default:
+      g_assert_not_reached ();
+      break;
+  }
+}
+
+/* GObject dispose: drop the pool's object references. Dispose may run more
+ * than once, so every pointer is cleared right after it is released. */
+static void
+gst_v4l2_buffer_pool_dispose (GObject * object)
+{
+  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (object);
+
+  if (pool->vallocator != NULL) {
+    gst_object_unref (pool->vallocator);
+    pool->vallocator = NULL;
+  }
+
+  if (pool->allocator != NULL) {
+    gst_object_unref (pool->allocator);
+    pool->allocator = NULL;
+  }
+
+  if (pool->other_pool != NULL) {
+    gst_object_unref (pool->other_pool);
+    pool->other_pool = NULL;
+  }
+
+  G_OBJECT_CLASS (parent_class)->dispose (object);
+}
+
+/* GObject finalize: release the resources that dispose must not touch. */
+static void
+gst_v4l2_buffer_pool_finalize (GObject * object)
+{
+  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (object);
+
+  /* video_fd is a dup(2) of the v4l2object's fd (see _new), owned here */
+  if (pool->video_fd >= 0)
+    pool->obj->close (pool->video_fd);
+
+#ifndef USE_V4L2_TARGET_NV
+  /* the NV target never creates a GstPoll (see _init) */
+  gst_poll_free (pool->poll);
+#endif
+
+  /* This can't be done in dispose method because we must not set pointer
+   * to NULL as it is part of the v4l2object and dispose could be called
+   * multiple times */
+  gst_object_unref (pool->obj->element);
+
+  g_cond_clear (&pool->empty_cond);
+
+  /* FIXME have we done enough here ? */
+
+  G_OBJECT_CLASS (parent_class)->finalize (object);
+}
+
+/* Instance init: a freshly created pool has nothing queued yet. */
+static void
+gst_v4l2_buffer_pool_init (GstV4l2BufferPool * pool)
+{
+  pool->empty = TRUE;
+  g_cond_init (&pool->empty_cond);
+#ifndef USE_V4L2_TARGET_NV
+  /* the NV target does not poll the device, so it needs no GstPoll */
+  pool->poll = gst_poll_new (TRUE);
+#endif
+}
+
+/* Class init: wire up GObject lifecycle and GstBufferPool virtual methods,
+ * and register the debug categories used by this file. */
+static void
+gst_v4l2_buffer_pool_class_init (GstV4l2BufferPoolClass * klass)
+{
+  GstBufferPoolClass *pool_class = GST_BUFFER_POOL_CLASS (klass);
+  GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
+
+  /* GObject lifecycle */
+  gobject_class->dispose = gst_v4l2_buffer_pool_dispose;
+  gobject_class->finalize = gst_v4l2_buffer_pool_finalize;
+
+  /* GstBufferPool virtual methods */
+  pool_class->start = gst_v4l2_buffer_pool_start;
+  pool_class->stop = gst_v4l2_buffer_pool_stop;
+  pool_class->set_config = gst_v4l2_buffer_pool_set_config;
+  pool_class->alloc_buffer = gst_v4l2_buffer_pool_alloc_buffer;
+  pool_class->acquire_buffer = gst_v4l2_buffer_pool_acquire_buffer;
+  pool_class->release_buffer = gst_v4l2_buffer_pool_release_buffer;
+  pool_class->flush_start = gst_v4l2_buffer_pool_flush_start;
+  pool_class->flush_stop = gst_v4l2_buffer_pool_flush_stop;
+
+  GST_DEBUG_CATEGORY_INIT (v4l2bufferpool_debug, "v4l2bufferpool", 0,
+      "V4L2 Buffer Pool");
+  GST_DEBUG_CATEGORY_GET (CAT_PERFORMANCE, "GST_PERFORMANCE");
+}
+
+/**
+ * gst_v4l2_buffer_pool_new:
+ * @obj: the v4l2 object owning the pool
+ * @caps: the caps used to pick the default pool parameters
+ *
+ * Construct a new buffer pool. The pool takes a dup(2) of the object's
+ * video fd (closed again in finalize) and creates its own #GstV4l2Allocator.
+ *
+ * Returns: the new pool, use gst_object_unref() to free resources
+ */
+GstBufferPool *
+gst_v4l2_buffer_pool_new (GstV4l2Object * obj, GstCaps * caps)
+{
+  GstV4l2BufferPool *pool;
+  GstStructure *config;
+  gchar *name, *parent_name;
+  gint fd;
+
+  fd = obj->dup (obj->video_fd);
+  if (fd < 0)
+    goto dup_failed;
+
+  /* setting a significant unique name */
+  parent_name = gst_object_get_name (GST_OBJECT (obj->element));
+  name = g_strconcat (parent_name, ":", "pool:",
+      V4L2_TYPE_IS_OUTPUT (obj->type) ? "sink" : "src", NULL);
+  g_free (parent_name);
+
+  pool = (GstV4l2BufferPool *) g_object_new (GST_TYPE_V4L2_BUFFER_POOL,
+      "name", name, NULL);
+  g_object_ref_sink (pool);
+  g_free (name);
+
+#ifndef USE_V4L2_TARGET_NV
+  /* watch the dup'd fd: writable for output queues, readable for capture */
+  gst_poll_fd_init (&pool->pollfd);
+  pool->pollfd.fd = fd;
+  gst_poll_add_fd (pool->poll, &pool->pollfd);
+  if (V4L2_TYPE_IS_OUTPUT (obj->type))
+    gst_poll_fd_ctl_write (pool->poll, &pool->pollfd, TRUE);
+  else
+    gst_poll_fd_ctl_read (pool->poll, &pool->pollfd, TRUE);
+#endif
+
+  pool->video_fd = fd;
+  pool->obj = obj;
+  /* TODO: Check with poll_device set to FALSE */
+
+#ifdef USE_V4L2_TARGET_NV
+  pool->can_poll_device = FALSE;
+#endif
+
+  pool->vallocator = gst_v4l2_allocator_new (GST_OBJECT (pool), obj);
+  if (pool->vallocator == NULL)
+    goto allocator_failed;
+
+  /* dropped again in finalize; see the comment there about dispose */
+  gst_object_ref (obj->element);
+
+  config = gst_buffer_pool_get_config (GST_BUFFER_POOL_CAST (pool));
+#ifndef USE_V4L2_TARGET_NV
+  gst_buffer_pool_config_set_params (config, caps, obj->info.size, 0, 0);
+#else
+  /* TODO: Fix below once have a single source for Jetson TX1, TX2 and Xavier */
+  /* NOTE(review): if videodev matches neither the decoder nor encoder paths
+   * below, no params are set on the config at all — verify that is intended */
+  if (((!strcmp(obj->videodev, V4L2_DEVICE_PATH_NVDEC)) && (is_cuvid == FALSE)) ||
+      ((!strcmp(obj->videodev, V4L2_DEVICE_PATH_NVDEC_ALT)) && (is_cuvid == FALSE)) ||
+      ((!strcmp(obj->videodev, V4L2_DEVICE_PATH_NVDEC_MCCOY)) && (is_cuvid == TRUE))) {
+    gst_buffer_pool_config_set_params (config, caps, obj->info.size, 0, 0);
+    /* Need to adjust the size to 0th plane's size since we will only output
+       v4l2 memory associated with 0th plane. */
+    if (!V4L2_TYPE_IS_OUTPUT(obj->type))
+      gst_buffer_pool_config_set_params (config, caps, obj->info.width * obj->info.height, 0, 0);
+  }
+  if (!strcmp(obj->videodev, V4L2_DEVICE_PATH_NVENC)
+      || !strcmp(obj->videodev, V4L2_DEVICE_PATH_NVENC_ALT))
+    gst_buffer_pool_config_set_params (config, caps, obj->info.size, 0, 0);
+#endif
+  /* This will simply set a default config, but will not configure the pool
+   * because min and max are not valid */
+  gst_buffer_pool_set_config (GST_BUFFER_POOL_CAST (pool), config);
+
+  return GST_BUFFER_POOL (pool);
+
+  /* ERRORS */
+dup_failed:
+  {
+    GST_ERROR ("failed to dup fd %d (%s)", errno, g_strerror (errno));
+    return NULL;
+  }
+allocator_failed:
+  {
+    GST_ERROR_OBJECT (pool, "Failed to create V4L2 allocator");
+    gst_object_unref (pool);
+    return NULL;
+  }
+}
+
+/* Read one frame from the device into @buf using read(2) (GST_V4L2_IO_RW
+ * mode). Polls before each attempt and retries on EAGAIN/EINTR and short
+ * reads. On success the buffer is resized to the amount actually read;
+ * on error it is resized to 0 and a flow error is returned. */
+static GstFlowReturn
+gst_v4l2_do_read (GstV4l2BufferPool * pool, GstBuffer * buf)
+{
+  GstFlowReturn res;
+  GstV4l2Object *obj = pool->obj;
+  gint amount;
+  GstMapInfo map;
+  gint toread;
+
+  toread = obj->info.size;
+
+  GST_LOG_OBJECT (pool, "reading %d bytes into buffer %p", toread, buf);
+
+  /* gst_buffer_map() can fail (non-writable or unmappable memory); reading
+   * into an unmapped map.data would be undefined behaviour */
+  if (!gst_buffer_map (buf, &map, GST_MAP_WRITE))
+    goto map_failed;
+
+  do {
+    if ((res = gst_v4l2_buffer_pool_poll (pool)) != GST_FLOW_OK)
+      goto poll_error;
+
+    amount = obj->read (obj->video_fd, map.data, toread);
+
+    if (amount == toread) {
+      break;
+    } else if (amount == -1) {
+      if (errno == EAGAIN || errno == EINTR) {
+        continue;
+      } else
+        goto read_error;
+    } else {
+      /* short reads can happen if a signal interrupts the read */
+      continue;
+    }
+  } while (TRUE);
+
+  GST_LOG_OBJECT (pool, "read %d bytes", amount);
+  gst_buffer_unmap (buf, &map);
+  gst_buffer_resize (buf, 0, amount);
+
+  return GST_FLOW_OK;
+
+  /* ERRORS */
+map_failed:
+  {
+    GST_ERROR_OBJECT (pool, "failed to map buffer for writing");
+    return GST_FLOW_ERROR;
+  }
+poll_error:
+  {
+    GST_DEBUG ("poll error %s", gst_flow_get_name (res));
+    goto cleanup;
+  }
+read_error:
+  {
+    GST_ELEMENT_ERROR (obj->element, RESOURCE, READ,
+        (_("Error reading %d bytes from device '%s'."),
+            toread, obj->videodev), GST_ERROR_SYSTEM);
+    res = GST_FLOW_ERROR;
+    goto cleanup;
+  }
+cleanup:
+  {
+    gst_buffer_unmap (buf, &map);
+    gst_buffer_resize (buf, 0, 0);
+    return res;
+  }
+}
+
+/**
+ * gst_v4l2_buffer_pool_process:
+ * @pool: a #GstV4l2BufferPool
+ * @buf: a #GstBuffer, may be replaced
+ *
+ * Process @buf in @pool. For capture devices, this function fills @buf with
+ * data from the device. For output devices, this function sends the contents
+ * of @buf to the device for playback.
+ *
+ * Returns: %GST_FLOW_OK on success.
+ */
+GstFlowReturn
+gst_v4l2_buffer_pool_process (GstV4l2BufferPool * pool, GstBuffer ** buf)
+{
+  GstFlowReturn ret = GST_FLOW_OK;
+  GstBufferPool *bpool = GST_BUFFER_POOL_CAST (pool);
+  GstV4l2Object *obj = pool->obj;
+
+  GST_DEBUG_OBJECT (pool, "process buffer %p", buf);
+
+  if (GST_BUFFER_POOL_IS_FLUSHING (pool))
+    return GST_FLOW_FLUSHING;
+
+  switch (obj->type) {
+    case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+    case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+      /* capture */
+      switch (obj->mode) {
+        case GST_V4L2_IO_RW:
+          /* capture into the buffer */
+          ret = gst_v4l2_do_read (pool, *buf);
+          break;
+
+        case GST_V4L2_IO_MMAP:
+        case GST_V4L2_IO_DMABUF:
+        {
+          GstBuffer *tmp;
+
+          if ((*buf)->pool == bpool) {
+            guint num_queued;
+            gsize size = gst_buffer_get_size (*buf);
+
+            /* a zero-sized capture buffer signals EOS (or a corrupted frame) */
+            if (size == 0) {
+              if (GST_BUFFER_FLAG_IS_SET (*buf, GST_BUFFER_FLAG_CORRUPTED))
+                goto buffer_corrupted;
+              else
+                goto eos;
+            }
+
+            num_queued = g_atomic_int_get (&pool->num_queued);
+            GST_TRACE_OBJECT (pool, "Only %i buffer left in the capture queue.",
+                num_queued);
+
+            /* If we have no more buffer, and can allocate it time to do so */
+#ifdef USE_V4L2_TARGET_NV
+            if (num_queued == 0 && pool->enable_dynamic_allocation) {
+#else
+            if (num_queued == 0) {
+#endif
+              if (GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, MMAP)) {
+                ret = gst_v4l2_buffer_pool_resurect_buffer (pool);
+                if (ret == GST_FLOW_OK)
+                  goto done;
+              }
+            }
+
+            /* start copying buffers when we are running low on buffers */
+#ifdef USE_V4L2_TARGET_NV
+            GST_DEBUG_OBJECT (pool, "num_queued %d copy_threshold %d dynamic_allocation %d\n",
+                num_queued, pool->copy_threshold, pool->enable_dynamic_allocation);
+
+            /* NV target never copies; it only resurrects when allowed to
+             * allocate dynamically */
+            if (num_queued < pool->copy_threshold && pool->enable_dynamic_allocation) {
+              if (GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, MMAP)) {
+                ret = gst_v4l2_buffer_pool_resurect_buffer (pool);
+                if (ret == GST_FLOW_OK)
+                  goto done;
+              }
+            }
+#else
+            if (num_queued < pool->copy_threshold) {
+              GstBuffer *copy;
+
+              if (GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, MMAP)) {
+                ret = gst_v4l2_buffer_pool_resurect_buffer (pool);
+                if (ret == GST_FLOW_OK)
+                  goto done;
+              }
+
+              /* copy the buffer */
+              copy = gst_buffer_copy_region (*buf,
+                  GST_BUFFER_COPY_ALL | GST_BUFFER_COPY_DEEP, 0, -1);
+              GST_LOG_OBJECT (pool, "copy buffer %p->%p", *buf, copy);
+
+              /* and requeue so that we can continue capturing */
+              gst_buffer_unref (*buf);
+              *buf = copy;
+            }
+#endif
+            ret = GST_FLOW_OK;
+            /* nothing, data was inside the buffer when we did _acquire() */
+            goto done;
+          }
+
+          /* buffer not from our pool, grab a frame and copy it into the target */
+          if ((ret = gst_v4l2_buffer_pool_dqbuf (pool, &tmp)) != GST_FLOW_OK)
+            goto done;
+
+          /* An empty buffer on capture indicates the end of stream */
+          if (gst_buffer_get_size (tmp) == 0) {
+            gboolean corrupted = GST_BUFFER_FLAG_IS_SET (tmp,
+                GST_BUFFER_FLAG_CORRUPTED);
+
+            gst_v4l2_buffer_pool_release_buffer (bpool, tmp);
+
+            if (corrupted)
+              goto buffer_corrupted;
+            else
+              goto eos;
+          }
+
+          ret = gst_v4l2_buffer_pool_copy_buffer (pool, *buf, tmp);
+
+          /* and queue the buffer again after the copy */
+          gst_v4l2_buffer_pool_release_buffer (bpool, tmp);
+
+          if (ret != GST_FLOW_OK)
+            goto copy_failed;
+          break;
+        }
+
+        case GST_V4L2_IO_USERPTR:
+        {
+          struct UserPtrData *data;
+          GstBuffer *tmp;
+
+          /* Replace our buffer with downstream allocated buffer */
+          data = gst_mini_object_steal_qdata (GST_MINI_OBJECT (*buf),
+              GST_V4L2_IMPORT_QUARK);
+          tmp = gst_buffer_ref (data->buffer);
+          _unmap_userptr_frame (data);
+
+          /* Now tmp is writable, copy the flags and timestamp */
+          gst_buffer_copy_into (tmp, *buf,
+              GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
+
+          gst_buffer_replace (buf, tmp);
+          gst_buffer_unref (tmp);
+          break;
+        }
+
+        case GST_V4L2_IO_DMABUF_IMPORT:
+        {
+          GstBuffer *tmp;
+
+          /* Replace our buffer with downstream allocated buffer */
+          tmp = gst_mini_object_steal_qdata (GST_MINI_OBJECT (*buf),
+              GST_V4L2_IMPORT_QUARK);
+
+          gst_buffer_copy_into (tmp, *buf,
+              GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
+
+          gst_buffer_replace (buf, tmp);
+          gst_buffer_unref (tmp);
+          break;
+        }
+
+        default:
+          g_assert_not_reached ();
+          break;
+      }
+      break;
+
+    case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+    case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+      /* playback */
+      switch (obj->mode) {
+        case GST_V4L2_IO_RW:
+          /* FIXME, do write() */
+          GST_WARNING_OBJECT (pool, "implement write()");
+          break;
+
+        case GST_V4L2_IO_USERPTR:
+        case GST_V4L2_IO_DMABUF_IMPORT:
+        case GST_V4L2_IO_DMABUF:
+        case GST_V4L2_IO_MMAP:
+        {
+          GstBuffer *to_queue = NULL;
+#ifndef USE_V4L2_TARGET_NV
+          GstV4l2MemoryGroup *group;
+#else
+          GstV4l2MemoryGroup *group = NULL;
+#endif
+          gint index;
+
+          /* only buffers from this pool that are valid and not already
+           * queued can be queued directly; everything else is copied */
+          if ((*buf)->pool != bpool)
+            goto copying;
+
+#ifdef USE_V4L2_TARGET_NV
+          if (!gst_v4l2_is_buffer_valid (*buf, &group, pool->obj->is_encode))
+            goto copying;
+#else
+          if (!gst_v4l2_is_buffer_valid (*buf, &group))
+            goto copying;
+#endif
+
+          index = group->buffer.index;
+
+          GST_LOG_OBJECT (pool, "processing buffer %i from our pool", index);
+
+          if (pool->buffers[index] != NULL) {
+            GST_LOG_OBJECT (pool, "buffer %i already queued, copying", index);
+            goto copying;
+          }
+
+          /* we can queue directly */
+          to_queue = gst_buffer_ref (*buf);
+
+        copying:
+          if (to_queue == NULL) {
+            GstBufferPoolAcquireParams params = { 0 };
+
+            GST_LOG_OBJECT (pool, "alloc buffer from our pool");
+
+            /* this can return EOS if all buffers are outstanding which would
+             * be strange because we would expect the upstream element to have
+             * allocated them and returned to us.. */
+            params.flags = GST_BUFFER_POOL_ACQUIRE_FLAG_DONTWAIT;
+            ret = gst_buffer_pool_acquire_buffer (bpool, &to_queue, &params);
+            if (ret != GST_FLOW_OK)
+              goto acquire_failed;
+
+            ret = gst_v4l2_buffer_pool_prepare_buffer (pool, to_queue, *buf);
+            if (ret != GST_FLOW_OK) {
+              gst_buffer_unref (to_queue);
+              goto prepare_failed;
+            }
+          }
+
+          if ((ret = gst_v4l2_buffer_pool_qbuf (pool, to_queue)) != GST_FLOW_OK)
+            goto queue_failed;
+
+          /* if we are not streaming yet (this is the first buffer, start
+           * streaming now */
+          if (!gst_v4l2_buffer_pool_streamon (pool)) {
+            /* don't check return value because qbuf would have failed */
+#ifdef USE_V4L2_TARGET_NV
+            gst_v4l2_is_buffer_valid (to_queue, &group, pool->obj->is_encode);
+#else
+            gst_v4l2_is_buffer_valid (to_queue, &group);
+#endif
+
+            /* qbuf has stored to_queue buffer but we are not in
+             * streaming state, so the flush logic won't be performed.
+             * To avoid leaks, flush the allocator and restore the queued
+             * buffer as non-queued */
+            gst_v4l2_allocator_flush (pool->vallocator);
+
+            pool->buffers[group->buffer.index] = NULL;
+
+            gst_mini_object_set_qdata (GST_MINI_OBJECT (to_queue),
+                GST_V4L2_IMPORT_QUARK, NULL, NULL);
+            gst_buffer_unref (to_queue);
+            g_atomic_int_add (&pool->num_queued, -1);
+            goto start_failed;
+          }
+
+          /* Remove our ref, we will still hold this buffer in acquire as needed,
+           * otherwise the pool will think it is outstanding and will refuse to stop. */
+          gst_buffer_unref (to_queue);
+
+#ifndef USE_V4L2_TARGET_NV
+          if (g_atomic_int_get (&pool->num_queued) >= pool->min_latency) {
+#else
+          if (g_atomic_int_get (&pool->num_queued) >= (gint) pool->min_latency) {
+#endif
+            GstBuffer *out;
+            /* all buffers are queued, try to dequeue one and release it back
+             * into the pool so that _acquire can get to it again. */
+            ret = gst_v4l2_buffer_pool_dqbuf (pool, &out);
+            if (ret == GST_FLOW_OK && out->pool == NULL)
+              /* release the rendered buffer back into the pool. This wakes up any
+               * thread waiting for a buffer in _acquire(). */
+              gst_v4l2_buffer_pool_release_buffer (bpool, out);
+          }
+          break;
+        }
+        default:
+          g_assert_not_reached ();
+          break;
+      }
+      break;
+    default:
+      g_assert_not_reached ();
+      break;
+  }
+done:
+  return ret;
+
+  /* ERRORS */
+copy_failed:
+  {
+    GST_ERROR_OBJECT (pool, "failed to copy buffer");
+    return ret;
+  }
+buffer_corrupted:
+  {
+    GST_WARNING_OBJECT (pool, "Dropping corrupted buffer without payload");
+    gst_buffer_unref (*buf);
+    *buf = NULL;
+    return GST_V4L2_FLOW_CORRUPTED_BUFFER;
+  }
+eos:
+  {
+    GST_DEBUG_OBJECT (pool, "end of stream reached");
+    gst_buffer_unref (*buf);
+    *buf = NULL;
+    return GST_V4L2_FLOW_LAST_BUFFER;
+  }
+acquire_failed:
+  {
+    if (ret == GST_FLOW_FLUSHING)
+      GST_DEBUG_OBJECT (pool, "flushing");
+    else
+      GST_WARNING_OBJECT (pool, "failed to acquire a buffer: %s",
+          gst_flow_get_name (ret));
+    return ret;
+  }
+prepare_failed:
+  {
+    GST_ERROR_OBJECT (pool, "failed to prepare data");
+    return ret;
+  }
+queue_failed:
+  {
+    GST_ERROR_OBJECT (pool, "failed to queue buffer");
+    return ret;
+  }
+start_failed:
+  {
+    GST_ERROR_OBJECT (pool, "failed to start streaming");
+    return GST_FLOW_ERROR;
+  }
+}
+
+/* Install the downstream pool used to import buffers from; only legal
+ * while this pool is not yet active. Replaces any previous other_pool. */
+void
+gst_v4l2_buffer_pool_set_other_pool (GstV4l2BufferPool * pool,
+    GstBufferPool * other_pool)
+{
+  GstBufferPool *previous;
+
+  g_return_if_fail (!gst_buffer_pool_is_active (GST_BUFFER_POOL (pool)));
+
+  previous = pool->other_pool;
+  pool->other_pool = gst_object_ref (other_pool);
+  if (previous != NULL)
+    gst_object_unref (previous);
+}
+
+/* Enable or disable handing out buffer copies when the number of queued
+ * buffers drops below the copy threshold (see _process). Takes the object
+ * lock so the flag change is visible to concurrent readers. */
+void
+gst_v4l2_buffer_pool_copy_at_threshold (GstV4l2BufferPool * pool, gboolean copy)
+{
+  GST_OBJECT_LOCK (pool);
+  pool->enable_copy_threshold = copy;
+  GST_OBJECT_UNLOCK (pool);
+}
+
+/* Flush the pool by stopping the stream, which returns all queued buffers.
+ * Capture queues are restarted immediately so frames keep flowing; output
+ * queues restart lazily on the next queued buffer (see _process).
+ * Returns FALSE if restarting the capture stream failed. */
+gboolean
+gst_v4l2_buffer_pool_flush (GstBufferPool * bpool)
+{
+  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
+
+  gst_v4l2_buffer_pool_streamoff (pool);
+
+  if (V4L2_TYPE_IS_OUTPUT (pool->obj->type))
+    return TRUE;
+
+  return gst_v4l2_buffer_pool_streamon (pool);
+}
+
+#ifdef USE_V4L2_TARGET_NV
+/* NV target only: toggle on-demand buffer allocation. The flag and the
+ * allocator are updated under the object lock so they stay consistent
+ * with concurrent readers in _process. */
+void
+gst_v4l2_buffer_pool_enable_dynamic_allocation (GstV4l2BufferPool * pool,
+    gboolean enable_dynamic_allocation)
+{
+  GST_DEBUG_OBJECT (pool, "dynamic allocation enable %d", enable_dynamic_allocation);
+
+  GST_OBJECT_LOCK (pool);
+  pool->enable_dynamic_allocation = enable_dynamic_allocation;
+  if (pool->vallocator)
+    gst_v4l2_allocator_enable_dynamic_allocation (pool->vallocator, enable_dynamic_allocation);
+  GST_OBJECT_UNLOCK (pool);
+}
+
+/* Fetch encoder motion-vector metadata for the capture buffer at
+ * @bufferIndex via the NV VIDIOC_G_EXT_CTRLS extension.
+ * Returns the ioctl result (< 0 on failure). */
+gint
+get_motion_vectors (GstV4l2Object * obj, guint32 bufferIndex,
+    v4l2_ctrl_videoenc_outputbuf_metadata_MV * enc_mv_metadata)
+{
+  v4l2_ctrl_video_metadata metadata;
+  /* zero-initialize: both structs carry reserved fields the kernel may
+   * inspect, and stack garbage there can make the ioctl fail spuriously */
+  struct v4l2_ext_control control = { 0 };
+  struct v4l2_ext_controls ctrls = { 0 };
+  gint ret;
+
+  ctrls.count = 1;
+  ctrls.controls = &control;
+  ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+
+  metadata.buffer_index = bufferIndex;
+  metadata.VideoEncMetadataMV = enc_mv_metadata;
+
+  control.id = V4L2_CID_MPEG_VIDEOENC_METADATA_MV;
+  control.string = (gchar *) &metadata;
+
+  if (!GST_V4L2_IS_OPEN (obj))
+    g_print ("V4L2 device is not open\n");
+
+  ret = obj->ioctl (obj->video_fd, VIDIOC_G_EXT_CTRLS, &ctrls);
+  if (ret < 0)
+    g_print ("Error getting data\n");
+
+  return ret;
+}
+
+/* Print decoder per-frame metadata on stdout, depending on which reporting
+ * flags are enabled on @obj. The FrameType values 0/1/2 map to B/P/I per
+ * the NV decoder metadata; other values are ignored. */
+static void
+report_metadata (GstV4l2Object * obj, guint32 buffer_index,
+    v4l2_ctrl_videodec_outputbuf_metadata * metadata)
+{
+  if (obj->Enable_frame_type_reporting) {
+    switch (metadata->CodecParams.H264DecParams.FrameType) {
+      case 0:
+        g_print ("FrameType = B\n");
+        break;
+      case 1:
+        g_print ("FrameType = P\n");
+        break;
+      case 2:
+        g_print ("FrameType = I\n");
+        if (metadata->CodecParams.H264DecParams.dpbInfo.currentFrame.bIdrFrame) {
+          g_print (" (IDR)\n");
+        }
+        break;
+      default:
+        /* unknown frame type: nothing to report */
+        break;
+    }
+    g_print ("nActiveRefFrames = %d\n",
+        metadata->CodecParams.H264DecParams.dpbInfo.nActiveRefFrames);
+  }
+  if (obj->Enable_error_check) {
+    g_print
+        ("ErrorType= %d Decoded MBs= %d Concealed MBs= %d FrameDecodeTime %d\n",
+        metadata->FrameDecStats.DecodeError, metadata->FrameDecStats.DecodedMBs,
+        metadata->FrameDecStats.ConcealedMBs,
+        metadata->FrameDecStats.FrameDecodeTime);
+  }
+}
+
+/* Retrieve decoder output-buffer metadata for @buffer_index into
+ * @dec_metadata via the NV VIDIOC_G_EXT_CTRLS extension; prints a
+ * diagnostic on failure. */
+static void
+v4l2_video_dec_get_enable_frame_type_reporting (GstV4l2Object * obj,
+    guint32 buffer_index, v4l2_ctrl_videodec_outputbuf_metadata * dec_metadata)
+{
+  v4l2_ctrl_video_metadata metadata;
+  /* zero-initialize: reserved fields in the ext-controls structs must not
+   * contain stack garbage when handed to the kernel */
+  struct v4l2_ext_control control = { 0 };
+  struct v4l2_ext_controls ctrls = { 0 };
+  gint ret = -1;
+
+  ctrls.count = 1;
+  ctrls.controls = &control;
+  ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+
+  metadata.buffer_index = buffer_index;
+  metadata.VideoDecMetadata = dec_metadata;
+
+  control.id = V4L2_CID_MPEG_VIDEODEC_METADATA;
+  control.string = (gchar *) &metadata;
+
+  ret = obj->ioctl (obj->video_fd, VIDIOC_G_EXT_CTRLS, &ctrls);
+  if (ret < 0)
+    g_print ("Error while getting report metadata\n");
+}
+#endif
+
diff --git a/gst-v4l2/gstv4l2bufferpool.h b/gst-v4l2/gstv4l2bufferpool.h
new file mode 100644
index 0000000..70242db
--- /dev/null
+++ b/gst-v4l2/gstv4l2bufferpool.h
@@ -0,0 +1,140 @@
+/* GStreamer
+ *
+ * Copyright (C) 2001-2002 Ronald Bultje
+ * 2006 Edgard Lima
+ * 2009 Texas Instruments, Inc - http://www.ti.com/
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * gstv4l2bufferpool.h V4L2 buffer pool class
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __GST_V4L2_BUFFER_POOL_H__
+#define __GST_V4L2_BUFFER_POOL_H__
+
+#include
+
+/* The structures are renamed as the name conflicts with the
+ * OSS v4l2 library structures. */
+#ifdef USE_V4L2_TARGET_NV
+#define GstV4l2BufferPool GstNvV4l2BufferPool
+#define GstV4l2BufferPoolClass GstNvV4l2BufferPoolClass
+#endif
+
+typedef struct _GstV4l2BufferPool GstV4l2BufferPool;
+typedef struct _GstV4l2BufferPoolClass GstV4l2BufferPoolClass;
+typedef struct _GstV4l2Meta GstV4l2Meta;
+
+#include "gstv4l2object.h"
+#include "gstv4l2allocator.h"
+
+G_BEGIN_DECLS
+
+#define GST_TYPE_V4L2_BUFFER_POOL (gst_v4l2_buffer_pool_get_type())
+#define GST_IS_V4L2_BUFFER_POOL(obj) (G_TYPE_CHECK_INSTANCE_TYPE ((obj), GST_TYPE_V4L2_BUFFER_POOL))
+#define GST_V4L2_BUFFER_POOL(obj) (G_TYPE_CHECK_INSTANCE_CAST ((obj), GST_TYPE_V4L2_BUFFER_POOL, GstV4l2BufferPool))
+#define GST_V4L2_BUFFER_POOL_CAST(obj) ((GstV4l2BufferPool*)(obj))
+
+/* This flow return is used to indicate that the last buffer of a
+ * drain or a resolution change has been found. This should normally
+ * only occur for mem-2-mem devices. */
+#define GST_V4L2_FLOW_LAST_BUFFER GST_FLOW_CUSTOM_SUCCESS
+
+/* This flow return is used to indicate that the returned buffer was marked
+ * with the error flag and had no payload. This error should be recovered by
+ * simply waiting for the next buffer. */
+#define GST_V4L2_FLOW_CORRUPTED_BUFFER GST_FLOW_CUSTOM_SUCCESS_1
+
+/* Instance structure of the V4L2 buffer pool. */
+struct _GstV4l2BufferPool
+{
+  GstBufferPool parent;
+
+  GstV4l2Object *obj; /* the v4l2 object */
+  gint video_fd; /* a dup(2) of the v4l2object's video_fd */
+  GstPoll *poll; /* a poll for video_fd (unused on the NV target) */
+  GstPollFD pollfd;
+  gboolean can_poll_device; /* FALSE on the NV target */
+
+  gboolean empty; /* TRUE while no buffer is queued in the driver */
+  GCond empty_cond; /* signalled when the pool stops being empty */
+
+  GstV4l2Allocator *vallocator; /* our V4L2 allocator */
+  GstAllocator *allocator; /* downstream-provided allocator, if any */
+  GstAllocationParams params;
+  GstBufferPool *other_pool; /* downstream pool to import buffers from */
+  guint size;
+  GstVideoInfo caps_info; /* Default video information */
+
+  gboolean add_videometa; /* set if video meta should be added */
+  gboolean enable_copy_threshold; /* If copy_threshold should be set */
+
+  guint min_latency; /* number of buffers we will hold */
+  guint max_latency; /* number of buffers we can hold */
+  guint num_queued; /* number of buffers queued in the driver */
+  guint num_allocated; /* number of buffers allocated */
+  guint copy_threshold; /* when our pool runs lower, start handing out copies */
+
+  gboolean streaming;
+  gboolean flushing;
+
+  /* buffers currently queued in the driver, indexed by v4l2 buffer index */
+#ifdef USE_V4L2_TARGET_NV
+  GstBuffer *buffers[NV_VIDEO_MAX_FRAME];
+#else
+  GstBuffer *buffers[VIDEO_MAX_FRAME];
+#endif
+
+  /* signal handlers */
+  gulong group_released_handler;
+
+  /* Control to warn only once on buggy field driver bug */
+  gboolean has_warned_on_buggy_field;
+
+#ifdef USE_V4L2_TARGET_NV
+  gboolean enable_dynamic_allocation; /* If dynamic_allocation should be set */
+#endif
+};
+
+/* Class structure; adds no virtual methods over GstBufferPool. */
+struct _GstV4l2BufferPoolClass
+{
+  GstBufferPoolClass parent_class;
+};
+
+GType gst_v4l2_buffer_pool_get_type (void);
+
+GstBufferPool * gst_v4l2_buffer_pool_new (GstV4l2Object *obj, GstCaps *caps);
+
+GstFlowReturn gst_v4l2_buffer_pool_process (GstV4l2BufferPool * bpool, GstBuffer ** buf);
+
+void gst_v4l2_buffer_pool_set_other_pool (GstV4l2BufferPool * pool,
+ GstBufferPool * other_pool);
+void gst_v4l2_buffer_pool_copy_at_threshold (GstV4l2BufferPool * pool,
+ gboolean copy);
+
+gboolean gst_v4l2_buffer_pool_flush (GstBufferPool *pool);
+
+#ifdef USE_V4L2_TARGET_NV
+void
+gst_v4l2_buffer_pool_enable_dynamic_allocation (GstV4l2BufferPool * pool,
+ gboolean enable_dynamic_allocation);
+gint
+get_motion_vectors (GstV4l2Object *obj, guint32 bufferIndex,
+ v4l2_ctrl_videoenc_outputbuf_metadata_MV *enc_mv_metadata);
+#endif
+
+G_END_DECLS
+
+#endif /*__GST_V4L2_BUFFER_POOL_H__ */
diff --git a/gst-v4l2/gstv4l2h264enc.c b/gst-v4l2/gstv4l2h264enc.c
new file mode 100644
index 0000000..3cc9874
--- /dev/null
+++ b/gst-v4l2/gstv4l2h264enc.c
@@ -0,0 +1,842 @@
+/*
+ * Copyright (C) 2014 SUMOMO Computer Association
+ * Author: ayaka
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include
+#include
+#include
+#include
+#include
+
+#include "gstv4l2object.h"
+#include "gstv4l2h264enc.h"
+
+#include
+#include
+
+GST_DEBUG_CATEGORY_STATIC (gst_v4l2_h264_enc_debug);
+#define GST_CAT_DEFAULT gst_v4l2_h264_enc_debug
+
+#ifdef USE_V4L2_TARGET_NV
+static GType
+gst_v4l2_videnc_profile_get_type (void);
+
+#define GST_TYPE_V4L2_VID_ENC_PROFILE (gst_v4l2_videnc_profile_get_type ())
+
+/* prototypes */
+gboolean gst_v4l2_h264_enc_slice_header_spacing (GstV4l2Object * v4l2object,
+ guint32 slice_header_spacing, enum v4l2_enc_slice_length_type slice_length_type);
+gboolean set_v4l2_h264_encoder_properties (GstVideoEncoder * encoder);
+#endif
+
+#ifdef USE_V4L2_TARGET_NV
+static GstStaticCaps src_template_caps =
+GST_STATIC_CAPS ("video/x-h264, stream-format=(string) byte-stream, "
+ "alignment=(string) { au, nal }");
+#else
+static GstStaticCaps src_template_caps =
+GST_STATIC_CAPS ("video/x-h264, stream-format=(string) byte-stream, "
+ "alignment=(string) au");
+#endif
+
+/* Property IDs of the H.264 encoder element. The NV-specific properties are
+ * only registered when building for the NV target. */
+enum
+{
+  PROP_0,
+  V4L2_STD_OBJECT_PROPS,
+#ifdef USE_V4L2_TARGET_NV
+  PROP_PROFILE,
+  PROP_INSERT_VUI,
+  PROP_EXTENDED_COLORFORMAT,
+  PROP_INSERT_SPS_PPS,
+  PROP_INSERT_AUD,
+  PROP_NUM_BFRAMES,
+  PROP_ENTROPY_CODING,
+  PROP_BIT_PACKETIZATION,
+  PROP_SLICE_INTRA_REFRESH,
+  PROP_SLICE_INTRA_REFRESH_INTERVAL,
+  PROP_TWO_PASS_CBR,
+  PROP_ENABLE_MV_META,
+  PROP_SLICE_HEADER_SPACING,
+  PROP_NUM_REFERENCE_FRAMES,
+  PROP_PIC_ORDER_CNT_TYPE,
+  PROP_ENABLE_LOSSLESS_ENC
+#endif
+/* TODO add H264 controls
+ * PROP_I_FRAME_QP,
+ * PROP_P_FRAME_QP,
+ * PROP_B_FRAME_QP,
+ * PROP_MIN_QP,
+ * PROP_MAX_QP,
+ * PROP_8x8_TRANSFORM,
+ * PROP_CPB_SIZE,
+ * PROP_ENTROPY_MODE,
+ * PROP_I_PERIOD,
+ * PROP_LOOP_FILTER_ALPHA,
+ * PROP_LOOP_FILTER_BETA,
+ * PROP_LOOP_FILTER_MODE,
+ * PROP_VUI_EXT_SAR_HEIGHT,
+ * PROP_VUI_EXT_SAR_WIDTH,
+ * PROP_VUI_SAR_ENABLED,
+ * PROP_VUI_SAR_IDC,
+ * PROP_SEI_FRAME_PACKING,
+ * PROP_SEI_FP_CURRENT_FRAME_0,
+ * PROP_SEI_FP_ARRANGEMENT_TYP,
+ * ...
+ * */
+};
+
+#ifdef USE_V4L2_TARGET_NV
+#define DEFAULT_PROFILE V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE
+#define DEFAULT_NUM_B_FRAMES 0
+#define MAX_NUM_B_FRAMES 2
+#define DEFAULT_NUM_REFERENCE_FRAMES 1
+#define MAX_NUM_REFERENCE_FRAMES 8
+#define DEFAULT_BIT_PACKETIZATION FALSE
+#define DEFAULT_SLICE_HEADER_SPACING 0
+#define DEFAULT_INTRA_REFRESH_FRAME_INTERVAL 60
+#define DEFAULT_PIC_ORDER_CNT_TYPE 0
+#endif
+
+#define gst_v4l2_h264_enc_parent_class parent_class
+G_DEFINE_TYPE (GstV4l2H264Enc, gst_v4l2_h264_enc, GST_TYPE_V4L2_VIDEO_ENC);
+
+/* GObject set_property handler. On the NV target this stores the value in
+ * the element and, for a few properties, pushes it to the driver at once;
+ * elsewhere it is a no-op (TODO). */
+static void
+gst_v4l2_h264_enc_set_property (GObject * object,
+    guint prop_id, const GValue * value, GParamSpec * pspec)
+{
+  /* TODO */
+#ifdef USE_V4L2_TARGET_NV
+  GstV4l2H264Enc *self = GST_V4L2_H264_ENC (object);
+  GstV4l2VideoEnc *video_enc = GST_V4L2_VIDEO_ENC (object);
+
+  switch (prop_id) {
+    case PROP_PROFILE:
+      self->profile = g_value_get_enum (value);
+      /* applied immediately when the output device is already open */
+      if (GST_V4L2_IS_OPEN (video_enc->v4l2output)) {
+        if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+                V4L2_CID_MPEG_VIDEO_H264_PROFILE, self->profile)) {
+          g_print ("S_EXT_CTRLS for H264_PROFILE failed\n");
+        }
+      }
+      break;
+    case PROP_NUM_BFRAMES:
+      self->nBFrames = g_value_get_uint (value);
+      /* B-frame encoding needs at least two reference frames */
+      if (self->nBFrames && self->nRefFrames == DEFAULT_NUM_REFERENCE_FRAMES)
+      {
+        self->nRefFrames = 2;
+        g_print ("Minimum 2 Ref-Frames are required for B-frames encoding\n");
+      }
+      break;
+    case PROP_INSERT_SPS_PPS:
+      self->insert_sps_pps = g_value_get_boolean (value);
+      break;
+    case PROP_INSERT_AUD:
+      self->insert_aud = g_value_get_boolean (value);
+      break;
+    case PROP_INSERT_VUI:
+      self->insert_vui = g_value_get_boolean (value);
+      break;
+    /* extended-colorformat property is available for cuvid path only*/
+    case PROP_EXTENDED_COLORFORMAT:
+      self->extended_colorformat = g_value_get_boolean (value);
+      break;
+    case PROP_ENTROPY_CODING:
+      self->disable_cabac_entropy_coding = g_value_get_boolean (value);
+      break;
+    case PROP_BIT_PACKETIZATION:
+      self->bit_packetization = g_value_get_boolean (value);
+      break;
+    case PROP_SLICE_HEADER_SPACING:
+      self->slice_header_spacing = g_value_get_uint64 (value);
+      /* a non-zero spacing implies slice-level output from the encoder */
+      if (self->slice_header_spacing)
+        video_enc->slice_output = TRUE;
+      else
+        video_enc->slice_output = FALSE;
+      break;
+    case PROP_SLICE_INTRA_REFRESH_INTERVAL:
+      self->SliceIntraRefreshInterval = g_value_get_uint (value);
+      break;
+    case PROP_TWO_PASS_CBR:
+      self->EnableTwopassCBR = g_value_get_boolean (value);
+      break;
+    case PROP_ENABLE_MV_META:
+      /* mirrored on the capture object so the pool can fetch MV metadata */
+      self->EnableMVBufferMeta = g_value_get_boolean (value);
+      video_enc->v4l2capture->enableMVBufferMeta = g_value_get_boolean (value);
+      break;
+    case PROP_NUM_REFERENCE_FRAMES:
+      self->nRefFrames = g_value_get_uint (value);
+      break;
+    case PROP_PIC_ORDER_CNT_TYPE:
+      self->poc_type = g_value_get_uint (value);
+      break;
+    case PROP_ENABLE_LOSSLESS_ENC:
+      self->enableLossless = g_value_get_boolean (value);
+      break;
+    default:
+      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+      break;
+  }
+#endif
+}
+
+/* GObject get_property handler: returns the cached NV property values;
+ * a no-op outside the NV target (TODO). */
+static void
+gst_v4l2_h264_enc_get_property (GObject * object,
+    guint prop_id, GValue * value, GParamSpec * pspec)
+{
+  /* TODO */
+#ifdef USE_V4L2_TARGET_NV
+  GstV4l2H264Enc *self = GST_V4L2_H264_ENC (object);
+
+  switch (prop_id) {
+    case PROP_PROFILE:
+      g_value_set_enum (value, self->profile);
+      break;
+    case PROP_NUM_BFRAMES:
+      g_value_set_uint (value, self->nBFrames);
+      break;
+    case PROP_INSERT_SPS_PPS:
+      g_value_set_boolean (value, self->insert_sps_pps);
+      break;
+    case PROP_INSERT_AUD:
+      g_value_set_boolean (value, self->insert_aud);
+      break;
+    case PROP_INSERT_VUI:
+      g_value_set_boolean (value, self->insert_vui);
+      break;
+    /* extended-colorformat property is available for cuvid path only*/
+    case PROP_EXTENDED_COLORFORMAT:
+      g_value_set_boolean (value, self->extended_colorformat);
+      break;
+    case PROP_ENTROPY_CODING:
+      g_value_set_boolean (value, self->disable_cabac_entropy_coding);
+      break;
+    case PROP_BIT_PACKETIZATION:
+      g_value_set_boolean (value, self->bit_packetization);
+      break;
+    case PROP_SLICE_HEADER_SPACING:
+      g_value_set_uint64 (value, self->slice_header_spacing);
+      break;
+    case PROP_SLICE_INTRA_REFRESH_INTERVAL:
+      g_value_set_uint (value, self->SliceIntraRefreshInterval);
+      break;
+    case PROP_TWO_PASS_CBR:
+      g_value_set_boolean (value, self->EnableTwopassCBR);
+      break;
+    case PROP_ENABLE_MV_META:
+      g_value_set_boolean (value, self->EnableMVBufferMeta);
+      break;
+    case PROP_NUM_REFERENCE_FRAMES:
+      g_value_set_uint (value, self->nRefFrames);
+      break;
+    case PROP_PIC_ORDER_CNT_TYPE:
+      g_value_set_uint (value, self->poc_type);
+      break;
+    case PROP_ENABLE_LOSSLESS_ENC:
+      g_value_set_boolean (value, self->enableLossless);
+      break;
+    default:
+      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+      break;
+  }
+#endif
+}
+
+/* Map a GStreamer caps profile string (as used in video/x-h264 caps) to the
+ * matching V4L2_MPEG_VIDEO_H264_PROFILE_* enum value.
+ * Returns -1 and logs a warning for unrecognised strings; callers treat a
+ * negative value as "no profile restriction". Inverse of
+ * v4l2_profile_to_string(). */
+static gint
+v4l2_profile_from_string (const gchar * profile)
+{
+ gint v4l2_profile = -1;
+
+ if (g_str_equal (profile, "baseline")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
+ } else if (g_str_equal (profile, "constrained-baseline")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE;
+ } else if (g_str_equal (profile, "main")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN;
+ } else if (g_str_equal (profile, "extended")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED;
+ } else if (g_str_equal (profile, "high")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
+ } else if (g_str_equal (profile, "high-10")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10;
+ } else if (g_str_equal (profile, "high-4:2:2")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422;
+ } else if (g_str_equal (profile, "high-4:4:4")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE;
+ } else if (g_str_equal (profile, "high-10-intra")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA;
+ } else if (g_str_equal (profile, "high-4:2:2-intra")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA;
+ } else if (g_str_equal (profile, "high-4:4:4-intra")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA;
+ } else if (g_str_equal (profile, "cavlc-4:4:4-intra")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA;
+ } else if (g_str_equal (profile, "scalable-baseline")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE;
+ } else if (g_str_equal (profile, "scalable-high")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH;
+ } else if (g_str_equal (profile, "scalable-high-intra")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA;
+ } else if (g_str_equal (profile, "stereo-high")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH;
+ } else if (g_str_equal (profile, "multiview-high")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH;
+ } else {
+ GST_WARNING ("Unsupported profile string '%s'", profile);
+ }
+
+ return v4l2_profile;
+}
+
+/* Map a V4L2_MPEG_VIDEO_H264_PROFILE_* enum value to the GStreamer caps
+ * profile string. Returns NULL and logs a warning for unknown values.
+ * Inverse of v4l2_profile_from_string(); every string returned here is
+ * accepted by that function, keeping the mapping round-trip safe. */
+static const gchar *
+v4l2_profile_to_string (gint v4l2_profile)
+{
+ switch (v4l2_profile) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ return "baseline";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
+ return "constrained-baseline";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ return "main";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
+ return "extended";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ return "high";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10:
+ return "high-10";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422:
+ return "high-4:2:2";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE:
+ return "high-4:4:4";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA:
+ return "high-10-intra";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA:
+ return "high-4:2:2-intra";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA:
+ return "high-4:4:4-intra";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA:
+ return "cavlc-4:4:4-intra";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE:
+ return "scalable-baseline";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH:
+ return "scalable-high";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA:
+ return "scalable-high-intra";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH:
+ return "stereo-high";
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH:
+ return "multiview-high";
+ default:
+ GST_WARNING ("Unsupported V4L2 profile %i", v4l2_profile);
+ break;
+ }
+
+ return NULL;
+}
+
+/* Map a GStreamer caps level string to the V4L2_MPEG_VIDEO_H264_LEVEL_*
+ * enum value. Note that integral levels are spelled without a decimal
+ * point here ("3", "4", "5"), matching GStreamer H.264 caps conventions;
+ * v4l2_level_to_string() must produce the same spellings for the mapping
+ * to round-trip. Returns -1 and logs a warning for unknown strings. */
+static gint
+v4l2_level_from_string (const gchar * level)
+{
+ gint v4l2_level = -1;
+
+ if (g_str_equal (level, "1"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_1_0;
+ else if (g_str_equal (level, "1b"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_1B;
+ else if (g_str_equal (level, "1.1"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_1_1;
+ else if (g_str_equal (level, "1.2"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_1_2;
+ else if (g_str_equal (level, "1.3"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_1_3;
+ else if (g_str_equal (level, "2"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_2_0;
+ else if (g_str_equal (level, "2.1"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_2_1;
+ else if (g_str_equal (level, "2.2"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_2_2;
+ else if (g_str_equal (level, "3"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_3_0;
+ else if (g_str_equal (level, "3.1"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_3_1;
+ else if (g_str_equal (level, "3.2"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_3_2;
+ else if (g_str_equal (level, "4"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
+ else if (g_str_equal (level, "4.1"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
+ else if (g_str_equal (level, "4.2"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_4_2;
+ else if (g_str_equal (level, "5"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_5_0;
+ else if (g_str_equal (level, "5.1"))
+ v4l2_level = V4L2_MPEG_VIDEO_H264_LEVEL_5_1;
+ else
+ GST_WARNING ("Unsupported level '%s'", level);
+
+ return v4l2_level;
+}
+
+/* Map a V4L2_MPEG_VIDEO_H264_LEVEL_* enum value to the GStreamer caps
+ * level string. Returns NULL and logs a warning for unknown values.
+ * Strings must round-trip through v4l2_level_from_string(), so integral
+ * levels are spelled without a decimal point ("3", not "3.0"). */
+static const gchar *
+v4l2_level_to_string (gint v4l2_level)
+{
+ switch (v4l2_level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return "1";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ return "1b";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return "1.1";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return "1.2";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return "1.3";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return "2";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return "2.1";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return "2.2";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ /* Was "3.0", which v4l2_level_from_string() rejects — would break
+ * level round-trips during caps negotiation. */
+ return "3";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return "3.1";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return "3.2";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return "4";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return "4.1";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return "4.2";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ return "5";
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ return "5.1";
+ default:
+ GST_WARNING ("Unsupported V4L2 level %i", v4l2_level);
+ break;
+ }
+
+ return NULL;
+}
+
+/* Instance init: set NV-specific property defaults. Fields not assigned
+ * here (insert_* on cuvid, EnableTwopassCBR, EnableMVBufferMeta,
+ * SliceIntraRefreshInterval, ...) rely on GObject zero-initialization. */
+static void
+gst_v4l2_h264_enc_init (GstV4l2H264Enc * self)
+{
+#ifdef USE_V4L2_TARGET_NV
+ self->profile = DEFAULT_PROFILE;
+ self->insert_sps_pps = FALSE;
+ self->insert_aud = FALSE;
+ self->insert_vui = FALSE;
+ self->enableLossless = FALSE;
+
+ /* extended-colorformat exists only on the cuvid (dGPU) path */
+ if (is_cuvid == TRUE)
+ self->extended_colorformat = FALSE;
+
+ self->nBFrames = 0;
+ self->nRefFrames = 1;
+ self->bit_packetization = DEFAULT_BIT_PACKETIZATION;
+ self->slice_header_spacing = DEFAULT_SLICE_HEADER_SPACING;
+ self->poc_type = DEFAULT_PIC_ORDER_CNT_TYPE;
+#endif
+}
+
+/* Class init: register element metadata, GObject property accessors, the
+ * NV-specific properties (split between the cuvid/dGPU path and the
+ * Tegra path via is_cuvid), and hook the codec-specific callbacks into
+ * the GstV4l2VideoEnc base class. */
+static void
+gst_v4l2_h264_enc_class_init (GstV4l2H264EncClass * klass)
+{
+ GstElementClass *element_class;
+ GObjectClass *gobject_class;
+ GstV4l2VideoEncClass *baseclass;
+
+ parent_class = g_type_class_peek_parent (klass);
+
+ element_class = (GstElementClass *) klass;
+ gobject_class = (GObjectClass *) klass;
+ baseclass = (GstV4l2VideoEncClass *) (klass);
+
+ GST_DEBUG_CATEGORY_INIT (gst_v4l2_h264_enc_debug, "v4l2h264enc", 0,
+ "V4L2 H.264 Encoder");
+
+ gst_element_class_set_static_metadata (element_class,
+ "V4L2 H.264 Encoder",
+ "Codec/Encoder/Video",
+ "Encode H.264 video streams via V4L2 API", "ayaka ");
+
+ gobject_class->set_property =
+ GST_DEBUG_FUNCPTR (gst_v4l2_h264_enc_set_property);
+ gobject_class->get_property =
+ GST_DEBUG_FUNCPTR (gst_v4l2_h264_enc_get_property);
+
+#ifdef USE_V4L2_TARGET_NV
+ /* "profile" is common to both paths; the rest depend on is_cuvid */
+ g_object_class_install_property (gobject_class, PROP_PROFILE,
+ g_param_spec_enum ("profile", "profile",
+ "Set profile for v4l2 encode",
+ GST_TYPE_V4L2_VID_ENC_PROFILE, DEFAULT_PROFILE,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ GST_PARAM_MUTABLE_READY));
+
+ if (is_cuvid == TRUE) {
+ g_object_class_install_property (gobject_class, PROP_EXTENDED_COLORFORMAT,
+ g_param_spec_boolean ("extended-colorformat",
+ "Set Extended ColorFormat",
+ "Set Extended ColorFormat pixel values 0 to 255 in VUI Info",
+ FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+ } else if (is_cuvid == FALSE) {
+ /* Tegra-only properties */
+ g_object_class_install_property (gobject_class, PROP_PIC_ORDER_CNT_TYPE,
+ g_param_spec_uint ("poc-type",
+ "Picture Order Count type",
+ "Set Picture Order Count type value",
+ 0, 2, DEFAULT_PIC_ORDER_CNT_TYPE,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ GST_PARAM_MUTABLE_READY));
+
+ g_object_class_install_property (gobject_class, PROP_INSERT_VUI,
+ g_param_spec_boolean ("insert-vui",
+ "Insert H.264 VUI",
+ "Insert H.264 VUI(Video Usability Information) in SPS",
+ FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_INSERT_SPS_PPS,
+ g_param_spec_boolean ("insert-sps-pps",
+ "Insert H.264 SPS, PPS",
+ "Insert H.264 SPS, PPS at every IDR frame",
+ FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_INSERT_AUD,
+ g_param_spec_boolean ("insert-aud",
+ "Insert H.264 AUD",
+ "Insert H.264 Access Unit Delimiter(AUD)",
+ FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_NUM_BFRAMES,
+ g_param_spec_uint ("num-B-Frames",
+ "B Frames between two reference frames",
+ "Number of B Frames between two reference frames (not recommended)",
+ 0, MAX_NUM_B_FRAMES, DEFAULT_NUM_B_FRAMES,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ GST_PARAM_MUTABLE_READY));
+
+ g_object_class_install_property (gobject_class, PROP_ENTROPY_CODING,
+ g_param_spec_boolean ("disable-cabac",
+ "Set Entropy Coding",
+ "Set Entropy Coding Type CAVLC(TRUE) or CABAC(FALSE)",
+ FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_BIT_PACKETIZATION,
+ g_param_spec_boolean ("bit-packetization", "Bit Based Packetization",
+ "Whether or not Packet size is based upon Number Of bits",
+ DEFAULT_BIT_PACKETIZATION,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ GST_PARAM_MUTABLE_READY));
+
+ g_object_class_install_property (gobject_class, PROP_SLICE_HEADER_SPACING,
+ g_param_spec_uint64 ("slice-header-spacing", "Slice Header Spacing",
+ "Slice Header Spacing number of macroblocks/bits in one packet",
+ 0, G_MAXUINT64, DEFAULT_SLICE_HEADER_SPACING,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ GST_PARAM_MUTABLE_READY));
+
+ g_object_class_install_property (gobject_class, PROP_ENABLE_MV_META,
+ g_param_spec_boolean ("EnableMVBufferMeta",
+ "Enable Motion Vector Meta data",
+ "Enable Motion Vector Meta data for encoding",
+ FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ GST_PARAM_MUTABLE_READY));
+
+ g_object_class_install_property (gobject_class,
+ PROP_SLICE_INTRA_REFRESH_INTERVAL,
+ g_param_spec_uint ("SliceIntraRefreshInterval",
+ "SliceIntraRefreshInterval", "Set SliceIntraRefreshInterval", 0,
+ G_MAXUINT, DEFAULT_INTRA_REFRESH_FRAME_INTERVAL,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ GST_PARAM_MUTABLE_READY));
+
+ g_object_class_install_property (gobject_class, PROP_TWO_PASS_CBR,
+ g_param_spec_boolean ("EnableTwopassCBR",
+ "Enable Two pass CBR",
+ "Enable two pass CBR while encoding",
+ FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ GST_PARAM_MUTABLE_READY));
+
+ g_object_class_install_property (gobject_class, PROP_NUM_REFERENCE_FRAMES,
+ g_param_spec_uint ("num-Ref-Frames",
+ "Sets the number of reference frames for encoder",
+ "Number of Reference Frames for encoder",
+ 0, MAX_NUM_REFERENCE_FRAMES, DEFAULT_NUM_REFERENCE_FRAMES,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ GST_PARAM_MUTABLE_READY));
+
+ g_object_class_install_property (gobject_class, PROP_ENABLE_LOSSLESS_ENC,
+ g_param_spec_boolean ("enable-lossless",
+ "Enable Lossless encoding",
+ "Enable lossless encoding for YUV444",
+ FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ GST_PARAM_MUTABLE_READY));
+ }
+#endif
+ /* Codec-specific hooks consumed by the GstV4l2VideoEnc base class */
+ baseclass->codec_name = "H264";
+ baseclass->profile_cid = V4L2_CID_MPEG_VIDEO_H264_PROFILE;
+ baseclass->profile_to_string = v4l2_profile_to_string;
+ baseclass->profile_from_string = v4l2_profile_from_string;
+ baseclass->level_cid = V4L2_CID_MPEG_VIDEO_H264_LEVEL;
+ baseclass->level_to_string = v4l2_level_to_string;
+ baseclass->level_from_string = v4l2_level_from_string;
+#ifdef USE_V4L2_TARGET_NV
+ baseclass->set_encoder_properties = set_v4l2_h264_encoder_properties;
+#endif
+}
+
+/* Probing functions */
+/* Return TRUE if a device exposing sink_caps/src_caps is an H.264 encoder
+ * (its src caps intersect the byte-stream H.264 template). */
+gboolean
+gst_v4l2_is_h264_enc (GstCaps * sink_caps, GstCaps * src_caps)
+{
+ return gst_v4l2_is_video_enc (sink_caps, src_caps,
+ gst_static_caps_get (&src_template_caps));
+}
+
+/* Register a v4l2h264enc element for the given device node with the
+ * caps probed from it. */
+void
+gst_v4l2_h264_enc_register (GstPlugin * plugin, const gchar * basename,
+ const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps)
+{
+ gst_v4l2_video_enc_register (plugin, GST_TYPE_V4L2_H264_ENC,
+ "h264", basename, device_path, sink_caps,
+ gst_static_caps_get (&src_template_caps), src_caps);
+}
+
+#ifdef USE_V4L2_TARGET_NV
+/* Lazily register and return the GEnum type backing the "profile"
+ * property. Registration is made thread-safe/once-only by the
+ * g_once_init_enter/leave pair. Note the array terminator is detected
+ * by its NULL value_name, not by the 0 value (BASELINE is also 0). */
+static GType
+gst_v4l2_videnc_profile_get_type (void)
+{
+ static volatile gsize profile = 0;
+ static const GEnumValue profile_type[] = {
+ {V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ "GST_V4L2_H264_VIDENC_BASELINE_PROFILE",
+ "Baseline"},
+ {V4L2_MPEG_VIDEO_H264_PROFILE_MAIN, "GST_V4L2_H264_VIDENC_MAIN_PROFILE",
+ "Main"},
+ {V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, "GST_V4L2_H264_VIDENC_HIGH_PROFILE",
+ "High"},
+ {V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE, "GST_V4L2_H264_VIDENC_HIGH_444_PREDICTIVE",
+ "High444"},
+ {0, NULL, NULL}
+ };
+
+ if (g_once_init_enter (&profile)) {
+ GType tmp =
+ g_enum_register_static ("GstV4l2VideoEncProfileType", profile_type);
+ g_once_init_leave (&profile, tmp);
+ }
+ return (GType) profile;
+}
+
+/* Enable slice-level encode on the device, then program the NV slice
+ * length parameter (macroblock- or bit-based, per slice_length_type).
+ * The requested spacing is also written into the negotiated format's
+ * sizeimage so each slice fits in one buffer. Returns FALSE if either
+ * VIDIOC_S_EXT_CTRLS ioctl fails. */
+gboolean
+gst_v4l2_h264_enc_slice_header_spacing (GstV4l2Object * v4l2object,
+ guint32 slice_header_spacing, enum v4l2_enc_slice_length_type slice_length_type)
+{
+ struct v4l2_ext_control control;
+ struct v4l2_ext_controls ctrls;
+ gint ret;
+ v4l2_enc_slice_length_param param =
+ { slice_length_type, slice_header_spacing };
+
+ memset (&control, 0, sizeof (control));
+ memset (&ctrls, 0, sizeof (ctrls));
+
+ ctrls.count = 1;
+ ctrls.controls = &control;
+ ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+
+ control.id = V4L2_CID_MPEG_VIDEOENC_ENABLE_SLICE_LEVEL_ENCODE;
+ control.value = TRUE;
+
+ ret = v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ if (ret < 0) {
+ /* Distinct messages so the two ioctl failures can be told apart */
+ g_print ("Error while enabling slice level encode\n");
+ return FALSE;
+ }
+
+ memset (&control, 0, sizeof (control));
+ memset (&ctrls, 0, sizeof (ctrls));
+
+ ctrls.count = 1;
+ ctrls.controls = &control;
+ ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+
+ control.id = V4L2_CID_MPEG_VIDEOENC_SLICE_LENGTH_PARAM;
+ /* NV extension passes the parameter struct through the string pointer */
+ control.string = (gchar *) &param;
+
+ ret = v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ if (ret < 0) {
+ g_print ("Error while setting slice length param\n");
+ return FALSE;
+ }
+
+ if (V4L2_TYPE_IS_MULTIPLANAR (v4l2object->type)) {
+ v4l2object->format.fmt.pix_mp.plane_fmt[0].sizeimage = slice_header_spacing;
+ } else {
+ v4l2object->format.fmt.pix.sizeimage = slice_header_spacing;
+ }
+
+ return TRUE;
+}
+
+/* Push every non-default cached property value down to the driver via
+ * V4L2 extended controls. Called by the base class once the output
+ * device is open, before streaming starts. Returns FALSE on the first
+ * control that fails. Note: values left at 0/FALSE are intentionally
+ * not programmed (the driver default is kept). */
+gboolean
+set_v4l2_h264_encoder_properties (GstVideoEncoder * encoder)
+{
+ GstV4l2H264Enc *self = GST_V4L2_H264_ENC (encoder);
+ GstV4l2VideoEnc *video_enc = GST_V4L2_VIDEO_ENC (encoder);
+
+ if (!GST_V4L2_IS_OPEN (video_enc->v4l2output)) {
+ g_print ("V4L2 device is not open\n");
+ return FALSE;
+ }
+
+ if (self->profile) {
+ if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ self->profile)) {
+ g_print ("S_EXT_CTRLS for H264_PROFILE failed\n");
+ return FALSE;
+ }
+ }
+
+ if (self->nBFrames) {
+ if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+ V4L2_CID_MPEG_VIDEOENC_NUM_BFRAMES,
+ self->nBFrames)) {
+ g_print ("S_EXT_CTRLS for NUM_BFRAMES failed\n");
+ return FALSE;
+ }
+ }
+
+ if (self->insert_vui) {
+ if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+ V4L2_CID_MPEG_VIDEOENC_INSERT_VUI, 1)) {
+ g_print ("S_EXT_CTRLS for INSERT_VUI failed\n");
+ return FALSE;
+ }
+ }
+
+ if (is_cuvid == TRUE) {
+ if (self->extended_colorformat) {
+ if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+ V4L2_CID_MPEG_VIDEOENC_EXTEDED_COLORFORMAT, 1)) {
+ g_print ("S_EXT_CTRLS for EXTENDED_COLORFORMAT failed\n");
+ return FALSE;
+ }
+ }
+ }
+ if (self->insert_aud) {
+ if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+ V4L2_CID_MPEG_VIDEOENC_INSERT_AUD, 1)) {
+ g_print ("S_EXT_CTRLS for INSERT_AUD failed\n");
+ return FALSE;
+ }
+ }
+
+ if (self->insert_sps_pps) {
+ if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+ V4L2_CID_MPEG_VIDEOENC_INSERT_SPS_PPS_AT_IDR, 1)) {
+ g_print ("S_EXT_CTRLS for SPS_PPS_AT_IDR failed\n");
+ return FALSE;
+ }
+ }
+
+ if (self->disable_cabac_entropy_coding) {
+ if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+ V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC)) {
+ g_print ("S_EXT_CTRLS for ENTROPY_MODE failed\n");
+ return FALSE;
+ }
+ }
+
+ if (self->slice_header_spacing) {
+ enum v4l2_enc_slice_length_type slice_length_type = V4L2_ENC_SLICE_LENGTH_TYPE_MBLK;
+ if (self->bit_packetization) {
+ slice_length_type = V4L2_ENC_SLICE_LENGTH_TYPE_BITS;
+ }
+ /* Unlike the other controls this is applied to the capture-side
+ * object, since it also sizes the capture-plane buffers. */
+ if (!gst_v4l2_h264_enc_slice_header_spacing (video_enc->v4l2capture,
+ self->slice_header_spacing,
+ slice_length_type)) {
+ g_print ("S_EXT_CTRLS for SLICE_LENGTH_PARAM failed\n");
+ return FALSE;
+ }
+ }
+
+ if (self->EnableMVBufferMeta) {
+ if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+ V4L2_CID_MPEG_VIDEOENC_ENABLE_METADATA_MV,
+ self->EnableMVBufferMeta)) {
+ g_print ("S_EXT_CTRLS for ENABLE_METADATA_MV failed\n");
+ return FALSE;
+ }
+ }
+
+ if (self->SliceIntraRefreshInterval) {
+ if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+ V4L2_CID_MPEG_VIDEOENC_SLICE_INTRAREFRESH_PARAM,
+ self->SliceIntraRefreshInterval)) {
+ g_print ("S_EXT_CTRLS for SLICE_INTRAREFRESH_PARAM failed\n");
+ return FALSE;
+ }
+ }
+
+ if (self->EnableTwopassCBR) {
+ if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+ V4L2_CID_MPEG_VIDEOENC_TWO_PASS_CBR, 1)) {
+ g_print ("S_EXT_CTRLS for TWO_PASS_CBR failed\n");
+ return FALSE;
+ }
+ }
+
+ if (self->nRefFrames) {
+ if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+ V4L2_CID_MPEG_VIDEOENC_NUM_REFERENCE_FRAMES,
+ self->nRefFrames)) {
+ g_print ("S_EXT_CTRLS for NUM_REFERENCE_FRAMES failed\n");
+ return FALSE;
+ }
+ }
+
+ if (self->poc_type) {
+ if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+ V4L2_CID_MPEG_VIDEOENC_POC_TYPE, self->poc_type)) {
+ g_print ("S_EXT_CTRLS for POC_TYPE failed\n");
+ return FALSE;
+ }
+ }
+
+ if (self->enableLossless) {
+ if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+ V4L2_CID_MPEG_VIDEOENC_ENABLE_LOSSLESS, self->enableLossless)) {
+ g_print ("S_EXT_CTRLS for ENABLE_LOSSLESS failed\n");
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+#endif
diff --git a/gst-v4l2/gstv4l2h264enc.h b/gst-v4l2/gstv4l2h264enc.h
new file mode 100644
index 0000000..db2f2a5
--- /dev/null
+++ b/gst-v4l2/gstv4l2h264enc.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2014 SUMOMO Computer Association.
+ * Author: ayaka
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __GST_V4L2_H264_ENC_H__
+#define __GST_V4L2_H264_ENC_H__
+
+/* include target restored; it was stripped by markup extraction */
+#include <gst/gst.h>
+#include "gstv4l2videoenc.h"
+
+G_BEGIN_DECLS
+/* Standard GObject boilerplate macros for the v4l2h264enc type */
+#define GST_TYPE_V4L2_H264_ENC \
+ (gst_v4l2_h264_enc_get_type())
+#define GST_V4L2_H264_ENC(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_V4L2_H264_ENC,GstV4l2H264Enc))
+#define GST_V4L2_H264_ENC_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_V4L2_H264_ENC,GstV4l2H264EncClass))
+#define GST_IS_V4L2_H264_ENC(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_V4L2_H264_ENC))
+/* Fixed: parameter was named 'obj' while the expansion used 'klass',
+ * so any use of this macro failed to compile. */
+#define GST_IS_V4L2_H264_ENC_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_V4L2_H264_ENC))
+typedef struct _GstV4l2H264Enc GstV4l2H264Enc;
+typedef struct _GstV4l2H264EncClass GstV4l2H264EncClass;
+
+/* Instance struct: GstV4l2VideoEnc base plus cached values of the
+ * NV-specific GObject properties; these are flushed to the driver by
+ * set_v4l2_h264_encoder_properties() once the device is open. */
+struct _GstV4l2H264Enc
+{
+ GstV4l2VideoEnc parent;
+#ifdef USE_V4L2_TARGET_NV
+ guint profile;
+ guint nBFrames;
+ guint nRefFrames;
+ gboolean insert_sps_pps;
+ gboolean insert_aud;
+ gboolean insert_vui;
+ gboolean extended_colorformat;
+ gboolean EnableTwopassCBR;
+ gboolean SliceIntraRefreshEnable;
+ guint SliceIntraRefreshInterval;
+ gboolean disable_cabac_entropy_coding;
+ gboolean bit_packetization;
+ guint32 slice_header_spacing;
+ gboolean EnableMVBufferMeta;
+ guint poc_type;
+ gboolean enableLossless;
+#endif
+};
+
+/* Class struct: no codec-specific vfuncs; hooks live in the base class. */
+struct _GstV4l2H264EncClass
+{
+ GstV4l2VideoEncClass parent_class;
+};
+
+GType gst_v4l2_h264_enc_get_type (void);
+
+/* Device probing / element registration entry points (see .c file) */
+gboolean gst_v4l2_is_h264_enc (GstCaps * sink_caps, GstCaps * src_caps);
+
+void gst_v4l2_h264_enc_register (GstPlugin * plugin, const gchar * basename,
+ const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps);
+
+G_END_DECLS
+#endif /* __GST_V4L2_H264_ENC_H__ */
diff --git a/gst-v4l2/gstv4l2h265enc.c b/gst-v4l2/gstv4l2h265enc.c
new file mode 100644
index 0000000..b6666f5
--- /dev/null
+++ b/gst-v4l2/gstv4l2h265enc.c
@@ -0,0 +1,591 @@
+/*
+ * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+/* NOTE(review): the angle-bracket include targets below were stripped by
+ * markup extraction; reconstructed from the sibling gstv4l2h264enc.c
+ * include list — confirm against the original source. */
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "gstv4l2object.h"
+#include "gstv4l2h265enc.h"
+
+#include <string.h>
+#include <gst/gst-i18n-plugin.h>
+
+GST_DEBUG_CATEGORY_STATIC (gst_v4l2_h265_enc_debug);
+#define GST_CAT_DEFAULT gst_v4l2_h265_enc_debug
+
+
+/* Fixed src template: byte-stream H.265 with AU alignment */
+static GstStaticCaps src_template_caps =
+GST_STATIC_CAPS ("video/x-h265, stream-format=(string) byte-stream, "
+ "alignment=(string) au");
+
+static GType
+gst_v4l2_videnc_profile_get_type (void);
+
+#define GST_TYPE_V4L2_VID_ENC_PROFILE (gst_v4l2_videnc_profile_get_type ())
+
+/* prototypes */
+gboolean set_v4l2_h265_encoder_properties (GstVideoEncoder * encoder);
+gboolean gst_v4l2_h265_enc_slice_header_spacing (GstV4l2Object * v4l2object,
+ guint32 slice_header_spacing, enum v4l2_enc_slice_length_type slice_length_type);
+void set_h265_video_enc_property (GstV4l2Object * v4l2object, guint label,
+ gint param);
+
+/* Property IDs; V4L2_STD_OBJECT_PROPS expands to the shared base-object
+ * property IDs so the NV-specific ones follow them. */
+enum
+{
+ PROP_0,
+ V4L2_STD_OBJECT_PROPS,
+ PROP_INSERT_SPS_PPS,
+ PROP_PROFILE,
+ PROP_INSERT_VUI,
+ PROP_EXTENDED_COLORFORMAT,
+ PROP_INSERT_AUD,
+ PROP_BIT_PACKETIZATION,
+ PROP_SLICE_HEADER_SPACING,
+ PROP_SLICE_INTRA_REFRESH_INTERVAL,
+ PROP_TWO_PASS_CBR,
+ PROP_ENABLE_MV_META,
+ PROP_NUM_BFRAMES,
+ PROP_NUM_REFERENCE_FRAMES,
+ PROP_ENABLE_LOSSLESS_ENC
+};
+
+/* Property defaults and hardware limits */
+#define DEFAULT_PROFILE V4L2_MPEG_VIDEO_H265_PROFILE_MAIN
+#define DEFAULT_BIT_PACKETIZATION FALSE
+#define DEFAULT_SLICE_HEADER_SPACING 0
+#define DEFAULT_INTRA_REFRESH_FRAME_INTERVAL 60
+#define DEFAULT_NUM_B_FRAMES 0
+#define MAX_NUM_B_FRAMES 2
+#define DEFAULT_NUM_REFERENCE_FRAMES 1
+#define MAX_NUM_REFERENCE_FRAMES 8
+
+#define gst_v4l2_h265_enc_parent_class parent_class
+G_DEFINE_TYPE (GstV4l2H265Enc, gst_v4l2_h265_enc, GST_TYPE_V4L2_VIDEO_ENC);
+
+/* GObject set_property handler: cache each NV-specific H.265 property.
+ * Most values are only pushed to the driver later by
+ * set_v4l2_h265_encoder_properties(); "profile" is additionally applied
+ * immediately when the output device is already open. */
+static void
+gst_v4l2_h265_enc_set_property (GObject * object,
+ guint prop_id, const GValue * value, GParamSpec * pspec)
+{
+ GstV4l2H265Enc *self = GST_V4L2_H265_ENC (object);
+ GstV4l2VideoEnc *video_enc = GST_V4L2_VIDEO_ENC (object);
+
+ switch (prop_id) {
+ case PROP_INSERT_SPS_PPS:
+ self->insert_sps_pps = g_value_get_boolean (value);
+ break;
+ case PROP_PROFILE:
+ self->profile = g_value_get_enum (value);
+ if (GST_V4L2_IS_OPEN(video_enc->v4l2output)) {
+ if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+ V4L2_CID_MPEG_VIDEO_H265_PROFILE, self->profile)) {
+ g_print ("S_EXT_CTRLS for H265_PROFILE failed\n");
+ }
+ }
+ break;
+ case PROP_INSERT_AUD:
+ self->insert_aud = g_value_get_boolean (value);
+ break;
+ case PROP_INSERT_VUI:
+ self->insert_vui = g_value_get_boolean (value);
+ break;
+ /* extended-colorformat property is available for cuvid path only*/
+ case PROP_EXTENDED_COLORFORMAT:
+ self->extended_colorformat = g_value_get_boolean (value);
+ break;
+ case PROP_BIT_PACKETIZATION:
+ self->bit_packetization = g_value_get_boolean (value);
+ break;
+ case PROP_SLICE_HEADER_SPACING:
+ self->slice_header_spacing = g_value_get_uint64 (value);
+ break;
+ case PROP_SLICE_INTRA_REFRESH_INTERVAL:
+ self->SliceIntraRefreshInterval = g_value_get_uint (value);
+ break;
+ case PROP_TWO_PASS_CBR:
+ self->EnableTwopassCBR = g_value_get_boolean (value);
+ break;
+ case PROP_ENABLE_MV_META:
+ /* Mirrored onto the capture object so the buffer pool attaches
+ * MV metadata to output buffers. */
+ self->EnableMVBufferMeta = g_value_get_boolean (value);
+ video_enc->v4l2capture->enableMVBufferMeta = g_value_get_boolean (value);
+ break;
+ case PROP_NUM_BFRAMES:
+ self->nBFrames = g_value_get_uint (value);
+ if (self->nBFrames && (self->nRefFrames == DEFAULT_NUM_REFERENCE_FRAMES)) {
+ // Minimum 2 Ref-Frames are required for B-frames encoding
+ self->nRefFrames = 2;
+ }
+ break;
+ case PROP_NUM_REFERENCE_FRAMES:
+ self->nRefFrames = g_value_get_uint (value);
+ break;
+ case PROP_ENABLE_LOSSLESS_ENC:
+ self->enableLossless = g_value_get_boolean (value);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
+/* GObject get_property handler: report the cached value of each
+ * NV-specific H.265 property (not the live driver state). */
+static void
+gst_v4l2_h265_enc_get_property (GObject * object,
+ guint prop_id, GValue * value, GParamSpec * pspec)
+{
+ GstV4l2H265Enc *self = GST_V4L2_H265_ENC (object);
+
+ switch (prop_id) {
+ case PROP_INSERT_SPS_PPS:
+ g_value_set_boolean (value, self->insert_sps_pps);
+ break;
+ case PROP_PROFILE:
+ g_value_set_enum (value, self->profile);
+ break;
+ case PROP_INSERT_AUD:
+ g_value_set_boolean (value, self->insert_aud);
+ break;
+ case PROP_INSERT_VUI:
+ g_value_set_boolean (value, self->insert_vui);
+ break;
+ /* extended-colorformat property is available for cuvid path only*/
+ case PROP_EXTENDED_COLORFORMAT:
+ g_value_set_boolean (value, self->extended_colorformat);
+ break;
+ case PROP_BIT_PACKETIZATION:
+ g_value_set_boolean (value, self->bit_packetization);
+ break;
+ case PROP_SLICE_HEADER_SPACING:
+ g_value_set_uint64 (value, self->slice_header_spacing);
+ break;
+ case PROP_SLICE_INTRA_REFRESH_INTERVAL:
+ g_value_set_uint (value, self->SliceIntraRefreshInterval);
+ break;
+ case PROP_TWO_PASS_CBR:
+ g_value_set_boolean (value, self->EnableTwopassCBR);
+ break;
+ case PROP_ENABLE_MV_META:
+ g_value_set_boolean (value, self->EnableMVBufferMeta);
+ break;
+ case PROP_NUM_BFRAMES:
+ g_value_set_uint (value, self->nBFrames);
+ break;
+ case PROP_NUM_REFERENCE_FRAMES:
+ g_value_set_uint (value, self->nRefFrames);
+ break;
+ case PROP_ENABLE_LOSSLESS_ENC:
+ g_value_set_boolean (value, self->enableLossless);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
+/* Map a GStreamer caps profile string to the NV-extension
+ * V4L2_MPEG_VIDEO_H265_PROFILE_* enum. Returns -1 and logs a warning
+ * for unrecognised strings. Inverse of v4l2_profile_to_string(). */
+static gint
+v4l2_profile_from_string (const gchar * profile)
+{
+ gint v4l2_profile = -1;
+
+ if (g_str_equal (profile, "main")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H265_PROFILE_MAIN;
+ } else if (g_str_equal (profile, "main10")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H265_PROFILE_MAIN10;
+ } else if (g_str_equal (profile, "mainstillpicture")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H265_PROFILE_MAINSTILLPICTURE;
+ } else if (g_str_equal (profile, "frext")) {
+ v4l2_profile = V4L2_MPEG_VIDEO_H265_PROFILE_FREXT;
+ } else {
+ GST_WARNING ("Unsupported profile string '%s'", profile);
+ }
+ return v4l2_profile;
+}
+
+/* Map an NV-extension V4L2 H.265 profile enum to the GStreamer caps
+ * profile string; NULL (with a warning) for unknown values. */
+static const gchar *
+v4l2_profile_to_string (gint v4l2_profile)
+{
+ switch (v4l2_profile) {
+ case V4L2_MPEG_VIDEO_H265_PROFILE_MAIN:
+ return "main";
+ case V4L2_MPEG_VIDEO_H265_PROFILE_MAIN10:
+ return "main10";
+ case V4L2_MPEG_VIDEO_H265_PROFILE_MAINSTILLPICTURE:
+ return "mainstillpicture";
+ case V4L2_MPEG_VIDEO_H265_PROFILE_FREXT:
+ return "frext";
+ default:
+ GST_WARNING ("Unsupported V4L2 profile %i", v4l2_profile);
+ break;
+ }
+ return NULL;
+}
+
+/* Stub: H.265 level mapping is not implemented yet (no H265 level enums
+ * in videodev2); always returns -1, i.e. "no level restriction". */
+static gint
+v4l2_level_from_string (const gchar * level)
+{
+ gint v4l2_level = -1;
+
+ //TODO : Since videodev2 file does not list H265 profiles
+ //we need to add profiles inside v4l2_nv_extensions.h
+ //and use them here.
+
+ return v4l2_level;
+}
+
+/* Stub counterpart of v4l2_level_from_string(): levels unsupported,
+ * always NULL. */
+static const gchar *
+v4l2_level_to_string (gint v4l2_level)
+{
+ return NULL;
+}
+
+/* Instance init: set NV-specific property defaults. Remaining fields
+ * (EnableTwopassCBR, EnableMVBufferMeta, SliceIntraRefreshInterval, ...)
+ * rely on GObject zero-initialization. */
+static void
+gst_v4l2_h265_enc_init (GstV4l2H265Enc * self)
+{
+ self->insert_sps_pps = FALSE;
+ self->profile = DEFAULT_PROFILE;
+ self->insert_aud = FALSE;
+ self->insert_vui = FALSE;
+ self->extended_colorformat = FALSE;
+ self->bit_packetization = DEFAULT_BIT_PACKETIZATION;
+ self->slice_header_spacing = DEFAULT_SLICE_HEADER_SPACING;
+ self->nRefFrames = 1;
+ self->nBFrames = 0;
+ self->enableLossless = FALSE;
+}
+
+/* GObject class initializer for the V4L2 H.265 encoder element.
+ * Wires up the property vfuncs, registers the GObject properties
+ * (platform-gated: dGPU/cuvid vs Tegra) and fills in the codec-specific
+ * hooks consumed by the GstV4l2VideoEnc base class. */
+static void
+gst_v4l2_h265_enc_class_init (GstV4l2H265EncClass * klass)
+{
+  GstElementClass *element_class;
+  GObjectClass *gobject_class;
+  GstV4l2VideoEncClass *baseclass;
+
+  parent_class = g_type_class_peek_parent (klass);
+
+  element_class = (GstElementClass *) klass;
+  gobject_class = (GObjectClass *) klass;
+  baseclass = (GstV4l2VideoEncClass *) (klass);
+
+  GST_DEBUG_CATEGORY_INIT (gst_v4l2_h265_enc_debug, "v4l2h265enc", 0,
+      "V4L2 H.265 Encoder");
+
+  gst_element_class_set_static_metadata (element_class,
+      "V4L2 H.265 Encoder",
+      "Codec/Encoder/Video",
+      "Encode H.265 video streams via V4L2 API",
+      "Viranjan Pagar , Amit Pandya ");
+
+  gobject_class->set_property =
+      GST_DEBUG_FUNCPTR (gst_v4l2_h265_enc_set_property);
+  gobject_class->get_property =
+      GST_DEBUG_FUNCPTR (gst_v4l2_h265_enc_get_property);
+
+#ifdef USE_V4L2_TARGET_NV
+  /* "profile" is common to both NVIDIA platforms (dGPU and Tegra). */
+  g_object_class_install_property (gobject_class, PROP_PROFILE,
+      g_param_spec_enum ("profile", "profile",
+          "Set profile for v4l2 encode",
+          GST_TYPE_V4L2_VID_ENC_PROFILE, DEFAULT_PROFILE,
+          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+          GST_PARAM_MUTABLE_READY));
+
+  /* Property sets diverge per platform: is_cuvid==TRUE is the dGPU
+   * (NVCUVID) path, is_cuvid==FALSE the Tegra path. */
+  if (is_cuvid == TRUE) {
+    g_object_class_install_property (gobject_class, PROP_EXTENDED_COLORFORMAT,
+        g_param_spec_boolean ("extended-colorformat",
+            "Set Extended ColorFormat",
+            "Set Extended ColorFormat pixel values 0 to 255 in VUI info",
+            FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+  } else if (is_cuvid == FALSE) {
+    /* Tegra-only controls below. */
+    g_object_class_install_property (gobject_class, PROP_INSERT_SPS_PPS,
+        g_param_spec_boolean ("insert-sps-pps",
+            "Insert H.265 SPS, PPS",
+            "Insert H.265 SPS, PPS at every IDR frame",
+            FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+    g_object_class_install_property (gobject_class, PROP_INSERT_VUI,
+        g_param_spec_boolean ("insert-vui",
+            "Insert H.265 VUI",
+            "Insert H.265 VUI(Video Usability Information) in SPS",
+            FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+    g_object_class_install_property (gobject_class, PROP_INSERT_AUD,
+        g_param_spec_boolean ("insert-aud",
+            "Insert H.265 AUD",
+            "Insert H.265 Access Unit Delimiter(AUD)",
+            FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+    g_object_class_install_property (gobject_class, PROP_BIT_PACKETIZATION,
+        g_param_spec_boolean ("bit-packetization", "Bit Based Packetization",
+            "Whether or not Packet size is based upon Number Of bits",
+            DEFAULT_BIT_PACKETIZATION,
+            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+            GST_PARAM_MUTABLE_READY));
+
+    g_object_class_install_property (gobject_class, PROP_SLICE_HEADER_SPACING,
+        g_param_spec_uint64 ("slice-header-spacing", "Slice Header Spacing",
+            "Slice Header Spacing number of macroblocks/bits in one packet",
+            0, G_MAXUINT64, DEFAULT_SLICE_HEADER_SPACING,
+            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+            GST_PARAM_MUTABLE_READY));
+
+    g_object_class_install_property (gobject_class, PROP_ENABLE_MV_META,
+        g_param_spec_boolean ("EnableMVBufferMeta",
+            "Enable Motion Vector Meta data",
+            "Enable Motion Vector Meta data for encoding",
+            FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+            GST_PARAM_MUTABLE_READY));
+
+    g_object_class_install_property (gobject_class,
+        PROP_SLICE_INTRA_REFRESH_INTERVAL,
+        g_param_spec_uint ("SliceIntraRefreshInterval",
+            "SliceIntraRefreshInterval", "Set SliceIntraRefreshInterval", 0,
+            G_MAXUINT, DEFAULT_INTRA_REFRESH_FRAME_INTERVAL,
+            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+            GST_PARAM_MUTABLE_READY));
+
+    g_object_class_install_property (gobject_class, PROP_TWO_PASS_CBR,
+        g_param_spec_boolean ("EnableTwopassCBR",
+            "Enable Two pass CBR",
+            "Enable two pass CBR while encoding",
+            FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+            GST_PARAM_MUTABLE_READY));
+
+    g_object_class_install_property (gobject_class, PROP_NUM_BFRAMES,
+        g_param_spec_uint ("num-B-Frames",
+            "B Frames between two reference frames",
+            "Number of B Frames between two reference frames (not recommended)(Supported only on Xavier)",
+            0, MAX_NUM_B_FRAMES, DEFAULT_NUM_B_FRAMES,
+            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+            GST_PARAM_MUTABLE_READY));
+
+    g_object_class_install_property (gobject_class, PROP_NUM_REFERENCE_FRAMES,
+        g_param_spec_uint ("num-Ref-Frames",
+            "Sets the number of reference frames for encoder",
+            "Number of Reference Frames for encoder",
+            0, MAX_NUM_REFERENCE_FRAMES, DEFAULT_NUM_REFERENCE_FRAMES,
+            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+            GST_PARAM_MUTABLE_READY));
+
+    g_object_class_install_property (gobject_class, PROP_ENABLE_LOSSLESS_ENC,
+        g_param_spec_boolean ("enable-lossless",
+            "Enable Lossless encoding",
+            "Enable lossless encoding for YUV444",
+            FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+            GST_PARAM_MUTABLE_READY));
+  }
+#endif
+
+  /* Codec-specific hooks consumed by the GstV4l2VideoEnc base class. */
+  baseclass->codec_name = "H265";
+  baseclass->profile_cid = V4L2_CID_MPEG_VIDEO_H265_PROFILE;
+  baseclass->profile_to_string = v4l2_profile_to_string;
+  baseclass->profile_from_string = v4l2_profile_from_string;
+  //baseclass->level_cid = V4L2_CID_MPEG_VIDEO_H265_LEVEL;
+  baseclass->level_to_string = v4l2_level_to_string;
+  baseclass->level_from_string = v4l2_level_from_string;
+  baseclass->set_encoder_properties = set_v4l2_h265_encoder_properties;
+}
+
+/* Probing functions */
+/* Returns TRUE if a device exposing @sink_caps/@src_caps looks like an
+ * H.265 encoder, i.e. its src caps intersect our H.265 template caps. */
+gboolean
+gst_v4l2_is_h265_enc (GstCaps * sink_caps, GstCaps * src_caps)
+{
+  return gst_v4l2_is_video_enc (sink_caps, src_caps,
+      gst_static_caps_get (&src_template_caps));
+}
+
+/* Registers a "v4l2h265enc"-style element for the device at @device_path,
+ * delegating the actual subclass creation to the video-encoder base. */
+void
+gst_v4l2_h265_enc_register (GstPlugin * plugin, const gchar * basename,
+    const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps)
+{
+  gst_v4l2_video_enc_register (plugin, GST_TYPE_V4L2_H265_ENC,
+      "h265", basename, device_path, sink_caps,
+      gst_static_caps_get (&src_template_caps), src_caps);
+}
+
+/* Lazily registers the GEnum backing the "profile" property.
+ * Thread-safe one-time registration via g_once_init_enter/leave. */
+static GType
+gst_v4l2_videnc_profile_get_type (void)
+{
+  static volatile gsize profile = 0;
+  /* Nick strings ("Main", "Main10", "FREXT") are what users pass on the
+   * command line; values map directly to V4L2 profile control values. */
+  static const GEnumValue profile_type[] = {
+    {V4L2_MPEG_VIDEO_H265_PROFILE_MAIN,
+        "GST_V4L2_H265_VIDENC_MAIN_PROFILE", "Main"},
+    {V4L2_MPEG_VIDEO_H265_PROFILE_MAIN10,
+        "GST_V4L2_H265_VIDENC_MAIN10_PROFILE", "Main10"},
+    {V4L2_MPEG_VIDEO_H265_PROFILE_FREXT,
+        "GST_V4L2_H265_VIDENC_FREXT_PROFILE", "FREXT"},
+    {0, NULL, NULL}
+  };
+
+  if (g_once_init_enter (&profile)) {
+    GType tmp =
+        g_enum_register_static ("GstV4L2VideoEncProfileType", profile_type);
+    g_once_init_leave (&profile, tmp);
+  }
+  return (GType) profile;
+}
+
+/* Programs slice-size packetization through the NVIDIA-specific
+ * V4L2_CID_MPEG_VIDEOENC_SLICE_LENGTH_PARAM extended control.
+ * @slice_length_type selects bit-based vs macroblock-based slicing.
+ * Returns TRUE on success, FALSE if the ioctl fails. */
+gboolean
+gst_v4l2_h265_enc_slice_header_spacing (GstV4l2Object * v4l2object,
+    guint32 slice_header_spacing, enum v4l2_enc_slice_length_type slice_length_type)
+{
+  struct v4l2_ext_control control;
+  struct v4l2_ext_controls ctrls;
+  gint ret;
+  /* Driver-side parameter block; initializer order matches the
+   * v4l2_enc_slice_length_param layout (type first, then size). */
+  v4l2_enc_slice_length_param param =
+      { slice_length_type, slice_header_spacing };
+
+  memset (&control, 0, sizeof (control));
+  memset (&ctrls, 0, sizeof (ctrls));
+
+  ctrls.count = 1;
+  ctrls.controls = &control;
+  ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+
+  control.id = V4L2_CID_MPEG_VIDEOENC_SLICE_LENGTH_PARAM;
+  /* NVIDIA extension: struct payloads are passed through the ext-control
+   * 'string' pointer rather than value/value64. */
+  control.string = (gchar *) &param;
+
+  ret = v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+  if (ret < 0) {
+    g_print ("Error while setting spacing and packetization\n");
+    return FALSE;
+  }
+  return TRUE;
+}
+
+/* Pushes every cached H.265 property from the element instance down to
+ * the driver via S_EXT_CTRLS, one control at a time. Invoked by the
+ * base class through baseclass->set_encoder_properties once the output
+ * device is open. Returns FALSE (and logs) on the first failing control;
+ * controls already applied are not rolled back. Zero/FALSE-valued
+ * properties are skipped, leaving the driver defaults in place. */
+gboolean
+set_v4l2_h265_encoder_properties (GstVideoEncoder * encoder)
+{
+  GstV4l2H265Enc *self = GST_V4L2_H265_ENC (encoder);
+  GstV4l2VideoEnc *video_enc = GST_V4L2_VIDEO_ENC (encoder);
+
+  if (!GST_V4L2_IS_OPEN (video_enc->v4l2output)) {
+    g_print ("V4L2 device is not open\n");
+    return FALSE;
+  }
+
+  if (self->insert_sps_pps) {
+    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+            V4L2_CID_MPEG_VIDEOENC_INSERT_SPS_PPS_AT_IDR, 1)) {
+      g_print ("S_EXT_CTRLS for INSERT_SPS_PPS_AT_IDR failed\n");
+      return FALSE;
+    }
+  }
+
+  if (self->profile) {
+    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+            V4L2_CID_MPEG_VIDEO_H265_PROFILE, self->profile)) {
+      g_print ("S_EXT_CTRLS for H265_PROFILE failed\n");
+      return FALSE;
+    }
+  }
+
+  if (self->insert_vui) {
+    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+            V4L2_CID_MPEG_VIDEOENC_INSERT_VUI, 1)) {
+      g_print ("S_EXT_CTRLS for INSERT_VUI failed\n");
+      return FALSE;
+    }
+  }
+
+  if (self->extended_colorformat) {
+    /* Control id spelling "EXTEDED" matches the kernel header. */
+    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+            V4L2_CID_MPEG_VIDEOENC_EXTEDED_COLORFORMAT, 1)) {
+      g_print ("S_EXT_CTRLS for EXTENDED_COLORFORMAT failed\n");
+      return FALSE;
+    }
+  }
+
+  if (self->insert_aud) {
+    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+            V4L2_CID_MPEG_VIDEOENC_INSERT_AUD, 1)) {
+      g_print ("S_EXT_CTRLS for INSERT_AUD failed\n");
+      return FALSE;
+    }
+  }
+
+  if (self->slice_header_spacing) {
+    /* bit-packetization toggles bits- vs macroblock-based slice sizing. */
+    enum v4l2_enc_slice_length_type slice_length_type = V4L2_ENC_SLICE_LENGTH_TYPE_MBLK;
+    if (self->bit_packetization) {
+      slice_length_type = V4L2_ENC_SLICE_LENGTH_TYPE_BITS;
+    }
+    if (!gst_v4l2_h265_enc_slice_header_spacing (video_enc->v4l2output,
+            self->slice_header_spacing, slice_length_type)) {
+      g_print ("S_EXT_CTRLS for SLICE_LENGTH_PARAM failed\n");
+      return FALSE;
+    }
+  }
+
+  if (self->EnableMVBufferMeta) {
+    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+            V4L2_CID_MPEG_VIDEOENC_ENABLE_METADATA_MV,
+            self->EnableMVBufferMeta)) {
+      g_print ("S_EXT_CTRLS for ENABLE_METADATA_MV failed\n");
+      return FALSE;
+    }
+  }
+
+  if (self->SliceIntraRefreshInterval) {
+    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+            V4L2_CID_MPEG_VIDEOENC_SLICE_INTRAREFRESH_PARAM,
+            self->SliceIntraRefreshInterval)) {
+      g_print ("S_EXT_CTRLS for SLICE_INTRAREFRESH_PARAM failed\n");
+      return FALSE;
+    }
+  }
+
+  if (self->EnableTwopassCBR) {
+    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+            V4L2_CID_MPEG_VIDEOENC_TWO_PASS_CBR, 1)) {
+      g_print ("S_EXT_CTRLS for TWO_PASS_CBR failed\n");
+      return FALSE;
+    }
+  }
+
+  if (self->nBFrames) {
+    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+            V4L2_CID_MPEG_VIDEOENC_NUM_BFRAMES,
+            self->nBFrames)) {
+      g_print ("S_EXT_CTRLS for NUM_BFRAMES failed\n");
+      return FALSE;
+    }
+  }
+
+  if (self->nRefFrames) {
+    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+            V4L2_CID_MPEG_VIDEOENC_NUM_REFERENCE_FRAMES,
+            self->nRefFrames)) {
+      g_print ("S_EXT_CTRLS for NUM_REFERENCE_FRAMES failed\n");
+      return FALSE;
+    }
+  }
+
+  if (self->enableLossless) {
+    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
+            V4L2_CID_MPEG_VIDEOENC_ENABLE_LOSSLESS, self->enableLossless)) {
+      g_print ("S_EXT_CTRLS for ENABLE_LOSSLESS failed\n");
+      return FALSE;
+    }
+  }
+
+  return TRUE;
+}
diff --git a/gst-v4l2/gstv4l2h265enc.h b/gst-v4l2/gstv4l2h265enc.h
new file mode 100644
index 0000000..bd7de19
--- /dev/null
+++ b/gst-v4l2/gstv4l2h265enc.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __GST_V4L2_H265_ENC_H__
+#define __GST_V4L2_H265_ENC_H__
+
+#include
+#include "gstv4l2videoenc.h"
+
+G_BEGIN_DECLS
+/* Standard GObject boilerplate for the GstV4l2H265Enc type. */
+#define GST_TYPE_V4L2_H265_ENC \
+  (gst_v4l2_h265_enc_get_type())
+#define GST_V4L2_H265_ENC(obj) \
+  (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_V4L2_H265_ENC,GstV4l2H265Enc))
+#define GST_V4L2_H265_ENC_CLASS(klass) \
+  (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_V4L2_H265_ENC,GstV4l2H265EncClass))
+#define GST_IS_V4L2_H265_ENC(obj) \
+  (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_V4L2_H265_ENC))
+/* FIX: the macro parameter was previously named 'obj' while the body
+ * expanded 'klass', so any use failed to compile (or silently bound to an
+ * unrelated 'klass' in the caller's scope). */
+#define GST_IS_V4L2_H265_ENC_CLASS(klass) \
+  (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_V4L2_H265_ENC))
+typedef struct _GstV4l2H265Enc GstV4l2H265Enc;
+typedef struct _GstV4l2H265EncClass GstV4l2H265EncClass;
+
+/* Instance struct: the base encoder plus cached H.265 property values,
+ * applied to the driver by set_v4l2_h265_encoder_properties(). */
+struct _GstV4l2H265Enc
+{
+  GstV4l2VideoEnc parent;
+  gboolean insert_sps_pps;        /* insert SPS/PPS at every IDR frame */
+  guint profile;                  /* value for V4L2_CID_MPEG_VIDEO_H265_PROFILE */
+  guint nBFrames;                 /* B-frames between two reference frames */
+  guint nRefFrames;               /* number of reference frames */
+  gboolean insert_aud;            /* insert Access Unit Delimiter NALs */
+  gboolean insert_vui;            /* insert VUI into the SPS */
+  gboolean extended_colorformat;  /* full-range (0-255) pixel values in VUI */
+  guint SliceIntraRefreshInterval;
+  gboolean EnableTwopassCBR;      /* two-pass CBR rate control */
+  gboolean bit_packetization;     /* slice size counted in bits, not mblks */
+  guint32 slice_header_spacing;   /* macroblocks/bits per slice packet */
+  gboolean EnableMVBufferMeta;    /* attach motion-vector metadata */
+  gboolean enableLossless;        /* lossless encoding (YUV444) */
+};
+
+struct _GstV4l2H265EncClass
+{
+  GstV4l2VideoEncClass parent_class;
+};
+
+GType gst_v4l2_h265_enc_get_type (void);
+
+/* TRUE if caps describe an H.265-capable V4L2 encoder device. */
+gboolean gst_v4l2_is_h265_enc (GstCaps * sink_caps, GstCaps * src_caps);
+
+void gst_v4l2_h265_enc_register (GstPlugin * plugin, const gchar * basename,
+    const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps);
+
+G_END_DECLS
+#endif /* __GST_V4L2_H265_ENC_H__ */
diff --git a/gst-v4l2/gstv4l2h26xparser.c b/gst-v4l2/gstv4l2h26xparser.c
new file mode 100644
index 0000000..bf9ff66
--- /dev/null
+++ b/gst-v4l2/gstv4l2h26xparser.c
@@ -0,0 +1,924 @@
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "nalutils.h"
+#include "gstv4l2h26xparser.h"
+
+#include
+#include
+#include
+#include
+
+GST_DEBUG_CATEGORY_STATIC (h26x_parser_debug);
+#define GST_CAT_DEFAULT h26x_parser_debug
+
+static gboolean initialized = FALSE;
+#define INITIALIZE_DEBUG_CATEGORY \
+ if (!initialized) { \
+ GST_DEBUG_CATEGORY_INIT (h26x_parser_debug, "codecparsers_h26x", 0, \
+ "h26x parser library"); \
+ initialized = TRUE; \
+ }
+
+/**** Default scaling_lists according to Table 7-2 *****/
+/* Default intra/inter 4x4 and 8x8 scaling lists from the H.264 spec,
+ * in zig-zag scan order. Used as fallbacks when the bitstream enables
+ * scaling matrices without transmitting explicit lists. */
+static const guint8 default_4x4_intra[16] = {
+  6, 13, 13, 20, 20, 20, 28, 28, 28, 28, 32, 32,
+  32, 37, 37, 42
+};
+
+static const guint8 default_4x4_inter[16] = {
+  10, 14, 14, 20, 20, 20, 24, 24, 24, 24, 27, 27,
+  27, 30, 30, 34
+};
+
+static const guint8 default_8x8_intra[64] = {
+  6, 10, 10, 13, 11, 13, 16, 16, 16, 16, 18, 18,
+  18, 18, 18, 23, 23, 23, 23, 23, 23, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27,
+  27, 27, 27, 27, 27, 29, 29, 29, 29, 29, 29, 29, 31, 31, 31, 31, 31, 31, 33,
+  33, 33, 33, 33, 36, 36, 36, 36, 38, 38, 38, 40, 40, 42
+};
+
+static const guint8 default_8x8_inter[64] = {
+  9, 13, 13, 15, 13, 15, 17, 17, 17, 17, 19, 19,
+  19, 19, 19, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 24, 24, 24,
+  24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27, 27, 27, 28,
+  28, 28, 28, 28, 30, 30, 30, 30, 32, 32, 32, 33, 33, 35
+};
+
+/***** Utils ****/
+/* aspect_ratio_idc value meaning "SAR is signalled explicitly" (Table E-1). */
+#define EXTENDED_SAR 255
+
+
+/* Decodes the 1-byte H.264 NAL unit header at nalu->offset, filling
+ * type, ref_idc and idr_pic_flag. Returns FALSE if no byte is available. */
+static gboolean
+h264_parse_nalu_header (H264NalUnit * nalu)
+{
+  guint8 *data = nalu->data + nalu->offset;
+
+  if (nalu->size < 1)
+    return FALSE;
+
+  nalu->type = (data[0] & 0x1f);        /* nal_unit_type: bits 0-4 */
+  nalu->ref_idc = (data[0] & 0x60) >> 5;        /* nal_ref_idc: bits 5-6 */
+  nalu->idr_pic_flag = (nalu->type == 5 ? 1 : 0);       /* 5 == IDR slice */
+  nalu->header_bytes = 1;
+
+  /* MVC/SVC extension headers are not parsed by this stripped parser. */
+  nalu->extension_type = H264_NAL_EXTENSION_NONE;
+
+  GST_DEBUG ("Nal type %u, ref_idc %u", nalu->type, nalu->ref_idc);
+  return TRUE;
+}
+
+/* Overwrites @dst_sps with @src_sps. The destination is cleared first so
+ * any resources it owns are released before the struct assignment.
+ * Returns FALSE only on NULL arguments (via g_return_val_if_fail). */
+static gboolean
+h264_sps_copy (H264SPS * dst_sps, const H264SPS * src_sps)
+{
+  g_return_val_if_fail (dst_sps != NULL, FALSE);
+  g_return_val_if_fail (src_sps != NULL, FALSE);
+
+  h264_sps_clear (dst_sps);
+
+  /* Shallow struct copy; sufficient while H264SPS holds no owned pointers
+   * (h264_sps_clear is a no-op in this stripped parser). */
+  *dst_sps = *src_sps;
+
+  return TRUE;
+}
+
+/* Parses scaling_list() syntax (spec 7.3.2.1.1.1) for up to @n_lists
+ * lists, writing results into @scaling_lists_4x4 (lists 0-5, 16 entries)
+ * and @scaling_lists_8x8 (lists 6-11, 64 entries). Lists not present in
+ * the bitstream use the fallback rules of Table 7-2: either the caller-
+ * supplied fallback tables or the previously decoded list of the same
+ * class. The READ_* macros jump to 'error' on bitstream underrun. */
+static gboolean
+h264_parser_parse_scaling_list (NalReader * nr,
+    guint8 scaling_lists_4x4[6][16], guint8 scaling_lists_8x8[6][64],
+    const guint8 fallback_4x4_inter[16], const guint8 fallback_4x4_intra[16],
+    const guint8 fallback_8x8_inter[64], const guint8 fallback_8x8_intra[64],
+    guint8 n_lists)
+{
+  guint i;
+
+  static const guint8 *default_lists[12] = {
+    default_4x4_intra, default_4x4_intra, default_4x4_intra,
+    default_4x4_inter, default_4x4_inter, default_4x4_inter,
+    default_8x8_intra, default_8x8_inter,
+    default_8x8_intra, default_8x8_inter,
+    default_8x8_intra, default_8x8_inter
+  };
+
+  GST_DEBUG ("parsing scaling lists");
+
+  for (i = 0; i < 12; i++) {
+    gboolean use_default = FALSE;
+
+    if (i < n_lists) {
+      guint8 scaling_list_present_flag;
+
+      READ_UINT8 (nr, scaling_list_present_flag, 1);
+      if (scaling_list_present_flag) {
+        guint8 *scaling_list;
+        guint size;
+        guint j;
+        guint8 last_scale, next_scale;
+
+        /* Lists 0-5 are 4x4, 6-11 are 8x8. */
+        if (i < 6) {
+          scaling_list = scaling_lists_4x4[i];
+          size = 16;
+        } else {
+          scaling_list = scaling_lists_8x8[i - 6];
+          size = 64;
+        }
+
+        /* Delta-decode the list (7.3.2.1.1.1). */
+        last_scale = 8;
+        next_scale = 8;
+        for (j = 0; j < size; j++) {
+          if (next_scale != 0) {
+            gint32 delta_scale;
+
+            READ_SE (nr, delta_scale);
+            next_scale = (last_scale + delta_scale) & 0xff;
+          }
+          if (j == 0 && next_scale == 0) {
+            /* Use default scaling lists (7.4.2.1.1.1) */
+            memcpy (scaling_list, default_lists[i], size);
+            break;
+          }
+          last_scale = scaling_list[j] =
+              (next_scale == 0) ? last_scale : next_scale;
+        }
+      } else
+        use_default = TRUE;
+    } else
+      use_default = TRUE;
+
+    if (use_default) {
+      /* Fallback rule A/B from Table 7-2: first list of each class falls
+       * back to the caller-supplied table, later ones to the previous
+       * list of the same class. */
+      switch (i) {
+        case 0:
+          memcpy (scaling_lists_4x4[0], fallback_4x4_intra, 16);
+          break;
+        case 1:
+          memcpy (scaling_lists_4x4[1], scaling_lists_4x4[0], 16);
+          break;
+        case 2:
+          memcpy (scaling_lists_4x4[2], scaling_lists_4x4[1], 16);
+          break;
+        case 3:
+          memcpy (scaling_lists_4x4[3], fallback_4x4_inter, 16);
+          break;
+        case 4:
+          memcpy (scaling_lists_4x4[4], scaling_lists_4x4[3], 16);
+          break;
+        case 5:
+          memcpy (scaling_lists_4x4[5], scaling_lists_4x4[4], 16);
+          break;
+        case 6:
+          memcpy (scaling_lists_8x8[0], fallback_8x8_intra, 64);
+          break;
+        case 7:
+          memcpy (scaling_lists_8x8[1], fallback_8x8_inter, 64);
+          break;
+        case 8:
+          memcpy (scaling_lists_8x8[2], scaling_lists_8x8[0], 64);
+          break;
+        case 9:
+          memcpy (scaling_lists_8x8[3], scaling_lists_8x8[1], 64);
+          break;
+        case 10:
+          memcpy (scaling_lists_8x8[4], scaling_lists_8x8[2], 64);
+          break;
+        case 11:
+          memcpy (scaling_lists_8x8[5], scaling_lists_8x8[3], 64);
+          break;
+
+        default:
+          break;
+      }
+    }
+  }
+
+  return TRUE;
+
+error:
+  GST_WARNING ("error parsing scaling lists");
+  return FALSE;
+}
+
+/* Allocates a zero-initialized H.264 NAL parser and makes sure the
+ * debug category is registered. Free with h264_nal_parser_free(). */
+H264NalParser *
+h264_nal_parser_new (void)
+{
+  H264NalParser *parser = g_slice_new0 (H264NalParser);
+
+  INITIALIZE_DEBUG_CATEGORY;
+  return parser;
+}
+
+/* Frees a parser allocated by h264_nal_parser_new(), clearing every
+ * cached SPS slot first. FIX: dropped the trailing 'nalparser = NULL;'
+ * — it was a dead store to a by-value parameter with no effect on the
+ * caller's pointer. */
+void
+h264_nal_parser_free (H264NalParser * nalparser)
+{
+  guint i;
+
+  for (i = 0; i < H264_MAX_SPS_COUNT; i++)
+    h264_sps_clear (&nalparser->sps[i]);
+  g_slice_free (H264NalParser, nalparser);
+}
+
+/* Locates the next Annex-B start code in @data (from @offset) and fills
+ * @nalu with its position and parsed header. "Unchecked" because the
+ * NAL's end is not searched for: nalu->size is provisionally set to the
+ * rest of the buffer (except for the 1-byte end-of-seq/stream NALs). */
+H264ParserResult
+h264_parser_identify_nalu_unchecked (H264NalParser * nalparser,
+    const guint8 * data, guint offset, gsize size, H264NalUnit * nalu)
+{
+  gint off1;
+
+  memset (nalu, 0, sizeof (*nalu));
+
+  /* Need at least a 3-byte start code plus one header byte. */
+  if (size < offset + 4) {
+    GST_DEBUG ("Can't parse, buffer has too small size %" G_GSIZE_FORMAT
+        ", offset %u", size, offset);
+    return H264_PARSER_ERROR;
+  }
+
+  off1 = scan_for_start_codes (data + offset, size - offset);
+
+  if (off1 < 0) {
+    GST_DEBUG ("No start code prefix in this buffer");
+    return H264_PARSER_NO_NAL;
+  }
+
+  /* Start code found at the very last byte: header not available yet. */
+  if (offset + off1 == size - 1) {
+    GST_DEBUG ("Missing data to identify nal unit");
+
+    return H264_PARSER_ERROR;
+  }
+
+  nalu->sc_offset = offset + off1;
+
+
+  nalu->offset = offset + off1 + 3;     /* skip the 3-byte start code */
+  nalu->data = (guint8 *) data;
+  nalu->size = size - nalu->offset;
+
+  if (!h264_parse_nalu_header (nalu)) {
+    GST_WARNING ("error parsing \"NAL unit header\"");
+    nalu->size = 0;
+    return H264_PARSER_BROKEN_DATA;
+  }
+
+  nalu->valid = TRUE;
+
+  /* sc might have 2 or 3 0-bytes */
+  if (nalu->sc_offset > 0 && data[nalu->sc_offset - 1] == 00
+      && (nalu->type == H264_NAL_SPS || nalu->type == H264_NAL_PPS
+          || nalu->type == H264_NAL_AU_DELIMITER))
+    nalu->sc_offset--;
+
+  if (nalu->type == H264_NAL_SEQ_END ||
+      nalu->type == H264_NAL_STREAM_END) {
+    GST_DEBUG ("end-of-seq or end-of-stream nal found");
+    nalu->size = 1;     /* these NALs are exactly one byte */
+    return H264_PARSER_OK;
+  }
+  return H264_PARSER_OK;
+}
+
+/* Like the unchecked variant, but additionally locates the NAL's end by
+ * scanning for the next start code, so nalu->size covers exactly one
+ * complete NAL. Returns H264_PARSER_NO_NAL_END if the terminating start
+ * code has not arrived yet. */
+H264ParserResult
+h264_parser_identify_nalu (H264NalParser * nalparser,
+    const guint8 * data, guint offset, gsize size, H264NalUnit * nalu)
+{
+  H264ParserResult res;
+  gint off2;
+
+  res =
+      h264_parser_identify_nalu_unchecked (nalparser, data, offset, size,
+      nalu);
+
+  if (res != H264_PARSER_OK)
+    goto beach;
+
+  /* The two NALs are exactly 1 byte size and are placed at the end of an AU,
+   * there is no need to wait for the following */
+  if (nalu->type == H264_NAL_SEQ_END ||
+      nalu->type == H264_NAL_STREAM_END)
+    goto beach;
+
+  off2 = scan_for_start_codes (data + nalu->offset, size - nalu->offset);
+  if (off2 < 0) {
+    GST_DEBUG ("Nal start %d, No end found", nalu->offset);
+
+    return H264_PARSER_NO_NAL_END;
+  }
+
+  /* Mini performance improvement:
+   * We could have a way to store how many 0s were skipped to avoid
+   * parsing them again on the next NAL */
+  while (off2 > 0 && data[nalu->offset + off2 - 1] == 00)
+    off2--;     /* strip trailing zero bytes belonging to the next start code */
+
+  nalu->size = off2;
+  if (nalu->size < 2)
+    return H264_PARSER_BROKEN_DATA;
+
+  GST_DEBUG ("Complete nal found. Off: %d, Size: %d", nalu->offset, nalu->size);
+
+beach:
+  return res;
+}
+
+/* Parses @nalu as a sequence parameter set and, on success, caches it in
+ * @nalparser->sps[] (and last_sps) so later lookups by id can find it.
+ *
+ * FIX: a stray 'return res;' immediately after the parse made the
+ * caching block unreachable, so sps[] / last_sps were never populated;
+ * upstream gsth264parser.c stores the SPS here. The return value for
+ * callers is unchanged except that a failed copy now reports
+ * H264_PARSER_ERROR as intended. */
+H264ParserResult
+h264_parser_parse_sps (H264NalParser * nalparser, H264NalUnit * nalu,
+    H264SPS * sps, gboolean parse_vui_params)
+{
+  H264ParserResult res = h264_parse_sps (nalu, sps, parse_vui_params);
+
+  if (res == H264_PARSER_OK) {
+    GST_DEBUG ("adding sequence parameter set with id: %d to array", sps->id);
+
+    if (!h264_sps_copy (&nalparser->sps[sps->id], sps))
+      return H264_PARSER_ERROR;
+    nalparser->last_sps = &nalparser->sps[sps->id];
+  }
+  return res;
+}
+
+/* Parse seq_parameter_set_data() */
+/* Decodes the SPS payload (spec 7.3.2.1.1) from @nr into @sps and derives
+ * convenience fields (max_frame_num, width/height, crop rectangle).
+ * The READ_* macros jump to 'error' on bitstream underrun or range
+ * violations. @parse_vui_params is accepted for API compatibility but VUI
+ * is not parsed by this stripped-down parser. */
+static gboolean
+h264_parse_sps_data (NalReader * nr, H264SPS * sps,
+    gboolean parse_vui_params)
+{
+  gint width, height;
+  /* Chroma subsampling factors per chroma_format_idc (Table 6-1). */
+  guint subwc[] = { 1, 2, 2, 1 };
+  guint subhc[] = { 1, 2, 1, 1 };
+
+  memset (sps, 0, sizeof (*sps));
+
+  /* set default values for fields that might not be present in the bitstream
+     and have valid defaults */
+  sps->extension_type = H264_NAL_EXTENSION_NONE;
+  sps->chroma_format_idc = 1;   /* 4:2:0 unless overridden below */
+  memset (sps->scaling_lists_4x4, 16, 96);      /* flat 16 = identity lists */
+  memset (sps->scaling_lists_8x8, 16, 384);
+
+  READ_UINT8 (nr, sps->profile_idc, 8);
+  READ_UINT8 (nr, sps->constraint_set0_flag, 1);
+  READ_UINT8 (nr, sps->constraint_set1_flag, 1);
+  READ_UINT8 (nr, sps->constraint_set2_flag, 1);
+  READ_UINT8 (nr, sps->constraint_set3_flag, 1);
+  READ_UINT8 (nr, sps->constraint_set4_flag, 1);
+  READ_UINT8 (nr, sps->constraint_set5_flag, 1);
+
+  /* skip reserved_zero_2bits */
+  if (!_skip (nr, 2))
+    goto error;
+
+  READ_UINT8 (nr, sps->level_idc, 8);
+
+  READ_UE_MAX (nr, sps->id, H264_MAX_SPS_COUNT - 1);
+
+  /* High-profile family carries extra chroma/bit-depth/scaling syntax
+   * (profile_idc values per spec A.2). */
+  if (sps->profile_idc == 100 || sps->profile_idc == 110 ||
+      sps->profile_idc == 122 || sps->profile_idc == 244 ||
+      sps->profile_idc == 44 || sps->profile_idc == 83 ||
+      sps->profile_idc == 86 || sps->profile_idc == 118 ||
+      sps->profile_idc == 128) {
+    READ_UE_MAX (nr, sps->chroma_format_idc, 3);
+    if (sps->chroma_format_idc == 3)
+      READ_UINT8 (nr, sps->separate_colour_plane_flag, 1);
+
+    READ_UE_MAX (nr, sps->bit_depth_luma_minus8, 6);
+    READ_UE_MAX (nr, sps->bit_depth_chroma_minus8, 6);
+    READ_UINT8 (nr, sps->qpprime_y_zero_transform_bypass_flag, 1);
+
+    READ_UINT8 (nr, sps->scaling_matrix_present_flag, 1);
+    if (sps->scaling_matrix_present_flag) {
+      guint8 n_lists;
+
+      n_lists = (sps->chroma_format_idc != 3) ? 8 : 12;
+      if (!h264_parser_parse_scaling_list (nr,
+              sps->scaling_lists_4x4, sps->scaling_lists_8x8,
+              default_4x4_inter, default_4x4_intra,
+              default_8x8_inter, default_8x8_intra, n_lists))
+        goto error;
+    }
+  }
+
+  READ_UE_MAX (nr, sps->log2_max_frame_num_minus4, 12);
+
+  sps->max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
+
+  /* Picture order count syntax, three variants (7.4.2.1.1). */
+  READ_UE_MAX (nr, sps->pic_order_cnt_type, 2);
+  if (sps->pic_order_cnt_type == 0) {
+    READ_UE_MAX (nr, sps->log2_max_pic_order_cnt_lsb_minus4, 12);
+  } else if (sps->pic_order_cnt_type == 1) {
+    guint i;
+
+    READ_UINT8 (nr, sps->delta_pic_order_always_zero_flag, 1);
+    READ_SE (nr, sps->offset_for_non_ref_pic);
+    READ_SE (nr, sps->offset_for_top_to_bottom_field);
+    READ_UE_MAX (nr, sps->num_ref_frames_in_pic_order_cnt_cycle, 255);
+
+    for (i = 0; i < sps->num_ref_frames_in_pic_order_cnt_cycle; i++)
+      READ_SE (nr, sps->offset_for_ref_frame[i]);
+  }
+
+  READ_UE (nr, sps->num_ref_frames);
+  READ_UINT8 (nr, sps->gaps_in_frame_num_value_allowed_flag, 1);
+  READ_UE (nr, sps->pic_width_in_mbs_minus1);
+  READ_UE (nr, sps->pic_height_in_map_units_minus1);
+  READ_UINT8 (nr, sps->frame_mbs_only_flag, 1);
+
+  if (!sps->frame_mbs_only_flag)
+    READ_UINT8 (nr, sps->mb_adaptive_frame_field_flag, 1);
+
+  READ_UINT8 (nr, sps->direct_8x8_inference_flag, 1);
+  READ_UINT8 (nr, sps->frame_cropping_flag, 1);
+  if (sps->frame_cropping_flag) {
+    READ_UE (nr, sps->frame_crop_left_offset);
+    READ_UE (nr, sps->frame_crop_right_offset);
+    READ_UE (nr, sps->frame_crop_top_offset);
+    READ_UE (nr, sps->frame_crop_bottom_offset);
+  }
+
+  /* calculate ChromaArrayType */
+  if (!sps->separate_colour_plane_flag)
+    sps->chroma_array_type = sps->chroma_format_idc;
+
+  /* Calculate width and height */
+  width = (sps->pic_width_in_mbs_minus1 + 1);
+  width *= 16;  /* macroblocks are 16x16 luma samples */
+  height = (sps->pic_height_in_map_units_minus1 + 1);
+  height *= 16 * (2 - sps->frame_mbs_only_flag);  /* field pairs double height */
+  GST_LOG ("initial width=%d, height=%d", width, height);
+  if (width < 0 || height < 0) {
+    GST_WARNING ("invalid width/height in SPS");
+    goto error;
+  }
+
+  sps->width = width;
+  sps->height = height;
+
+  if (sps->frame_cropping_flag) {
+    /* Crop offsets are in chroma sample units (7.4.2.1.1). */
+    const guint crop_unit_x = subwc[sps->chroma_format_idc];
+    const guint crop_unit_y =
+        subhc[sps->chroma_format_idc] * (2 - sps->frame_mbs_only_flag);
+
+    width -= (sps->frame_crop_left_offset + sps->frame_crop_right_offset)
+        * crop_unit_x;
+    height -= (sps->frame_crop_top_offset + sps->frame_crop_bottom_offset)
+        * crop_unit_y;
+
+    sps->crop_rect_width = width;
+    sps->crop_rect_height = height;
+    sps->crop_rect_x = sps->frame_crop_left_offset * crop_unit_x;
+    sps->crop_rect_y = sps->frame_crop_top_offset * crop_unit_y;
+
+    GST_LOG ("crop_rectangle x=%u y=%u width=%u, height=%u", sps->crop_rect_x,
+        sps->crop_rect_y, width, height);
+  }
+
+  /* No VUI parsing: framerate fields default to 0/1 (unknown). */
+  sps->fps_num_removed = 0;
+  sps->fps_den_removed = 1;
+
+  return TRUE;
+
+error:
+  return FALSE;
+}
+
+/* Parses @nalu as an SPS into @sps and marks it valid on success.
+ * The payload is read through a NalReader set up by init_nal —
+ * presumably emulation-prevention-byte aware; see nalutils.h. */
+H264ParserResult
+h264_parse_sps (H264NalUnit * nalu, H264SPS * sps,
+    gboolean parse_vui_params)
+{
+  NalReader nr;
+
+  INITIALIZE_DEBUG_CATEGORY;
+  GST_DEBUG ("parsing SPS");
+
+  init_nal (&nr, nalu->data + nalu->offset + nalu->header_bytes,
+      nalu->size - nalu->header_bytes);
+
+  if (!h264_parse_sps_data (&nr, sps, parse_vui_params))
+    goto error;
+
+  sps->valid = TRUE;
+
+  return H264_PARSER_OK;
+
+error:
+  GST_WARNING ("error parsing \"Sequence parameter set\"");
+  sps->valid = FALSE;
+  return H264_PARSER_ERROR;
+}
+
+/* Releases resources owned by an SPS. In this stripped parser nothing is
+ * freed (no heap-owned members are populated here), so only the NULL
+ * check remains; kept for API symmetry with the full codecparser. */
+void
+h264_sps_clear (H264SPS * sps)
+{
+  g_return_if_fail (sps != NULL);
+}
+
+
+/************************** H265 *****************************/
+
+
+/* Decodes the 2-byte HEVC NAL unit header at nalu->offset, filling type,
+ * layer_id and temporal_id_plus1. Returns FALSE if fewer than 2 bytes
+ * are available. */
+static gboolean
+h265_parse_nalu_header (H265NalUnit * nalu)
+{
+  guint8 *data = nalu->data + nalu->offset;
+  GstBitReader br;
+
+  if (nalu->size < 2)
+    return FALSE;
+
+  /* NOTE(review): the reader length is nalu->size - nalu->offset even
+   * though 'data' already starts at the offset — looks over-subtracted,
+   * but this matches upstream gsth265parser.c; only 2 bytes are read, so
+   * it is harmless in practice. Confirm before changing. */
+  gst_bit_reader_init (&br, data, nalu->size - nalu->offset);
+
+  /* skip the forbidden_zero_bit */
+  gst_bit_reader_skip_unchecked (&br, 1);
+
+  nalu->type = gst_bit_reader_get_bits_uint8_unchecked (&br, 6);
+  nalu->layer_id = gst_bit_reader_get_bits_uint8_unchecked (&br, 6);
+  nalu->temporal_id_plus1 = gst_bit_reader_get_bits_uint8_unchecked (&br, 3);
+  nalu->header_bytes = 2;
+
+  return TRUE;
+}
+
+/****** Parsing functions *****/
+
+/* Decodes profile_tier_level() (H.265 spec 7.3.3) for the general layer
+ * plus @maxNumSubLayersMinus1 sub-layers into @ptl. The READ_* macros
+ * jump to 'error' on bitstream underrun. */
+static gboolean
+h265_parse_profile_tier_level (H265ProfileTierLevel * ptl,
+    NalReader * nr, guint8 maxNumSubLayersMinus1)
+{
+  guint i, j;
+  GST_DEBUG ("parsing \"ProfileTierLevel parameters\"");
+
+  READ_UINT8 (nr, ptl->profile_space, 2);
+  READ_UINT8 (nr, ptl->tier_flag, 1);
+  READ_UINT8 (nr, ptl->profile_idc, 5);
+
+  for (j = 0; j < 32; j++)
+    READ_UINT8 (nr, ptl->profile_compatibility_flag[j], 1);
+
+  READ_UINT8 (nr, ptl->progressive_source_flag, 1);
+  READ_UINT8 (nr, ptl->interlaced_source_flag, 1);
+  READ_UINT8 (nr, ptl->non_packed_constraint_flag, 1);
+  READ_UINT8 (nr, ptl->frame_only_constraint_flag, 1);
+
+  /* Range-extension constraint flags (spec 7.3.3). */
+  READ_UINT8 (nr, ptl->max_12bit_constraint_flag, 1);
+  READ_UINT8 (nr, ptl->max_10bit_constraint_flag, 1);
+  READ_UINT8 (nr, ptl->max_8bit_constraint_flag, 1);
+  READ_UINT8 (nr, ptl->max_422chroma_constraint_flag, 1);
+  READ_UINT8 (nr, ptl->max_420chroma_constraint_flag, 1);
+  READ_UINT8 (nr, ptl->max_monochrome_constraint_flag, 1);
+  READ_UINT8 (nr, ptl->intra_constraint_flag, 1);
+  READ_UINT8 (nr, ptl->one_picture_only_constraint_flag, 1);
+  READ_UINT8 (nr, ptl->lower_bit_rate_constraint_flag, 1);
+  READ_UINT8 (nr, ptl->max_14bit_constraint_flag, 1);
+
+  /* skip the reserved zero bits */
+  if (!_skip (nr, 34))
+    goto error;
+
+  READ_UINT8 (nr, ptl->level_idc, 8);
+  for (j = 0; j < maxNumSubLayersMinus1; j++) {
+    READ_UINT8 (nr, ptl->sub_layer_profile_present_flag[j], 1);
+    READ_UINT8 (nr, ptl->sub_layer_level_present_flag[j], 1);
+  }
+
+  /* reserved_zero_2bits alignment up to 8 sub-layers (spec 7.3.3). */
+  if (maxNumSubLayersMinus1 > 0) {
+    for (i = maxNumSubLayersMinus1; i < 8; i++)
+      if (!_skip (nr, 2))
+        goto error;
+  }
+
+  for (i = 0; i < maxNumSubLayersMinus1; i++) {
+    if (ptl->sub_layer_profile_present_flag[i]) {
+      READ_UINT8 (nr, ptl->sub_layer_profile_space[i], 2);
+      READ_UINT8 (nr, ptl->sub_layer_tier_flag[i], 1);
+      READ_UINT8 (nr, ptl->sub_layer_profile_idc[i], 5);
+
+      for (j = 0; j < 32; j++)
+        READ_UINT8 (nr, ptl->sub_layer_profile_compatibility_flag[i][j], 1);
+
+      READ_UINT8 (nr, ptl->sub_layer_progressive_source_flag[i], 1);
+      READ_UINT8 (nr, ptl->sub_layer_interlaced_source_flag[i], 1);
+      READ_UINT8 (nr, ptl->sub_layer_non_packed_constraint_flag[i], 1);
+      READ_UINT8 (nr, ptl->sub_layer_frame_only_constraint_flag[i], 1);
+
+      /* sub-layer reserved bits (not stored). */
+      if (!_skip (nr, 44))
+        goto error;
+    }
+
+    if (ptl->sub_layer_level_present_flag[i])
+      READ_UINT8 (nr, ptl->sub_layer_level_idc[i], 8);
+  }
+
+  return TRUE;
+
+error:
+  GST_WARNING ("error parsing \"ProfileTierLevel Parameters\"");
+  return FALSE;
+}
+
+/* Allocates a zero-initialized H.265 parser and makes sure the debug
+ * category is registered. Free with h265_parser_free(). */
+H265Parser *
+h265_parser_new (void)
+{
+  H265Parser *p = g_slice_new0 (H265Parser);
+
+  INITIALIZE_DEBUG_CATEGORY;
+  return p;
+}
+
+/* Frees a parser allocated by h265_parser_new(). FIX: dropped the
+ * trailing 'parser = NULL;' — a dead store to a by-value parameter with
+ * no effect on the caller's pointer. */
+void
+h265_parser_free (H265Parser * parser)
+{
+  g_slice_free (H265Parser, parser);
+}
+
+/* Locates the next Annex-B start code in @data (from @offset) and fills
+ * @nalu with its position and parsed 2-byte header. "Unchecked": the end
+ * of the NAL is not searched for, so nalu->size is provisionally the rest
+ * of the buffer (except for the 2-byte EOS/EOB NALs). */
+H265ParserResult
+h265_parser_identify_nalu_unchecked (H265Parser * parser,
+    const guint8 * data, guint offset, gsize size, H265NalUnit * nalu)
+{
+  gint off1;
+
+  memset (nalu, 0, sizeof (*nalu));
+
+  /* Need at least a 3-byte start code plus one byte of header. */
+  if (size < offset + 4) {
+    GST_DEBUG ("Can't parse, buffer has too small size %" G_GSIZE_FORMAT
+        ", offset %u", size, offset);
+    return H265_PARSER_ERROR;
+  }
+
+  off1 = scan_for_start_codes (data + offset, size - offset);
+
+  if (off1 < 0) {
+    GST_DEBUG ("No start code prefix in this buffer");
+    return H265_PARSER_NO_NAL;
+  }
+
+  /* Start code sits at the very last byte: header not available yet. */
+  if (offset + off1 == size - 1) {
+    GST_DEBUG ("Missing data to identify nal unit");
+
+    return H265_PARSER_ERROR;
+  }
+
+  nalu->sc_offset = offset + off1;
+
+  /* sc might have 2 or 3 0-bytes */
+  if (nalu->sc_offset > 0 && data[nalu->sc_offset - 1] == 00)
+    nalu->sc_offset--;
+
+  nalu->offset = offset + off1 + 3;     /* skip the 3-byte start code */
+  nalu->data = (guint8 *) data;
+  nalu->size = size - nalu->offset;
+
+  if (!h265_parse_nalu_header (nalu)) {
+    GST_WARNING ("error parsing \"NAL unit header\"");
+    nalu->size = 0;
+    return H265_PARSER_BROKEN_DATA;
+  }
+
+  nalu->valid = TRUE;
+
+  if (nalu->type == H265_NAL_EOS || nalu->type == H265_NAL_EOB) {
+    GST_DEBUG ("end-of-seq or end-of-stream nal found");
+    nalu->size = 2;     /* these NALs are header-only (2 bytes) */
+    return H265_PARSER_OK;
+  }
+
+  return H265_PARSER_OK;
+}
+
+/* Like the unchecked variant, but also finds the NAL's end by scanning
+ * for the next start code so nalu->size covers exactly one complete NAL.
+ * Returns H265_PARSER_NO_NAL_END if the terminator has not arrived. */
+H265ParserResult
+h265_parser_identify_nalu (H265Parser * parser,
+    const guint8 * data, guint offset, gsize size, H265NalUnit * nalu)
+{
+  H265ParserResult res;
+  gint off2;
+
+  res =
+      h265_parser_identify_nalu_unchecked (parser, data, offset, size,
+      nalu);
+
+  if (res != H265_PARSER_OK)
+    goto beach;
+
+  /* The two NALs are exactly 2 bytes size and are placed at the end of an AU,
+   * there is no need to wait for the following */
+  if (nalu->type == H265_NAL_EOS || nalu->type == H265_NAL_EOB)
+    goto beach;
+
+  off2 = scan_for_start_codes (data + nalu->offset, size - nalu->offset);
+  if (off2 < 0) {
+    GST_DEBUG ("Nal start %d, No end found", nalu->offset);
+
+    return H265_PARSER_NO_NAL_END;
+  }
+
+  /* Mini performance improvement:
+   * We could have a way to store how many 0s were skipped to avoid
+   * parsing them again on the next NAL */
+  while (off2 > 0 && data[nalu->offset + off2 - 1] == 00)
+    off2--;     /* strip trailing zeros belonging to the next start code */
+
+  nalu->size = off2;
+  if (nalu->size < 3)
+    return H265_PARSER_BROKEN_DATA;
+
+  GST_DEBUG ("Complete nal found. Off: %d, Size: %d", nalu->offset, nalu->size);
+
+beach:
+  return res;
+}
+
+/* Identifies one NAL in length-prefixed (hvc1/hev1 "HEVC") format: a
+ * big-endian @nal_length_size-byte length precedes each NAL instead of a
+ * start code. Fills @nalu and parses its header. */
+H265ParserResult
+h265_parser_identify_nalu_hevc (H265Parser * parser,
+    const guint8 * data, guint offset, gsize size, guint8 nal_length_size,
+    H265NalUnit * nalu)
+{
+  GstBitReader br;
+
+  memset (nalu, 0, sizeof (*nalu));
+
+  /* Need at least the length prefix itself. */
+  if (size < offset + nal_length_size) {
+    GST_DEBUG ("Can't parse, buffer has too small size %" G_GSIZE_FORMAT
+        ", offset %u", size, offset);
+    return H265_PARSER_ERROR;
+  }
+
+  size = size - offset;
+  gst_bit_reader_init (&br, data + offset, size);
+
+  /* Big-endian length prefix (1-4 bytes). */
+  nalu->size = gst_bit_reader_get_bits_uint32_unchecked (&br,
+      nal_length_size * 8);
+  nalu->sc_offset = offset;
+  nalu->offset = offset + nal_length_size;
+
+  /* Whole NAL must be present; otherwise wait for more data. */
+  if (size < nalu->size + nal_length_size) {
+    nalu->size = 0;
+
+    return H265_PARSER_NO_NAL_END;
+  }
+
+  nalu->data = (guint8 *) data;
+
+  if (!h265_parse_nalu_header (nalu)) {
+    GST_WARNING ("error parsing \"NAL unit header\"");
+    nalu->size = 0;
+    return H265_PARSER_BROKEN_DATA;
+  }
+
+  if (nalu->size < 2)
+    return H265_PARSER_BROKEN_DATA;
+
+  nalu->valid = TRUE;
+
+  return H265_PARSER_OK;
+}
+
+/* Parse the SPS in @nalu and, on success, cache it in the parser's SPS
+ * table so later lookups by id (and @last_sps) can find it.
+ *
+ * Returns: the result of h265_parse_sps().
+ */
+H265ParserResult
+h265_parser_parse_sps (H265Parser * parser, H265NalUnit * nalu,
+    H265SPS * sps, gboolean parse_vui_params)
+{
+  H265ParserResult res =
+      h265_parse_sps (parser, nalu, sps, parse_vui_params);
+
+  /* NOTE(review): a stray "return res;" placed here previously made the
+   * caching below unreachable, leaving parser->sps[]/last_sps never
+   * updated; removed so a successfully parsed SPS is stored. */
+  if (res == H265_PARSER_OK) {
+    GST_DEBUG ("adding sequence parameter set with id: %d to array", sps->id);
+
+    parser->sps[sps->id] = *sps;
+    parser->last_sps = &parser->sps[sps->id];
+  }
+
+  return res;
+}
+
+/* Parse a Sequence Parameter Set from @nalu into @sps.
+ *
+ * Reads the SPS syntax elements (profile/tier/level, picture dimensions,
+ * bit depths, sub-layer ordering info, coding/transform block limits)
+ * and derives picture width/height plus the conformance-window crop
+ * rectangle.  The READ_* macros jump to the "error" label on a failed
+ * or out-of-range read.
+ *
+ * NOTE(review): @parse_vui_params is accepted but never used in this
+ * body — VUI parsing appears trimmed from this copy of the parser;
+ * confirm against the full upstream parser.
+ */
+H265ParserResult
+h265_parse_sps (H265Parser * parser, H265NalUnit * nalu,
+    H265SPS * sps, gboolean parse_vui_params)
+{
+  NalReader nr;
+  guint8 vps_id;                /* consumed from the stream, otherwise unused here */
+  guint i;
+  /* chroma sub-sampling factors indexed by chroma_format_idc (0..4) */
+  guint subwc[] = { 1, 2, 2, 1, 1 };
+  guint subhc[] = { 1, 2, 1, 1, 1 };
+
+  INITIALIZE_DEBUG_CATEGORY;
+  GST_DEBUG ("parsing SPS");
+
+  /* SPS payload starts right after the NAL unit header */
+  init_nal (&nr, nalu->data + nalu->offset + nalu->header_bytes,
+      nalu->size - nalu->header_bytes);
+
+  memset (sps, 0, sizeof (*sps));
+
+  READ_UINT8 (&nr, vps_id, 4);
+
+  READ_UINT8 (&nr, sps->max_sub_layers_minus1, 3);
+  READ_UINT8 (&nr, sps->temporal_id_nesting_flag, 1);
+
+  if (!h265_parse_profile_tier_level (&sps->profile_tier_level, &nr,
+          sps->max_sub_layers_minus1))
+    goto error;
+
+  READ_UE_MAX (&nr, sps->id, H265_MAX_SPS_COUNT - 1);
+
+  READ_UE_MAX (&nr, sps->chroma_format_idc, 3);
+  if (sps->chroma_format_idc == 3)
+    READ_UINT8 (&nr, sps->separate_colour_plane_flag, 1);
+
+  READ_UE_ALLOWED (&nr, sps->pic_width_in_luma_samples, 1, 16888);
+  READ_UE_ALLOWED (&nr, sps->pic_height_in_luma_samples, 1, 16888);
+
+  READ_UINT8 (&nr, sps->conformance_window_flag, 1);
+  if (sps->conformance_window_flag) {
+    READ_UE (&nr, sps->conf_win_left_offset);
+    READ_UE (&nr, sps->conf_win_right_offset);
+    READ_UE (&nr, sps->conf_win_top_offset);
+    READ_UE (&nr, sps->conf_win_bottom_offset);
+  }
+
+  READ_UE_MAX (&nr, sps->bit_depth_luma_minus8, 6);
+  READ_UE_MAX (&nr, sps->bit_depth_chroma_minus8, 6);
+  READ_UE_MAX (&nr, sps->log2_max_pic_order_cnt_lsb_minus4, 12);
+
+  /* if the per-sub-layer info is absent, only the entry for the highest
+   * sub-layer is coded and copied to the lower ones below */
+  READ_UINT8 (&nr, sps->sub_layer_ordering_info_present_flag, 1);
+  for (i =
+      (sps->sub_layer_ordering_info_present_flag ? 0 :
+          sps->max_sub_layers_minus1); i <= sps->max_sub_layers_minus1; i++) {
+    READ_UE_MAX (&nr, sps->max_dec_pic_buffering_minus1[i], 16);
+    READ_UE_MAX (&nr, sps->max_num_reorder_pics[i],
+        sps->max_dec_pic_buffering_minus1[i]);
+    READ_UE_MAX (&nr, sps->max_latency_increase_plus1[i], G_MAXUINT32 - 1);
+  }
+  /* setting default values if sps->sub_layer_ordering_info_present_flag is zero */
+  if (!sps->sub_layer_ordering_info_present_flag && sps->max_sub_layers_minus1) {
+    for (i = 0; i <= (guint)(sps->max_sub_layers_minus1 - 1); i++) {
+      sps->max_dec_pic_buffering_minus1[i] =
+          sps->max_dec_pic_buffering_minus1[sps->max_sub_layers_minus1];
+      sps->max_num_reorder_pics[i] =
+          sps->max_num_reorder_pics[sps->max_sub_layers_minus1];
+      sps->max_latency_increase_plus1[i] =
+          sps->max_latency_increase_plus1[sps->max_sub_layers_minus1];
+    }
+  }
+
+  /* The limits are calculated based on the profile_tier_level constraint
+   * in Annex-A: CtbLog2SizeY = 4 to 6 */
+  READ_UE_MAX (&nr, sps->log2_min_luma_coding_block_size_minus3, 3);
+  READ_UE_MAX (&nr, sps->log2_diff_max_min_luma_coding_block_size, 6);
+  READ_UE_MAX (&nr, sps->log2_min_transform_block_size_minus2, 3);
+  READ_UE_MAX (&nr, sps->log2_diff_max_min_transform_block_size, 3);
+  READ_UE_MAX (&nr, sps->max_transform_hierarchy_depth_inter, 4);
+  READ_UE_MAX (&nr, sps->max_transform_hierarchy_depth_intra, 4);
+
+  /* Calculate width and height */
+  sps->width = sps->pic_width_in_luma_samples;
+  sps->height = sps->pic_height_in_luma_samples;
+  /* NOTE(review): width/height are assigned from guint16 fields, so this
+   * check can never trigger; kept as defensive code. */
+  if (sps->width < 0 || sps->height < 0) {
+    GST_WARNING ("invalid width/height in SPS");
+    goto error;
+  }
+
+  if (sps->conformance_window_flag) {
+    const guint crop_unit_x = subwc[sps->chroma_format_idc];
+    const guint crop_unit_y = subhc[sps->chroma_format_idc];
+
+    sps->crop_rect_width = sps->width -
+        (sps->conf_win_left_offset + sps->conf_win_right_offset) * crop_unit_x;
+    sps->crop_rect_height = sps->height -
+        (sps->conf_win_top_offset + sps->conf_win_bottom_offset) * crop_unit_y;
+    sps->crop_rect_x = sps->conf_win_left_offset * crop_unit_x;
+    sps->crop_rect_y = sps->conf_win_top_offset * crop_unit_y;
+
+    GST_LOG ("crop_rectangle x=%u y=%u width=%u, height=%u", sps->crop_rect_x,
+        sps->crop_rect_y, sps->crop_rect_width, sps->crop_rect_height);
+  }
+
+  /* frame rate is not carried in this SPS parse; defaults for callers */
+  sps->fps_num = 0;
+  sps->fps_den = 1;
+
+  sps->valid = TRUE;
+
+  return H265_PARSER_OK;
+
+error:
+  GST_WARNING ("error parsing \"Sequence parameter set\"");
+  sps->valid = FALSE;
+  return H265_PARSER_ERROR;
+}
diff --git a/gst-v4l2/gstv4l2h26xparser.h b/gst-v4l2/gstv4l2h26xparser.h
new file mode 100644
index 0000000..040fde0
--- /dev/null
+++ b/gst-v4l2/gstv4l2h26xparser.h
@@ -0,0 +1,462 @@
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __H26X_PARSER_H__
+#define __H26X_PARSER_H__
+
+#include
+
+G_BEGIN_DECLS
+
+#define H264_MAX_SPS_COUNT 32
+
+typedef enum
+{
+ H264_NAL_UNKNOWN = 0,
+ H264_NAL_SLICE = 1,
+ H264_NAL_SLICE_DPA = 2,
+ H264_NAL_SLICE_DPB = 3,
+ H264_NAL_SLICE_DPC = 4,
+ H264_NAL_SLICE_IDR = 5,
+ H264_NAL_SEI = 6,
+ H264_NAL_SPS = 7,
+ H264_NAL_PPS = 8,
+ H264_NAL_AU_DELIMITER = 9,
+ H264_NAL_SEQ_END = 10,
+ H264_NAL_STREAM_END = 11,
+ H264_NAL_FILLER_DATA = 12,
+ H264_NAL_SPS_EXT = 13,
+ H264_NAL_PREFIX_UNIT = 14,
+ H264_NAL_SUBSET_SPS = 15,
+ H264_NAL_DEPTH_SPS = 16,
+ H264_NAL_SLICE_AUX = 19,
+ H264_NAL_SLICE_EXT = 20,
+ H264_NAL_SLICE_DEPTH = 21
+} H264NalUnitType;
+
+typedef enum
+{
+ H264_NAL_EXTENSION_NONE = 0,
+ H264_NAL_EXTENSION_SVC,
+ H264_NAL_EXTENSION_MVC,
+} H264NalUnitExtensionType;
+
+typedef enum
+{
+ H264_PARSER_OK,
+ H264_PARSER_BROKEN_DATA,
+ H264_PARSER_BROKEN_LINK,
+ H264_PARSER_ERROR,
+ H264_PARSER_NO_NAL,
+ H264_PARSER_NO_NAL_END
+} H264ParserResult;
+
+/* Stereo/3D frame packing arrangement types.  Note that NONE is
+ * deliberately 6, not 0 — the numeric values of the packed arrangements
+ * (0..5) are significant and must not be reordered. */
+typedef enum
+{
+  H264_FRAME_PACKING_NONE = 6,
+  H264_FRAME_PACKING_CHECKERBOARD_INTERLEAVING = 0,
+  H264_FRAME_PACKING_COLUMN_INTERLEAVING = 1,
+  H264_FRAME_PACKING_ROW_INTERLEAVING = 2,
+  H264_FRAME_PACKING_SIDE_BY_SIDE = 3,
+  /* NOTE(review): "FRMAE" is a typo, but this is a public header —
+   * renaming the constant would break source compatibility. */
+  H264_FRMAE_PACKING_TOP_BOTTOM = 4,
+  H264_FRAME_PACKING_TEMPORAL_INTERLEAVING = 5
+} H264FramePackingType;
+
+
+typedef enum
+{
+ H264_P_SLICE = 0,
+ H264_B_SLICE = 1,
+ H264_I_SLICE = 2,
+ H264_SP_SLICE = 3,
+ H264_SI_SLICE = 4,
+ H264_S_P_SLICE = 5,
+ H264_S_B_SLICE = 6,
+ H264_S_I_SLICE = 7,
+ H264_S_SP_SLICE = 8,
+ H264_S_SI_SLICE = 9
+} H264SliceType;
+
+typedef enum
+{
+ H264_CT_TYPE_PROGRESSIVE = 0,
+ H264_CT_TYPE_INTERLACED = 1,
+ H264_CT_TYPE_UNKNOWN = 2,
+} CtType;
+
+typedef struct _H264NalParser H264NalParser;
+
+typedef struct _H264NalUnit H264NalUnit;
+
+typedef struct _H264SPS H264SPS;
+
+/* A single identified H.264 NAL unit and where it lives inside the
+ * caller's bitstream buffer (the buffer is not owned by this struct). */
+struct _H264NalUnit
+{
+  guint16 ref_idc;              /* nal_ref_idc from the NAL header */
+  guint16 type;                 /* H264NalUnitType */
+
+  /* calculated values */
+  guint8 idr_pic_flag;          /* derived IDR indication */
+  guint size;                   /* NAL size in bytes */
+  guint offset;                 /* offset of the NAL header in @data */
+  guint sc_offset;              /* offset of the preceding start code */
+  gboolean valid;               /* TRUE once fully identified/parsed */
+
+  guint8 *data;                 /* unowned pointer into the source buffer */
+
+  guint8 header_bytes;          /* length of the parsed NAL header */
+  guint8 extension_type;        /* H264NalUnitExtensionType */
+};
+
+struct _H264SPS
+{
+ gint id;
+
+ guint8 profile_idc;
+ guint8 constraint_set0_flag;
+ guint8 constraint_set1_flag;
+ guint8 constraint_set2_flag;
+ guint8 constraint_set3_flag;
+ guint8 constraint_set4_flag;
+ guint8 constraint_set5_flag;
+ guint8 level_idc;
+
+ guint8 chroma_format_idc;
+ guint8 separate_colour_plane_flag;
+ guint8 bit_depth_luma_minus8;
+ guint8 bit_depth_chroma_minus8;
+ guint8 qpprime_y_zero_transform_bypass_flag;
+
+ guint8 scaling_matrix_present_flag;
+ guint8 scaling_lists_4x4[6][16];
+ guint8 scaling_lists_8x8[6][64];
+
+ guint8 log2_max_frame_num_minus4;
+ guint8 pic_order_cnt_type;
+
+ /* if pic_order_cnt_type == 0 */
+ guint8 log2_max_pic_order_cnt_lsb_minus4;
+
+ /* else if pic_order_cnt_type == 1 */
+ guint8 delta_pic_order_always_zero_flag;
+ gint32 offset_for_non_ref_pic;
+ gint32 offset_for_top_to_bottom_field;
+ guint8 num_ref_frames_in_pic_order_cnt_cycle;
+ gint32 offset_for_ref_frame[255];
+
+ guint32 num_ref_frames;
+ guint8 gaps_in_frame_num_value_allowed_flag;
+ guint32 pic_width_in_mbs_minus1;
+ guint32 pic_height_in_map_units_minus1;
+ guint8 frame_mbs_only_flag;
+
+ guint8 mb_adaptive_frame_field_flag;
+
+ guint8 direct_8x8_inference_flag;
+
+ guint8 frame_cropping_flag;
+
+ /* if frame_cropping_flag */
+ guint32 frame_crop_left_offset;
+ guint32 frame_crop_right_offset;
+ guint32 frame_crop_top_offset;
+ guint32 frame_crop_bottom_offset;
+
+ guint8 vui_parameters_present_flag;
+
+ /* calculated values */
+ guint8 chroma_array_type;
+ guint32 max_frame_num;
+ gint width, height;
+ gint crop_rect_width, crop_rect_height;
+ gint crop_rect_x, crop_rect_y;
+ gint fps_num_removed, fps_den_removed; /* FIXME: remove */
+ gboolean valid;
+
+ /* Subset SPS extensions */
+ guint8 extension_type;
+};
+
+/* H.264 parser state: SPS cache indexed by sps id plus a pointer to the
+ * most recently parsed SPS (points into @sps). */
+struct _H264NalParser
+{
+  /*< private >*/
+  H264SPS sps[H264_MAX_SPS_COUNT];
+  H264SPS *last_sps;
+};
+
+
+H264NalParser *h264_nal_parser_new (void);
+
+
+H264ParserResult h264_parser_identify_nalu (H264NalParser *nalparser,
+ const guint8 *data, guint offset,
+ gsize size, H264NalUnit *nalu);
+
+
+H264ParserResult h264_parser_identify_nalu_unchecked (H264NalParser *nalparser,
+ const guint8 *data, guint offset,
+ gsize size, H264NalUnit *nalu);
+
+
+H264ParserResult h264_parser_parse_sps (H264NalParser *nalparser, H264NalUnit *nalu,
+ H264SPS *sps, gboolean parse_vui_params);
+
+
+void h264_nal_parser_free (H264NalParser *nalparser);
+
+
+H264ParserResult h264_parse_sps (H264NalUnit *nalu,
+ H264SPS *sps, gboolean parse_vui_params);
+
+
+void h264_sps_clear (H264SPS *sps);
+
+
+#define H265_MAX_SUB_LAYERS 8
+#define H265_MAX_SPS_COUNT 16
+
+
+typedef enum
+{
+ H265_NAL_SLICE_TRAIL_N = 0,
+ H265_NAL_SLICE_TRAIL_R = 1,
+ H265_NAL_SLICE_TSA_N = 2,
+ H265_NAL_SLICE_TSA_R = 3,
+ H265_NAL_SLICE_STSA_N = 4,
+ H265_NAL_SLICE_STSA_R = 5,
+ H265_NAL_SLICE_RADL_N = 6,
+ H265_NAL_SLICE_RADL_R = 7,
+ H265_NAL_SLICE_RASL_N = 8,
+ H265_NAL_SLICE_RASL_R = 9,
+ H265_NAL_SLICE_BLA_W_LP = 16,
+ H265_NAL_SLICE_BLA_W_RADL = 17,
+ H265_NAL_SLICE_BLA_N_LP = 18,
+ H265_NAL_SLICE_IDR_W_RADL = 19,
+ H265_NAL_SLICE_IDR_N_LP = 20,
+ H265_NAL_SLICE_CRA_NUT = 21,
+ H265_NAL_VPS = 32,
+ H265_NAL_SPS = 33,
+ H265_NAL_PPS = 34,
+ H265_NAL_AUD = 35,
+ H265_NAL_EOS = 36,
+ H265_NAL_EOB = 37,
+ H265_NAL_FD = 38,
+ H265_NAL_PREFIX_SEI = 39,
+ H265_NAL_SUFFIX_SEI = 40
+} H265NalUnitType;
+
+typedef enum
+{
+ H265_PARSER_OK,
+ H265_PARSER_BROKEN_DATA,
+ H265_PARSER_BROKEN_LINK,
+ H265_PARSER_ERROR,
+ H265_PARSER_NO_NAL,
+ H265_PARSER_NO_NAL_END
+} H265ParserResult;
+
+
+typedef struct _H265Parser H265Parser;
+
+typedef struct _H265NalUnit H265NalUnit;
+
+typedef struct _H265SPS H265SPS;
+typedef struct _H265ProfileTierLevel H265ProfileTierLevel;
+
+/* A single identified H.265 NAL unit and where it lives inside the
+ * caller's bitstream buffer (the buffer is not owned by this struct). */
+struct _H265NalUnit
+{
+  guint8 type;                  /* H265NalUnitType */
+  guint8 layer_id;              /* nuh_layer_id from the NAL header */
+  guint8 temporal_id_plus1;     /* nuh_temporal_id_plus1 from the NAL header */
+
+  /* calculated values */
+  guint size;                   /* NAL size in bytes */
+  guint offset;                 /* offset of the NAL header in @data */
+  guint sc_offset;              /* offset of the preceding start code / length field */
+  gboolean valid;               /* TRUE once fully identified/parsed */
+
+  guint8 *data;                 /* unowned pointer into the source buffer */
+  guint8 header_bytes;          /* length of the parsed NAL header */
+};
+
+struct _H265ProfileTierLevel {
+ guint8 profile_space;
+ guint8 tier_flag;
+ guint8 profile_idc;
+
+ guint8 profile_compatibility_flag[32];
+
+ guint8 progressive_source_flag;
+ guint8 interlaced_source_flag;
+ guint8 non_packed_constraint_flag;
+ guint8 frame_only_constraint_flag;
+
+ guint8 max_12bit_constraint_flag;
+ guint8 max_10bit_constraint_flag;
+ guint8 max_8bit_constraint_flag;
+ guint8 max_422chroma_constraint_flag;
+ guint8 max_420chroma_constraint_flag;
+ guint8 max_monochrome_constraint_flag;
+ guint8 intra_constraint_flag;
+ guint8 one_picture_only_constraint_flag;
+ guint8 lower_bit_rate_constraint_flag;
+ guint8 max_14bit_constraint_flag;
+
+ guint8 level_idc;
+
+ guint8 sub_layer_profile_present_flag[6];
+ guint8 sub_layer_level_present_flag[6];
+
+ guint8 sub_layer_profile_space[6];
+ guint8 sub_layer_tier_flag[6];
+ guint8 sub_layer_profile_idc[6];
+ guint8 sub_layer_profile_compatibility_flag[6][32];
+ guint8 sub_layer_progressive_source_flag[6];
+ guint8 sub_layer_interlaced_source_flag[6];
+ guint8 sub_layer_non_packed_constraint_flag[6];
+ guint8 sub_layer_frame_only_constraint_flag[6];
+ guint8 sub_layer_level_idc[6];
+};
+
+struct _H265SPS
+{
+ guint8 id;
+
+ guint8 max_sub_layers_minus1;
+ guint8 temporal_id_nesting_flag;
+
+ H265ProfileTierLevel profile_tier_level;
+
+ guint8 chroma_format_idc;
+ guint8 separate_colour_plane_flag;
+ guint16 pic_width_in_luma_samples;
+ guint16 pic_height_in_luma_samples;
+
+ guint8 conformance_window_flag;
+ /* if conformance_window_flag */
+ guint32 conf_win_left_offset;
+ guint32 conf_win_right_offset;
+ guint32 conf_win_top_offset;
+ guint32 conf_win_bottom_offset;
+
+ guint8 bit_depth_luma_minus8;
+ guint8 bit_depth_chroma_minus8;
+ guint8 log2_max_pic_order_cnt_lsb_minus4;
+
+ guint8 sub_layer_ordering_info_present_flag;
+ guint8 max_dec_pic_buffering_minus1[H265_MAX_SUB_LAYERS];
+ guint8 max_num_reorder_pics[H265_MAX_SUB_LAYERS];
+ guint8 max_latency_increase_plus1[H265_MAX_SUB_LAYERS];
+
+ guint8 log2_min_luma_coding_block_size_minus3;
+ guint8 log2_diff_max_min_luma_coding_block_size;
+ guint8 log2_min_transform_block_size_minus2;
+ guint8 log2_diff_max_min_transform_block_size;
+ guint8 max_transform_hierarchy_depth_inter;
+ guint8 max_transform_hierarchy_depth_intra;
+
+ guint8 scaling_list_enabled_flag;
+ /* if scaling_list_enabled_flag */
+ guint8 scaling_list_data_present_flag;
+
+ guint8 amp_enabled_flag;
+ guint8 sample_adaptive_offset_enabled_flag;
+ guint8 pcm_enabled_flag;
+ /* if pcm_enabled_flag */
+ guint8 pcm_sample_bit_depth_luma_minus1;
+ guint8 pcm_sample_bit_depth_chroma_minus1;
+ guint8 log2_min_pcm_luma_coding_block_size_minus3;
+ guint8 log2_diff_max_min_pcm_luma_coding_block_size;
+ guint8 pcm_loop_filter_disabled_flag;
+
+ guint8 num_short_term_ref_pic_sets;
+
+ guint8 long_term_ref_pics_present_flag;
+ /* if long_term_ref_pics_present_flag */
+ guint8 num_long_term_ref_pics_sps;
+ guint16 lt_ref_pic_poc_lsb_sps[32];
+ guint8 used_by_curr_pic_lt_sps_flag[32];
+
+ guint8 temporal_mvp_enabled_flag;
+ guint8 strong_intra_smoothing_enabled_flag;
+ guint8 vui_parameters_present_flag;
+
+ /* if vui_parameters_present_flat */
+ guint8 sps_extension_flag;
+
+ /* calculated values */
+ guint8 chroma_array_type;
+ gint width, height;
+ gint crop_rect_width, crop_rect_height;
+ gint crop_rect_x, crop_rect_y;
+ gint fps_num, fps_den;
+ gboolean valid;
+};
+
+/* H.265 parser state: SPS cache indexed by sps id plus a pointer to the
+ * most recently parsed SPS (points into @sps). */
+struct _H265Parser
+{
+  /*< private >*/
+  H265SPS sps[H265_MAX_SPS_COUNT];
+  H265SPS *last_sps;
+};
+
+H265Parser * h265_parser_new (void);
+
+
+H265ParserResult h265_parser_identify_nalu (H265Parser * parser,
+ const guint8 * data,
+ guint offset,
+ gsize size,
+ H265NalUnit * nalu);
+
+
+H265ParserResult h265_parser_identify_nalu_unchecked (H265Parser * parser,
+ const guint8 * data,
+ guint offset,
+ gsize size,
+ H265NalUnit * nalu);
+
+
+H265ParserResult h265_parser_identify_nalu_hevc (H265Parser * parser,
+ const guint8 * data,
+ guint offset,
+ gsize size,
+ guint8 nal_length_size,
+ H265NalUnit * nalu);
+
+
+
+H265ParserResult h265_parser_parse_sps (H265Parser * parser,
+ H265NalUnit * nalu,
+ H265SPS * sps,
+ gboolean parse_vui_params);
+
+void h265_parser_free (H265Parser * parser);
+
+
+
+H265ParserResult h265_parse_sps (H265Parser * parser,
+ H265NalUnit * nalu,
+ H265SPS * sps,
+ gboolean parse_vui_params);
+
+
+G_END_DECLS
+#endif
diff --git a/gst-v4l2/gstv4l2object.c b/gst-v4l2/gstv4l2object.c
new file mode 100644
index 0000000..aa83a1b
--- /dev/null
+++ b/gst-v4l2/gstv4l2object.c
@@ -0,0 +1,5062 @@
+/* GStreamer
+ *
+ * Copyright (C) 2001-2002 Ronald Bultje
+ * 2006 Edgard Lima
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * gstv4l2object.c: base class for V4L2 elements
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Library General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version. This library is distributed in the hope
+ * that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU Library General Public License for more details.
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+#ifdef HAVE_GUDEV
+#include
+#endif
+
+#include "linux/videodev2.h"
+#include "gstv4l2object.h"
+
+#ifndef USE_V4L2_TARGET_NV
+#include "gstv4l2tuner.h"
+#include "gstv4l2colorbalance.h"
+#else
+#include "gstv4l2videodec.h"
+#include "gstv4l2videoenc.h"
+#endif
+
+#include "gst/gst-i18n-plugin.h"
+
+#include
+
+GST_DEBUG_CATEGORY_EXTERN (v4l2_debug);
+#define GST_CAT_DEFAULT v4l2_debug
+
+#define DEFAULT_PROP_DEVICE_NAME NULL
+#define DEFAULT_PROP_DEVICE_FD -1
+#define DEFAULT_PROP_FLAGS 0
+#define DEFAULT_PROP_TV_NORM 0
+#define DEFAULT_PROP_IO_MODE GST_V4L2_IO_AUTO
+
+#define ENCODED_BUFFER_SIZE (4 * 1024 * 1024)
+
+enum
+{
+ PROP_0,
+ V4L2_STD_OBJECT_PROPS,
+};
+
+/*
+ * common format / caps utilities:
+ */
+typedef enum
+{
+ GST_V4L2_RAW = 1 << 0,
+ GST_V4L2_CODEC = 1 << 1,
+ GST_V4L2_TRANSPORT = 1 << 2,
+ GST_V4L2_NO_PARSE = 1 << 3,
+ GST_V4L2_ALL = 0xffff
+} GstV4L2FormatFlags;
+
+typedef struct
+{
+ guint32 format;
+ gboolean dimensions;
+ GstV4L2FormatFlags flags;
+} GstV4L2FormatDesc;
+
+static const GstV4L2FormatDesc gst_v4l2_formats[] = {
+ /* RGB formats */
+ {V4L2_PIX_FMT_RGB332, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_ARGB555, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_XRGB555, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_ARGB555X, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_XRGB555X, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_RGB565, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_RGB565X, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_BGR666, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_BGR24, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_RGB24, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_ABGR32, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_XBGR32, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_ARGB32, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_XRGB32, TRUE, GST_V4L2_RAW},
+
+ /* Deprecated Packed RGB Image Formats (alpha ambiguity) */
+ {V4L2_PIX_FMT_RGB444, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_RGB555, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_RGB555X, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_BGR32, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_RGB32, TRUE, GST_V4L2_RAW},
+
+ /* Grey formats */
+ {V4L2_PIX_FMT_GREY, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_Y4, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_Y6, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_Y10, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_Y12, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_Y16, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_Y16_BE, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_Y10BPACK, TRUE, GST_V4L2_RAW},
+
+ /* Palette formats */
+ {V4L2_PIX_FMT_PAL8, TRUE, GST_V4L2_RAW},
+
+ /* Chrominance formats */
+ {V4L2_PIX_FMT_UV8, TRUE, GST_V4L2_RAW},
+
+ /* Luminance+Chrominance formats */
+ {V4L2_PIX_FMT_YVU410, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YVU420, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YVU420M, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YUYV, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YYUV, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YVYU, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_UYVY, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_VYUY, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YUV422P, TRUE, GST_V4L2_RAW},
+#ifdef USE_V4L2_TARGET_NV
+ {V4L2_PIX_FMT_YUV422M, TRUE, GST_V4L2_RAW},
+#endif
+ {V4L2_PIX_FMT_YUV411P, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_Y41P, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YUV444, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YUV555, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YUV565, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YUV32, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YUV410, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YUV420, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YUV420M, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_HI240, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_HM12, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_M420, TRUE, GST_V4L2_RAW},
+
+ /* two planes -- one Y, one Cr + Cb interleaved */
+ {V4L2_PIX_FMT_NV12, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_NV12M, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_NV12MT, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_NV12MT_16X16, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_NV21, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_NV21M, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_NV16, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_NV16M, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_NV61, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_NV61M, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_NV24, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_NV42, TRUE, GST_V4L2_RAW},
+#ifdef USE_V4L2_TARGET_NV
+ {V4L2_PIX_FMT_P010, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_P010M, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_P012, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_P012M, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_NV24M, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YUV444_10LE, TRUE, GST_V4L2_RAW},
+ {V4L2_PIX_FMT_YUV444_12LE, TRUE, GST_V4L2_RAW},
+#endif
+ /* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */
+ {V4L2_PIX_FMT_SBGGR8, TRUE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_SGBRG8, TRUE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_SGRBG8, TRUE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_SRGGB8, TRUE, GST_V4L2_CODEC},
+
+ /* compressed formats */
+ {V4L2_PIX_FMT_MJPEG, FALSE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_JPEG, FALSE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_PJPG, FALSE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_DV, FALSE, GST_V4L2_TRANSPORT},
+ {V4L2_PIX_FMT_MPEG, FALSE, GST_V4L2_TRANSPORT},
+ {V4L2_PIX_FMT_H264, FALSE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_H264_NO_SC, FALSE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_H264_MVC, FALSE, GST_V4L2_CODEC},
+#ifdef USE_V4L2_TARGET_NV
+ {V4L2_PIX_FMT_H265, FALSE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_AV1, FALSE, GST_V4L2_CODEC | GST_V4L2_NO_PARSE},
+#endif
+ {V4L2_PIX_FMT_H263, FALSE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_MPEG1, FALSE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_MPEG2, FALSE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_MPEG4, FALSE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_XVID, FALSE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_VC1_ANNEX_G, FALSE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_VC1_ANNEX_L, FALSE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_VP8, FALSE, GST_V4L2_CODEC | GST_V4L2_NO_PARSE},
+ {V4L2_PIX_FMT_VP9, FALSE, GST_V4L2_CODEC | GST_V4L2_NO_PARSE},
+
+ /* Vendor-specific formats */
+ {V4L2_PIX_FMT_WNVA, TRUE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_SN9C10X, TRUE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_PWC1, TRUE, GST_V4L2_CODEC},
+ {V4L2_PIX_FMT_PWC2, TRUE, GST_V4L2_CODEC},
+};
+
+#define GST_V4L2_FORMAT_COUNT (G_N_ELEMENTS (gst_v4l2_formats))
+
+static GSList *gst_v4l2_object_get_format_list (GstV4l2Object * v4l2object);
+
+
+#define GST_TYPE_V4L2_DEVICE_FLAGS (gst_v4l2_device_get_type ())
+/* Register (once) and return the flags GType describing V4L2 device
+ * capability bits exposed through the "flags" property. */
+static GType
+gst_v4l2_device_get_type (void)
+{
+  static GType device_flags_type = 0;
+
+  if (device_flags_type != 0)
+    return device_flags_type;
+
+  {
+    static const GFlagsValue values[] = {
+      {V4L2_CAP_VIDEO_CAPTURE, "Device supports video capture", "capture"},
+      {V4L2_CAP_VIDEO_OUTPUT, "Device supports video playback", "output"},
+      {V4L2_CAP_VIDEO_OVERLAY, "Device supports video overlay", "overlay"},
+
+      {V4L2_CAP_VBI_CAPTURE, "Device supports the VBI capture", "vbi-capture"},
+      {V4L2_CAP_VBI_OUTPUT, "Device supports the VBI output", "vbi-output"},
+
+      {V4L2_CAP_TUNER, "Device has a tuner or modulator", "tuner"},
+      {V4L2_CAP_AUDIO, "Device has audio inputs or outputs", "audio"},
+
+      {0, NULL, NULL}
+    };
+
+    device_flags_type =
+        g_flags_register_static ("GstV4l2DeviceTypeFlags", values);
+  }
+
+  return device_flags_type;
+}
+
+#define GST_TYPE_V4L2_TV_NORM (gst_v4l2_tv_norm_get_type ())
+/* Register (once) and return the enum GType listing the analog TV norms
+ * (V4L2_STD_*) selectable through the "norm" property. */
+static GType
+gst_v4l2_tv_norm_get_type (void)
+{
+  static GType v4l2_tv_norm = 0;
+
+  if (!v4l2_tv_norm) {
+    static const GEnumValue tv_norms[] = {
+      {0, "none", "none"},
+
+      {V4L2_STD_NTSC, "NTSC", "NTSC"},
+      {V4L2_STD_NTSC_M, "NTSC-M", "NTSC-M"},
+      {V4L2_STD_NTSC_M_JP, "NTSC-M-JP", "NTSC-M-JP"},
+      {V4L2_STD_NTSC_M_KR, "NTSC-M-KR", "NTSC-M-KR"},
+      {V4L2_STD_NTSC_443, "NTSC-443", "NTSC-443"},
+
+      {V4L2_STD_PAL, "PAL", "PAL"},
+      {V4L2_STD_PAL_BG, "PAL-BG", "PAL-BG"},
+      {V4L2_STD_PAL_B, "PAL-B", "PAL-B"},
+      {V4L2_STD_PAL_B1, "PAL-B1", "PAL-B1"},
+      {V4L2_STD_PAL_G, "PAL-G", "PAL-G"},
+      {V4L2_STD_PAL_H, "PAL-H", "PAL-H"},
+      {V4L2_STD_PAL_I, "PAL-I", "PAL-I"},
+      {V4L2_STD_PAL_DK, "PAL-DK", "PAL-DK"},
+      {V4L2_STD_PAL_D, "PAL-D", "PAL-D"},
+      {V4L2_STD_PAL_D1, "PAL-D1", "PAL-D1"},
+      {V4L2_STD_PAL_K, "PAL-K", "PAL-K"},
+      {V4L2_STD_PAL_M, "PAL-M", "PAL-M"},
+      {V4L2_STD_PAL_N, "PAL-N", "PAL-N"},
+      {V4L2_STD_PAL_Nc, "PAL-Nc", "PAL-Nc"},
+      {V4L2_STD_PAL_60, "PAL-60", "PAL-60"},
+
+      {V4L2_STD_SECAM, "SECAM", "SECAM"},
+      {V4L2_STD_SECAM_B, "SECAM-B", "SECAM-B"},
+      {V4L2_STD_SECAM_G, "SECAM-G", "SECAM-G"},
+      {V4L2_STD_SECAM_H, "SECAM-H", "SECAM-H"},
+      {V4L2_STD_SECAM_DK, "SECAM-DK", "SECAM-DK"},
+      {V4L2_STD_SECAM_D, "SECAM-D", "SECAM-D"},
+      {V4L2_STD_SECAM_K, "SECAM-K", "SECAM-K"},
+      {V4L2_STD_SECAM_K1, "SECAM-K1", "SECAM-K1"},
+      {V4L2_STD_SECAM_L, "SECAM-L", "SECAM-L"},
+      {V4L2_STD_SECAM_LC, "SECAM-Lc", "SECAM-Lc"},
+
+      {0, NULL, NULL}
+    };
+
+    v4l2_tv_norm = g_enum_register_static ("V4L2_TV_norms", tv_norms);
+  }
+
+  return v4l2_tv_norm;
+}
+
+/* Register (once) and return the enum GType for the "io-mode" property.
+ * The set of selectable modes and the registered type name differ
+ * between the stock build and the NV (USE_V4L2_TARGET_NV) build. */
+GType
+gst_v4l2_io_mode_get_type (void)
+{
+  static GType io_mode_type = 0;
+
+  if (io_mode_type != 0)
+    return io_mode_type;
+
+  {
+    static const GEnumValue io_modes[] = {
+      {GST_V4L2_IO_AUTO, "GST_V4L2_IO_AUTO", "auto"},
+#ifndef USE_V4L2_TARGET_NV
+      {GST_V4L2_IO_RW, "GST_V4L2_IO_RW", "rw"},
+#endif
+      {GST_V4L2_IO_MMAP, "GST_V4L2_IO_MMAP", "mmap"},
+#ifndef USE_V4L2_TARGET_NV
+      {GST_V4L2_IO_USERPTR, "GST_V4L2_IO_USERPTR", "userptr"},
+      {GST_V4L2_IO_DMABUF, "GST_V4L2_IO_DMABUF", "dmabuf"},
+#endif
+      {GST_V4L2_IO_DMABUF_IMPORT, "GST_V4L2_IO_DMABUF_IMPORT",
+          "dmabuf-import"},
+
+      {0, NULL, NULL}
+    };
+
+#ifndef USE_V4L2_TARGET_NV
+    io_mode_type = g_enum_register_static ("GstV4l2IOMode", io_modes);
+#else
+    io_mode_type = g_enum_register_static ("GstNvV4l2IOMode", io_modes);
+#endif
+  }
+
+  return io_mode_type;
+}
+
+/* Install the common V4L2 object properties (device identification,
+ * picture controls, TV norm, io-mode, extra-controls and aspect-ratio
+ * handling) on @gobject_class.  @default_device becomes the default
+ * value of the "device" property. */
+void
+gst_v4l2_object_install_properties_helper (GObjectClass * gobject_class,
+    const char *default_device)
+{
+  g_object_class_install_property (gobject_class, PROP_DEVICE,
+      g_param_spec_string ("device", "Device", "Device location",
+          default_device, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+  g_object_class_install_property (gobject_class, PROP_DEVICE_NAME,
+      g_param_spec_string ("device-name", "Device name",
+          "Name of the device", DEFAULT_PROP_DEVICE_NAME,
+          G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
+  g_object_class_install_property (gobject_class, PROP_DEVICE_FD,
+      g_param_spec_int ("device-fd", "File descriptor",
+          "File descriptor of the device", -1, G_MAXINT, DEFAULT_PROP_DEVICE_FD,
+          G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
+  g_object_class_install_property (gobject_class, PROP_FLAGS,
+      g_param_spec_flags ("flags", "Flags", "Device type flags",
+          GST_TYPE_V4L2_DEVICE_FLAGS, DEFAULT_PROP_FLAGS,
+          G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
+
+  /**
+   * GstV4l2Src:brightness:
+   *
+   * Picture brightness, or more precisely, the black level
+   */
+  g_object_class_install_property (gobject_class, PROP_BRIGHTNESS,
+      g_param_spec_int ("brightness", "Brightness",
+          "Picture brightness, or more precisely, the black level", G_MININT,
+          G_MAXINT, 0,
+          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
+  /**
+   * GstV4l2Src:contrast:
+   *
+   * Picture contrast or luma gain
+   */
+  g_object_class_install_property (gobject_class, PROP_CONTRAST,
+      g_param_spec_int ("contrast", "Contrast",
+          "Picture contrast or luma gain", G_MININT,
+          G_MAXINT, 0,
+          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
+  /**
+   * GstV4l2Src:saturation:
+   *
+   * Picture color saturation or chroma gain
+   */
+  g_object_class_install_property (gobject_class, PROP_SATURATION,
+      g_param_spec_int ("saturation", "Saturation",
+          "Picture color saturation or chroma gain", G_MININT,
+          G_MAXINT, 0,
+          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
+  /**
+   * GstV4l2Src:hue:
+   *
+   * Hue or color balance
+   */
+  g_object_class_install_property (gobject_class, PROP_HUE,
+      g_param_spec_int ("hue", "Hue",
+          "Hue or color balance", G_MININT,
+          G_MAXINT, 0,
+          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
+
+  /**
+   * GstV4l2Src:norm:
+   *
+   * TV norm
+   */
+  g_object_class_install_property (gobject_class, PROP_TV_NORM,
+      g_param_spec_enum ("norm", "TV norm",
+          "video standard",
+          GST_TYPE_V4L2_TV_NORM, DEFAULT_PROP_TV_NORM,
+          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+  /**
+   * GstV4l2Src:io-mode:
+   *
+   * IO Mode
+   */
+  g_object_class_install_property (gobject_class, PROP_IO_MODE,
+      g_param_spec_enum ("io-mode", "IO mode",
+          "I/O mode",
+          GST_TYPE_V4L2_IO_MODE, DEFAULT_PROP_IO_MODE,
+          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+  /**
+   * GstV4l2Src:extra-controls:
+   *
+   * Additional v4l2 controls for the device. The controls are identified
+   * by the control name (lowercase with '_' for any non-alphanumeric
+   * characters).
+   *
+   * Since: 1.2
+   */
+  g_object_class_install_property (gobject_class, PROP_EXTRA_CONTROLS,
+      g_param_spec_boxed ("extra-controls", "Extra Controls",
+          "Extra v4l2 controls (CIDs) for the device",
+          GST_TYPE_STRUCTURE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+  /**
+   * GstV4l2Src:pixel-aspect-ratio:
+   *
+   * The pixel aspect ratio of the device. This overwrites the pixel aspect
+   * ratio queried from the device.
+   *
+   * Since: 1.2
+   */
+  g_object_class_install_property (gobject_class, PROP_PIXEL_ASPECT_RATIO,
+      g_param_spec_string ("pixel-aspect-ratio", "Pixel Aspect Ratio",
+          "Overwrite the pixel aspect ratio of the device", "1/1",
+          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+  /**
+   * GstV4l2Src:force-aspect-ratio:
+   *
+   * When enabled, the pixel aspect ratio queried from the device or set
+   * with the pixel-aspect-ratio property will be enforced.
+   *
+   * Since: 1.2
+   */
+  g_object_class_install_property (gobject_class, PROP_FORCE_ASPECT_RATIO,
+      g_param_spec_boolean ("force-aspect-ratio", "Force aspect ratio",
+          "When enabled, the pixel aspect ratio will be enforced", TRUE,
+          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+}
+
+#ifdef USE_V4L2_TARGET_NV
+/* Install the decoder-specific output/capture "io-mode" properties on
+ * @gobject_class; skipped entirely when is_cuvid is set. */
+void
+gst_v4l2_object_install_m2m_dec_iomode_properties_helper (GObjectClass * gobject_class)
+{
+  if (is_cuvid != FALSE)
+    return;
+
+  g_object_class_install_property (gobject_class, PROP_OUTPUT_IO_MODE,
+      g_param_spec_enum ("output-io-mode", "Output IO mode",
+          "Output side I/O mode (matches sink pad)",
+          GST_TYPE_V4L2_DEC_OUTPUT_IO_MODE, DEFAULT_PROP_IO_MODE,
+          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+  g_object_class_install_property (gobject_class, PROP_CAPTURE_IO_MODE,
+      g_param_spec_enum ("capture-io-mode", "Capture IO mode",
+          "Capture I/O mode (matches src pad)",
+          GST_TYPE_V4L2_DEC_CAPTURE_IO_MODE, DEFAULT_PROP_IO_MODE,
+          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+}
+
+/* Install the encoder-specific output/capture "io-mode" properties on
+ * @gobject_class; skipped entirely when is_cuvid is set. */
+void
+gst_v4l2_object_install_m2m_enc_iomode_properties_helper (GObjectClass * gobject_class)
+{
+  if (is_cuvid != FALSE)
+    return;
+
+  g_object_class_install_property (gobject_class, PROP_OUTPUT_IO_MODE,
+      g_param_spec_enum ("output-io-mode", "Output IO mode",
+          "Output side I/O mode (matches sink pad)",
+          GST_TYPE_V4L2_ENC_OUTPUT_IO_MODE, DEFAULT_PROP_IO_MODE,
+          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+  g_object_class_install_property (gobject_class, PROP_CAPTURE_IO_MODE,
+      g_param_spec_enum ("capture-io-mode", "Capture IO mode",
+          "Capture I/O mode (matches src pad)",
+          GST_TYPE_V4L2_ENC_CAPTURE_IO_MODE, DEFAULT_PROP_IO_MODE,
+          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+}
+#endif
+
/**
 * gst_v4l2_object_install_m2m_properties_helper:
 * @gobject_class: class to install the properties on
 *
 * Installs the GObject properties common to all v4l2 memory-to-memory
 * elements: "device", "device-name", "device-fd", "extra-controls", and —
 * only on the cuvid path (is_cuvid == TRUE) — the generic io-mode enums.
 */
void
gst_v4l2_object_install_m2m_properties_helper (GObjectClass * gobject_class)
{
  g_object_class_install_property (gobject_class, PROP_DEVICE,
      g_param_spec_string ("device", "Device", "Device location",
          NULL, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_DEVICE_NAME,
      g_param_spec_string ("device-name", "Device name",
          "Name of the device", DEFAULT_PROP_DEVICE_NAME,
          G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));

  /* -1 means "not open"; the fd is only valid while the device is open. */
  g_object_class_install_property (gobject_class, PROP_DEVICE_FD,
      g_param_spec_int ("device-fd", "File descriptor",
          "File descriptor of the device", -1, G_MAXINT, DEFAULT_PROP_DEVICE_FD,
          G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));

  /* On the cuvid path the generic GST_TYPE_V4L2_IO_MODE enum is used;
   * otherwise the dec/enc specific helpers install their own enums. */
  if (is_cuvid == TRUE) {
    g_object_class_install_property (gobject_class, PROP_OUTPUT_IO_MODE,
        g_param_spec_enum ("output-io-mode", "Output IO mode",
            "Output side I/O mode (matches sink pad)",
            GST_TYPE_V4L2_IO_MODE, DEFAULT_PROP_IO_MODE,
            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

    g_object_class_install_property (gobject_class, PROP_CAPTURE_IO_MODE,
        g_param_spec_enum ("capture-io-mode", "Capture IO mode",
            "Capture I/O mode (matches src pad)",
            GST_TYPE_V4L2_IO_MODE, DEFAULT_PROP_IO_MODE,
            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  }
  g_object_class_install_property (gobject_class, PROP_EXTRA_CONTROLS,
      g_param_spec_boxed ("extra-controls", "Extra Controls",
          "Extra v4l2 controls (CIDs) for the device",
          GST_TYPE_STRUCTURE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
}
+
+/* Support for 32bit off_t, this wrapper is casting off_t to gint64 */
+#ifdef HAVE_LIBV4L2
+#if SIZEOF_OFF_T < 8
+
+static gpointer
+v4l2_mmap_wrapper (gpointer start, gsize length, gint prot, gint flags, gint fd,
+ off_t offset)
+{
+ return v4l2_mmap (start, length, prot, flags, fd, (gint64) offset);
+}
+
+#define v4l2_mmap v4l2_mmap_wrapper
+
+#endif /* SIZEOF_OFF_T < 8 */
+#endif /* HAVE_LIBV4L2 */
+
+GstV4l2Object *
+gst_v4l2_object_new (GstElement * element,
+ GstObject * debug_object,
+ enum v4l2_buf_type type,
+ const char *default_device,
+ GstV4l2GetInOutFunction get_in_out_func,
+ GstV4l2SetInOutFunction set_in_out_func,
+ GstV4l2UpdateFpsFunction update_fps_func)
+{
+ GstV4l2Object *v4l2object;
+
+ /*
+ * some default values
+ */
+ v4l2object = g_new0 (GstV4l2Object, 1);
+
+ v4l2object->type = type;
+ v4l2object->formats = NULL;
+
+ v4l2object->element = element;
+ v4l2object->dbg_obj = debug_object;
+ v4l2object->get_in_out_func = get_in_out_func;
+ v4l2object->set_in_out_func = set_in_out_func;
+ v4l2object->update_fps_func = update_fps_func;
+
+ v4l2object->video_fd = -1;
+ v4l2object->active = FALSE;
+ v4l2object->videodev = g_strdup (default_device);
+
+ v4l2object->norms = NULL;
+ v4l2object->channels = NULL;
+ v4l2object->colors = NULL;
+
+ v4l2object->keep_aspect = TRUE;
+
+ v4l2object->n_v4l2_planes = 0;
+
+ v4l2object->no_initial_format = FALSE;
+
+ /* We now disable libv4l2 by default, but have an env to enable it. */
+#ifdef HAVE_LIBV4L2
+ if (g_getenv ("GST_V4L2_USE_LIBV4L2")) {
+ v4l2object->fd_open = v4l2_fd_open;
+ v4l2object->close = v4l2_close;
+ v4l2object->dup = v4l2_dup;
+ v4l2object->ioctl = v4l2_ioctl;
+ v4l2object->read = v4l2_read;
+ v4l2object->mmap = v4l2_mmap;
+ v4l2object->munmap = v4l2_munmap;
+ } else
+#endif
+ {
+ v4l2object->fd_open = NULL;
+ v4l2object->close = close;
+ v4l2object->dup = dup;
+ v4l2object->ioctl = ioctl;
+ v4l2object->read = read;
+ v4l2object->mmap = mmap;
+ v4l2object->munmap = munmap;
+ }
+
+ return v4l2object;
+}
+
+static gboolean gst_v4l2_object_clear_format_list (GstV4l2Object * v4l2object);
+
+
+void
+gst_v4l2_object_destroy (GstV4l2Object * v4l2object)
+{
+ g_return_if_fail (v4l2object != NULL);
+
+ g_free (v4l2object->videodev);
+
+ g_free (v4l2object->channel);
+
+ if (v4l2object->formats) {
+ gst_v4l2_object_clear_format_list (v4l2object);
+ }
+
+ if (v4l2object->probed_caps) {
+ gst_caps_unref (v4l2object->probed_caps);
+ }
+
+ if (v4l2object->extra_controls) {
+ gst_structure_free (v4l2object->extra_controls);
+ }
+
+ g_free (v4l2object);
+}
+
+
+static gboolean
+gst_v4l2_object_clear_format_list (GstV4l2Object * v4l2object)
+{
+ g_slist_foreach (v4l2object->formats, (GFunc) g_free, NULL);
+ g_slist_free (v4l2object->formats);
+ v4l2object->formats = NULL;
+
+ return TRUE;
+}
+
+static gint
+gst_v4l2_object_prop_to_cid (guint prop_id)
+{
+ gint cid = -1;
+
+ switch (prop_id) {
+ case PROP_BRIGHTNESS:
+ cid = V4L2_CID_BRIGHTNESS;
+ break;
+ case PROP_CONTRAST:
+ cid = V4L2_CID_CONTRAST;
+ break;
+ case PROP_SATURATION:
+ cid = V4L2_CID_SATURATION;
+ break;
+ case PROP_HUE:
+ cid = V4L2_CID_HUE;
+ break;
+ default:
+ GST_WARNING ("unmapped property id: %d", prop_id);
+ }
+ return cid;
+}
+
+
/**
 * gst_v4l2_object_set_property_helper:
 * @v4l2object: the shared v4l2 state
 * @prop_id: the property id being set
 * @value: the new value
 * @pspec: the property's pspec (unused here)
 *
 * Handles the set_property() work for the properties shared between all
 * v4l2 elements. Returns TRUE when @prop_id was recognised and handled,
 * FALSE so the caller can fall through to its own property handling.
 */
gboolean
gst_v4l2_object_set_property_helper (GstV4l2Object * v4l2object,
    guint prop_id, const GValue * value, GParamSpec * pspec)
{
  switch (prop_id) {
    case PROP_DEVICE:
      g_free (v4l2object->videodev);
      v4l2object->videodev = g_value_dup_string (value);
      break;
    case PROP_BRIGHTNESS:
    case PROP_CONTRAST:
    case PROP_SATURATION:
    case PROP_HUE:
    {
      gint cid = gst_v4l2_object_prop_to_cid (prop_id);

      /* Picture controls only reach the device while it is open; when
       * closed the value is dropped rather than cached. */
      if (cid != -1) {
        if (GST_V4L2_IS_OPEN (v4l2object)) {
          gst_v4l2_set_attribute (v4l2object, cid, g_value_get_int (value));
        }
      }
      return TRUE;
    }
      break;
    case PROP_TV_NORM:
      v4l2object->tv_norm = g_value_get_enum (value);
      break;
#ifndef USE_V4L2_TARGET_NV
    case PROP_CHANNEL:
      /* When open, switch channels immediately; otherwise remember the
       * name so gst_v4l2_set_defaults() can apply it at open time. */
      if (GST_V4L2_IS_OPEN (v4l2object)) {
        GstTuner *tuner = GST_TUNER (v4l2object->element);
        GstTunerChannel *channel = gst_tuner_find_channel_by_name (tuner,
            (gchar *) g_value_get_string (value));

        if (channel) {
          /* like gst_tuner_set_channel (tuner, channel)
             without g_object_notify */
          gst_v4l2_tuner_set_channel (v4l2object, channel);
        }
      } else {
        g_free (v4l2object->channel);
        v4l2object->channel = g_value_dup_string (value);
      }
      break;
    case PROP_FREQUENCY:
      /* Same pattern: apply now when open, cache otherwise. */
      if (GST_V4L2_IS_OPEN (v4l2object)) {
        GstTuner *tuner = GST_TUNER (v4l2object->element);
        GstTunerChannel *channel = gst_tuner_get_channel (tuner);

        if (channel &&
            GST_TUNER_CHANNEL_HAS_FLAG (channel, GST_TUNER_CHANNEL_FREQUENCY)) {
          /* like
             gst_tuner_set_frequency (tuner, channel, g_value_get_ulong (value))
             without g_object_notify */
          gst_v4l2_tuner_set_frequency (v4l2object, channel,
              g_value_get_ulong (value));
        }
      } else {
        v4l2object->frequency = g_value_get_ulong (value);
      }
      break;
#endif

    case PROP_IO_MODE:
      v4l2object->req_mode = g_value_get_enum (value);
      break;
    case PROP_CAPTURE_IO_MODE:
      /* capture-io-mode is only valid on the capture-side object. */
      g_return_val_if_fail (!V4L2_TYPE_IS_OUTPUT (v4l2object->type), FALSE);
      v4l2object->req_mode = g_value_get_enum (value);
      break;
    case PROP_OUTPUT_IO_MODE:
      /* output-io-mode is only valid on the output-side object. */
      g_return_val_if_fail (V4L2_TYPE_IS_OUTPUT (v4l2object->type), FALSE);
      v4l2object->req_mode = g_value_get_enum (value);
      break;
    case PROP_EXTRA_CONTROLS:{
      const GstStructure *s = gst_value_get_structure (value);

      /* Keep a private copy; push it to the device now if it is open. */
      if (v4l2object->extra_controls)
        gst_structure_free (v4l2object->extra_controls);

      v4l2object->extra_controls = s ? gst_structure_copy (s) : NULL;
      if (GST_V4L2_IS_OPEN (v4l2object))
        gst_v4l2_set_controls (v4l2object, v4l2object->extra_controls);
      break;
    }
    case PROP_PIXEL_ASPECT_RATIO:
      /* Replace any previous PAR; fall back to 1/1 on a bad string. */
      if (v4l2object->par) {
        g_value_unset (v4l2object->par);
        g_free (v4l2object->par);
      }
      v4l2object->par = g_new0 (GValue, 1);
      g_value_init (v4l2object->par, GST_TYPE_FRACTION);
      if (!g_value_transform (value, v4l2object->par)) {
        g_warning ("Could not transform string to aspect ratio");
        gst_value_set_fraction (v4l2object->par, 1, 1);
      }

      GST_DEBUG_OBJECT (v4l2object->dbg_obj, "set PAR to %d/%d",
          gst_value_get_fraction_numerator (v4l2object->par),
          gst_value_get_fraction_denominator (v4l2object->par));
      break;
    case PROP_FORCE_ASPECT_RATIO:
      v4l2object->keep_aspect = g_value_get_boolean (value);
      break;
    default:
      return FALSE;
      break;
  }
  return TRUE;
}
+
+
/**
 * gst_v4l2_object_get_property_helper:
 * @v4l2object: the shared v4l2 state
 * @prop_id: the property id being read
 * @value: (out): the value to fill in
 * @pspec: the property's pspec (unused here)
 *
 * Handles the get_property() work for the properties shared between all
 * v4l2 elements. Returns TRUE when @prop_id was recognised and handled,
 * FALSE so the caller can fall through to its own property handling.
 */
gboolean
gst_v4l2_object_get_property_helper (GstV4l2Object * v4l2object,
    guint prop_id, GValue * value, GParamSpec * pspec)
{
  switch (prop_id) {
    case PROP_DEVICE:
      g_value_set_string (value, v4l2object->videodev);
      break;
    case PROP_DEVICE_NAME:
    {
      const guchar *new = NULL;

      /* If the device is not open, briefly open it just to read the
       * card name from VIDIOC_QUERYCAP, then close it again. */
      if (GST_V4L2_IS_OPEN (v4l2object)) {
        new = v4l2object->vcap.card;
      } else if (gst_v4l2_open (v4l2object)) {
        new = v4l2object->vcap.card;
        gst_v4l2_close (v4l2object);
      }
      g_value_set_string (value, (gchar *) new);
      break;
    }
    case PROP_DEVICE_FD:
    {
      /* The fd is only meaningful while open; report the default (-1)
       * otherwise. */
      if (GST_V4L2_IS_OPEN (v4l2object))
        g_value_set_int (value, v4l2object->video_fd);
      else
        g_value_set_int (value, DEFAULT_PROP_DEVICE_FD);
      break;
    }
    case PROP_FLAGS:
    {
      guint flags = 0;

      if (GST_V4L2_IS_OPEN (v4l2object)) {
        flags |= v4l2object->device_caps &
            (V4L2_CAP_VIDEO_CAPTURE |
            V4L2_CAP_VIDEO_OUTPUT |
            V4L2_CAP_VIDEO_OVERLAY |
            V4L2_CAP_VBI_CAPTURE |
            V4L2_CAP_VBI_OUTPUT | V4L2_CAP_TUNER | V4L2_CAP_AUDIO);

        /* Fold the multi-planar capability bits into the classic ones so
         * callers only need to test the single-planar flags. */
        if (v4l2object->device_caps & V4L2_CAP_VIDEO_CAPTURE_MPLANE)
          flags |= V4L2_CAP_VIDEO_CAPTURE;

        if (v4l2object->device_caps & V4L2_CAP_VIDEO_OUTPUT_MPLANE)
          flags |= V4L2_CAP_VIDEO_OUTPUT;
      }
      g_value_set_flags (value, flags);
      break;
    }
    case PROP_BRIGHTNESS:
    case PROP_CONTRAST:
    case PROP_SATURATION:
    case PROP_HUE:
    {
      gint cid = gst_v4l2_object_prop_to_cid (prop_id);

      /* Picture controls can only be read from an open device; when
       * closed (or on failure) @value is left untouched. */
      if (cid != -1) {
        if (GST_V4L2_IS_OPEN (v4l2object)) {
          gint v;
          if (gst_v4l2_get_attribute (v4l2object, cid, &v)) {
            g_value_set_int (value, v);
          }
        }
      }
      return TRUE;
    }
      break;
    case PROP_TV_NORM:
      g_value_set_enum (value, v4l2object->tv_norm);
      break;
    case PROP_IO_MODE:
      g_value_set_enum (value, v4l2object->req_mode);
      break;
    case PROP_CAPTURE_IO_MODE:
      g_return_val_if_fail (!V4L2_TYPE_IS_OUTPUT (v4l2object->type), FALSE);
      g_value_set_enum (value, v4l2object->req_mode);
      break;
    case PROP_OUTPUT_IO_MODE:
      g_return_val_if_fail (V4L2_TYPE_IS_OUTPUT (v4l2object->type), FALSE);
      g_value_set_enum (value, v4l2object->req_mode);
      break;
    case PROP_EXTRA_CONTROLS:
      gst_value_set_structure (value, v4l2object->extra_controls);
      break;
    case PROP_PIXEL_ASPECT_RATIO:
      if (v4l2object->par)
        g_value_transform (v4l2object->par, value);
      break;
    case PROP_FORCE_ASPECT_RATIO:
      g_value_set_boolean (value, v4l2object->keep_aspect);
      break;
    default:
      return FALSE;
      break;
  }
  return TRUE;
}
+
+static void
+gst_v4l2_get_driver_min_buffers (GstV4l2Object * v4l2object)
+{
+ struct v4l2_control control = { 0, };
+
+ g_return_if_fail (GST_V4L2_IS_OPEN (v4l2object));
+
+ if (V4L2_TYPE_IS_OUTPUT (v4l2object->type))
+ control.id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT;
+ else
+ control.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
+
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_G_CTRL, &control) == 0) {
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj,
+ "driver requires a minimum of %d buffers", control.value);
+ v4l2object->min_buffers = control.value;
+ } else {
+ v4l2object->min_buffers = 0;
+ }
+}
+
+#ifndef USE_V4L2_TARGET_NV
+static void
+gst_v4l2_set_defaults (GstV4l2Object * v4l2object)
+{
+ GstTunerNorm *norm = NULL;
+ GstTunerChannel *channel = NULL;
+ GstTuner *tuner;
+
+ if (!GST_IS_TUNER (v4l2object->element))
+ return;
+
+ tuner = GST_TUNER (v4l2object->element);
+
+ if (v4l2object->tv_norm)
+ norm = gst_v4l2_tuner_get_norm_by_std_id (v4l2object, v4l2object->tv_norm);
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "tv_norm=0x%" G_GINT64_MODIFIER "x, "
+ "norm=%p", (guint64) v4l2object->tv_norm, norm);
+ if (norm) {
+ gst_tuner_set_norm (tuner, norm);
+ } else {
+ norm =
+ GST_TUNER_NORM (gst_tuner_get_norm (GST_TUNER (v4l2object->element)));
+ if (norm) {
+ v4l2object->tv_norm =
+ gst_v4l2_tuner_get_std_id_by_norm (v4l2object, norm);
+ gst_tuner_norm_changed (tuner, norm);
+ }
+ }
+
+ if (v4l2object->channel)
+ channel = gst_tuner_find_channel_by_name (tuner, v4l2object->channel);
+ if (channel) {
+ gst_tuner_set_channel (tuner, channel);
+ } else {
+ channel =
+ GST_TUNER_CHANNEL (gst_tuner_get_channel (GST_TUNER
+ (v4l2object->element)));
+ if (channel) {
+ g_free (v4l2object->channel);
+ v4l2object->channel = g_strdup (channel->label);
+ gst_tuner_channel_changed (tuner, channel);
+ }
+ }
+
+ if (channel
+ && GST_TUNER_CHANNEL_HAS_FLAG (channel, GST_TUNER_CHANNEL_FREQUENCY)) {
+ if (v4l2object->frequency != 0) {
+ gst_tuner_set_frequency (tuner, channel, v4l2object->frequency);
+ } else {
+ v4l2object->frequency = gst_tuner_get_frequency (tuner, channel);
+ if (v4l2object->frequency == 0) {
+ /* guess */
+ gst_tuner_set_frequency (tuner, channel, 1000);
+ } else {
+ }
+ }
+ }
+}
+#endif
+
/**
 * gst_v4l2_object_open:
 * @v4l2object: the object to open
 *
 * Opens the configured video device. On non-NV builds the tuner defaults
 * are synchronized right after opening; on NV builds the object is
 * flagged as an encoder when its device path matches one of the NVENC
 * device nodes. Returns TRUE on success, FALSE if the device could not
 * be opened.
 */
gboolean
gst_v4l2_object_open (GstV4l2Object * v4l2object)
{
#ifndef USE_V4L2_TARGET_NV
  if (gst_v4l2_open (v4l2object))
    gst_v4l2_set_defaults (v4l2object);
  else
    return FALSE;
#else
  if (!gst_v4l2_open (v4l2object))
    return FALSE;
  /* Encoder instances are identified purely by device path. */
  v4l2object->is_encode = !g_strcmp0(v4l2object->videodev, V4L2_DEVICE_PATH_NVENC)
      || !g_strcmp0(v4l2object->videodev, V4L2_DEVICE_PATH_NVENC_ALT);
#endif

  return TRUE;
}
+
+gboolean
+gst_v4l2_object_open_shared (GstV4l2Object * v4l2object, GstV4l2Object * other)
+{
+ gboolean ret;
+
+ ret = gst_v4l2_dup (v4l2object, other);
+
+ return ret;
+}
+
+gboolean
+gst_v4l2_object_close (GstV4l2Object * v4l2object)
+{
+ if (!gst_v4l2_close (v4l2object))
+ return FALSE;
+
+ gst_caps_replace (&v4l2object->probed_caps, NULL);
+
+ /* reset our copy of the device caps */
+ v4l2object->device_caps = 0;
+
+ if (v4l2object->formats) {
+ gst_v4l2_object_clear_format_list (v4l2object);
+ }
+
+ if (v4l2object->par) {
+ g_value_unset (v4l2object->par);
+ g_free (v4l2object->par);
+ v4l2object->par = NULL;
+ }
+
+ return TRUE;
+}
+
/* Find the cached struct v4l2_fmtdesc whose pixelformat matches @fourcc.
 * JPEG variants (MJPEG/JPEG/PJPG) are treated as interchangeable.
 * Returns NULL when @fourcc is 0 or no entry matches. */
static struct v4l2_fmtdesc *
gst_v4l2_object_get_format_from_fourcc (GstV4l2Object * v4l2object,
    guint32 fourcc)
{
  struct v4l2_fmtdesc *fmt;
  GSList *walk;

  if (fourcc == 0)
    return NULL;

  walk = gst_v4l2_object_get_format_list (v4l2object);
  while (walk) {
    fmt = (struct v4l2_fmtdesc *) walk->data;
    if (fmt->pixelformat == fourcc)
      return fmt;
    /* special case for jpeg */
    if (fmt->pixelformat == V4L2_PIX_FMT_MJPEG ||
        fmt->pixelformat == V4L2_PIX_FMT_JPEG ||
        fmt->pixelformat == V4L2_PIX_FMT_PJPG) {
      if (fourcc == V4L2_PIX_FMT_JPEG || fourcc == V4L2_PIX_FMT_MJPEG ||
          fourcc == V4L2_PIX_FMT_PJPG) {
        return fmt;
      }
    }
#ifdef USE_V4L2_TARGET_NV
    /* NOTE(review): for these NV-specific fourccs the code overwrites the
     * pixelformat of whichever cached entry it is currently visiting (the
     * first one, in practice) and returns it — this mutates the cached
     * format list. Presumably a deliberate workaround for formats the
     * driver does not enumerate; confirm before relying on the list
     * contents afterwards. */
    if (fourcc == V4L2_PIX_FMT_P010M ||
        fourcc == V4L2_PIX_FMT_P012M ||
        fourcc == V4L2_PIX_FMT_NV24M ||
        fourcc == V4L2_PIX_FMT_YUV444_10LE ||
        fourcc == V4L2_PIX_FMT_YUV444_12LE) {
      fmt->pixelformat = fourcc;
      return fmt;
    }
#endif
    walk = g_slist_next (walk);
  }

  return NULL;
}
+
+
+
+/* complete made up ranking, the values themselves are meaningless */
+/* These ranks MUST be X such that X<<15 fits on a signed int - see
+ the comment at the end of gst_v4l2_object_format_get_rank. */
+#define YUV_BASE_RANK 1000
+#define JPEG_BASE_RANK 500
+#define DV_BASE_RANK 200
+#define RGB_BASE_RANK 100
+#define YUV_ODD_BASE_RANK 50
+#define RGB_ODD_BASE_RANK 25
+#define BAYER_BASE_RANK 15
+#define S910_BASE_RANK 10
+#define GREY_BASE_RANK 5
+#define PWC_BASE_RANK 1
+
/* Rank a driver-reported format for preference sorting: higher rank is
 * preferred. The numeric values are arbitrary, only their relative order
 * matters. Formats not emulated by libv4l2 are shifted left by 15 so
 * every native format outranks every emulated one (see the rank macros
 * above, which must stay small enough for that shift not to overflow). */
static gint
gst_v4l2_object_format_get_rank (const struct v4l2_fmtdesc *fmt)
{
  guint32 fourcc = fmt->pixelformat;
  gboolean emulated = ((fmt->flags & V4L2_FMT_FLAG_EMULATED) != 0);
  gint rank = 0;

  switch (fourcc) {
    case V4L2_PIX_FMT_MJPEG:
    case V4L2_PIX_FMT_PJPG:
      rank = JPEG_BASE_RANK;
      break;
    case V4L2_PIX_FMT_JPEG:
      rank = JPEG_BASE_RANK + 1;
      break;
    case V4L2_PIX_FMT_MPEG:     /* MPEG */
      rank = JPEG_BASE_RANK + 2;
      break;

    case V4L2_PIX_FMT_RGB332:
    case V4L2_PIX_FMT_ARGB555:
    case V4L2_PIX_FMT_XRGB555:
    case V4L2_PIX_FMT_RGB555:
    case V4L2_PIX_FMT_ARGB555X:
    case V4L2_PIX_FMT_XRGB555X:
    case V4L2_PIX_FMT_RGB555X:
    case V4L2_PIX_FMT_BGR666:
    case V4L2_PIX_FMT_RGB565:
    case V4L2_PIX_FMT_RGB565X:
    case V4L2_PIX_FMT_RGB444:
    case V4L2_PIX_FMT_Y4:
    case V4L2_PIX_FMT_Y6:
    case V4L2_PIX_FMT_Y10:
    case V4L2_PIX_FMT_Y12:
    case V4L2_PIX_FMT_Y10BPACK:
    case V4L2_PIX_FMT_YUV555:
    case V4L2_PIX_FMT_YUV565:
    case V4L2_PIX_FMT_YUV32:
    case V4L2_PIX_FMT_NV12MT_16X16:
    case V4L2_PIX_FMT_NV42:
    case V4L2_PIX_FMT_H264_MVC:
      rank = RGB_ODD_BASE_RANK;
      break;

    case V4L2_PIX_FMT_RGB24:
    case V4L2_PIX_FMT_BGR24:
      rank = RGB_BASE_RANK - 1;
      break;

    case V4L2_PIX_FMT_RGB32:
    case V4L2_PIX_FMT_BGR32:
    case V4L2_PIX_FMT_ABGR32:
    case V4L2_PIX_FMT_XBGR32:
    case V4L2_PIX_FMT_ARGB32:
    case V4L2_PIX_FMT_XRGB32:
      rank = RGB_BASE_RANK;
      break;

    case V4L2_PIX_FMT_GREY:     /* 8 Greyscale */
      rank = GREY_BASE_RANK;
      break;

    case V4L2_PIX_FMT_NV12:     /* 12 Y/CbCr 4:2:0 */
    case V4L2_PIX_FMT_NV12M:    /* Same as NV12 */
    case V4L2_PIX_FMT_NV12MT:   /* NV12 64x32 tile */
    case V4L2_PIX_FMT_P010:     /* Y/CbCr 4:2:0, 10 bits per channel */
    case V4L2_PIX_FMT_P010M:

#ifdef USE_V4L2_TARGET_NV
    case V4L2_PIX_FMT_P012:     /* Y/CbCr 4:2:0, 12 bits per channel */
    case V4L2_PIX_FMT_P012M:
    case V4L2_PIX_FMT_NV24M:
    case V4L2_PIX_FMT_YUV444_10LE:
    case V4L2_PIX_FMT_YUV444_12LE:
#endif
    case V4L2_PIX_FMT_NV21:     /* 12 Y/CrCb 4:2:0 */
    case V4L2_PIX_FMT_NV21M:    /* Same as NV21 */
    case V4L2_PIX_FMT_YYUV:     /* 16 YUV 4:2:2 */
    case V4L2_PIX_FMT_HI240:    /* 8 8-bit color */
    case V4L2_PIX_FMT_NV16:     /* 16 Y/CbCr 4:2:2 */
    case V4L2_PIX_FMT_NV16M:    /* Same as NV16 */
    case V4L2_PIX_FMT_NV61:     /* 16 Y/CrCb 4:2:2 */
    case V4L2_PIX_FMT_NV61M:    /* Same as NV61 */
    case V4L2_PIX_FMT_NV24:     /* 24 Y/CrCb 4:4:4 */
      rank = YUV_ODD_BASE_RANK;
      break;

    case V4L2_PIX_FMT_YVU410:   /* YVU9, 9 bits per pixel */
      rank = YUV_BASE_RANK + 3;
      break;
    case V4L2_PIX_FMT_YUV410:   /* YUV9, 9 bits per pixel */
      rank = YUV_BASE_RANK + 2;
      break;
    case V4L2_PIX_FMT_YUV420:   /* I420, 12 bits per pixel */
    case V4L2_PIX_FMT_YUV420M:
      rank = YUV_BASE_RANK + 7;
      break;
    case V4L2_PIX_FMT_YUYV:     /* YUY2, 16 bits per pixel */
      rank = YUV_BASE_RANK + 10;
      break;
    case V4L2_PIX_FMT_YVU420:   /* YV12, 12 bits per pixel */
      rank = YUV_BASE_RANK + 6;
      break;
    case V4L2_PIX_FMT_UYVY:     /* UYVY, 16 bits per pixel */
      rank = YUV_BASE_RANK + 9;
      break;
    case V4L2_PIX_FMT_YUV444:
      rank = YUV_BASE_RANK + 6;
      break;
    case V4L2_PIX_FMT_Y41P:     /* Y41P, 12 bits per pixel */
      rank = YUV_BASE_RANK + 5;
      break;
    case V4L2_PIX_FMT_YUV411P:  /* Y41B, 12 bits per pixel */
      rank = YUV_BASE_RANK + 4;
      break;
    case V4L2_PIX_FMT_YUV422P:  /* Y42B, 16 bits per pixel */
      rank = YUV_BASE_RANK + 8;
      break;

    case V4L2_PIX_FMT_DV:
      rank = DV_BASE_RANK;
      break;

    case V4L2_PIX_FMT_WNVA:     /* Winnov hw compres */
      rank = 0;
      break;

    case V4L2_PIX_FMT_SBGGR8:
    case V4L2_PIX_FMT_SGBRG8:
    case V4L2_PIX_FMT_SGRBG8:
    case V4L2_PIX_FMT_SRGGB8:
      rank = BAYER_BASE_RANK;
      break;

    case V4L2_PIX_FMT_SN9C10X:
      rank = S910_BASE_RANK;
      break;

    case V4L2_PIX_FMT_PWC1:
      rank = PWC_BASE_RANK;
      break;
    case V4L2_PIX_FMT_PWC2:
      rank = PWC_BASE_RANK;
      break;

    default:
      rank = 0;
      break;
  }

  /* All ranks are below 1<<15 so a shift by 15
   * will a) make all non-emulated formats larger
   * than emulated and b) will not overflow
   */
  if (!emulated)
    rank <<= 15;

  return rank;
}
+
+
+
+static gint
+format_cmp_func (gconstpointer a, gconstpointer b)
+{
+ const struct v4l2_fmtdesc *fa = a;
+ const struct v4l2_fmtdesc *fb = b;
+
+ if (fa->pixelformat == fb->pixelformat)
+ return 0;
+
+ return gst_v4l2_object_format_get_rank (fb) -
+ gst_v4l2_object_format_get_rank (fa);
+}
+
+/******************************************************
+ * gst_v4l2_object_fill_format_list():
+ * create list of supported capture formats
+ * return value: TRUE on success, FALSE on error
+ ******************************************************/
+static gboolean
+gst_v4l2_object_fill_format_list (GstV4l2Object * v4l2object,
+ enum v4l2_buf_type type)
+{
+ gint n;
+ struct v4l2_fmtdesc *format;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "getting src format enumerations");
+
+ /* format enumeration */
+ for (n = 0;; n++) {
+ format = g_new0 (struct v4l2_fmtdesc, 1);
+
+ format->index = n;
+ format->type = type;
+
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_ENUM_FMT, format) < 0) {
+ if (errno == EINVAL) {
+ g_free (format);
+ break; /* end of enumeration */
+ } else {
+ goto failed;
+ }
+ }
+
+ GST_LOG_OBJECT (v4l2object->dbg_obj, "index: %u", format->index);
+ GST_LOG_OBJECT (v4l2object->dbg_obj, "type: %d", format->type);
+ GST_LOG_OBJECT (v4l2object->dbg_obj, "flags: %08x", format->flags);
+ GST_LOG_OBJECT (v4l2object->dbg_obj, "description: '%s'",
+ format->description);
+ GST_LOG_OBJECT (v4l2object->dbg_obj, "pixelformat: %" GST_FOURCC_FORMAT,
+ GST_FOURCC_ARGS (format->pixelformat));
+
+ /* sort formats according to our preference; we do this, because caps
+ * are probed in the order the formats are in the list, and the order of
+ * formats in the final probed caps matters for things like fixation */
+ v4l2object->formats = g_slist_insert_sorted (v4l2object->formats, format,
+ (GCompareFunc) format_cmp_func);
+ }
+
+#ifndef GST_DISABLE_GST_DEBUG
+ {
+ GSList *l;
+
+ GST_INFO_OBJECT (v4l2object->dbg_obj, "got %d format(s):", n);
+ for (l = v4l2object->formats; l != NULL; l = l->next) {
+ format = l->data;
+
+ GST_INFO_OBJECT (v4l2object->dbg_obj,
+ " %" GST_FOURCC_FORMAT "%s", GST_FOURCC_ARGS (format->pixelformat),
+ ((format->flags & V4L2_FMT_FLAG_EMULATED)) ? " (emulated)" : "");
+ }
+ }
+#endif
+
+ return TRUE;
+
+ /* ERRORS */
+failed:
+ {
+ g_free (format);
+
+ if (v4l2object->element)
+ return FALSE;
+
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, SETTINGS,
+ (_("Failed to enumerate possible video formats device '%s' can work "
+ "with"), v4l2object->videodev),
+ ("Failed to get number %d in pixelformat enumeration for %s. (%d - %s)",
+ n, v4l2object->videodev, errno, g_strerror (errno)));
+
+ return FALSE;
+ }
+}
+
+/*
+ * Get the list of supported capture formats, a list of
+ * struct v4l2_fmtdesc.
+ */
+static GSList *
+gst_v4l2_object_get_format_list (GstV4l2Object * v4l2object)
+{
+ if (!v4l2object->formats) {
+
+ /* check usual way */
+ gst_v4l2_object_fill_format_list (v4l2object, v4l2object->type);
+
+ /* if our driver supports multi-planar
+ * and if formats are still empty then we can workaround driver bug
+ * by also looking up formats as if our device was not supporting
+ * multiplanar */
+ if (!v4l2object->formats) {
+ switch (v4l2object->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ gst_v4l2_object_fill_format_list (v4l2object,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ break;
+
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ gst_v4l2_object_fill_format_list (v4l2object,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+ return v4l2object->formats;
+}
+
/* Map a V4L2 pixelformat fourcc onto the corresponding GstVideoFormat.
 * Returns GST_VIDEO_FORMAT_UNKNOWN for fourccs with no raw-video
 * equivalent (compressed formats, bayer, etc.). */
static GstVideoFormat
gst_v4l2_object_v4l2fourcc_to_video_format (guint32 fourcc)
{
  GstVideoFormat format;
  switch (fourcc) {
    case V4L2_PIX_FMT_GREY:    /* 8 Greyscale */
      format = GST_VIDEO_FORMAT_GRAY8;
      break;
    case V4L2_PIX_FMT_Y16:
      format = GST_VIDEO_FORMAT_GRAY16_LE;
      break;
    case V4L2_PIX_FMT_Y16_BE:
      format = GST_VIDEO_FORMAT_GRAY16_BE;
      break;
    case V4L2_PIX_FMT_XRGB555:
    case V4L2_PIX_FMT_RGB555:
      format = GST_VIDEO_FORMAT_RGB15;
      break;
    case V4L2_PIX_FMT_XRGB555X:
    case V4L2_PIX_FMT_RGB555X:
      format = GST_VIDEO_FORMAT_BGR15;
      break;
    case V4L2_PIX_FMT_RGB565:
      format = GST_VIDEO_FORMAT_RGB16;
      break;
    case V4L2_PIX_FMT_RGB24:
      format = GST_VIDEO_FORMAT_RGB;
      break;
    case V4L2_PIX_FMT_BGR24:
      format = GST_VIDEO_FORMAT_BGR;
      break;
    case V4L2_PIX_FMT_XRGB32:
    case V4L2_PIX_FMT_RGB32:
      format = GST_VIDEO_FORMAT_xRGB;
      break;
    case V4L2_PIX_FMT_XBGR32:
    case V4L2_PIX_FMT_BGR32:
      format = GST_VIDEO_FORMAT_BGRx;
      break;
    case V4L2_PIX_FMT_ABGR32:
      format = GST_VIDEO_FORMAT_BGRA;
      break;
    case V4L2_PIX_FMT_ARGB32:
      format = GST_VIDEO_FORMAT_ARGB;
      break;
    case V4L2_PIX_FMT_NV12:
    case V4L2_PIX_FMT_NV12M:
      format = GST_VIDEO_FORMAT_NV12;
      break;
#ifdef USE_V4L2_TARGET_NV
    /* NOTE: Currently video format GST_VIDEO_FORMAT_P010_10LE used for 10-bit NV12.
       Once v4l2 defines video format for 10 bit semi-planer NV12 with videodev2.h,
       will make required correction with relevant Gstreamer defined video formats to be used. */
    case V4L2_PIX_FMT_P010:
    case V4L2_PIX_FMT_P010M:
      format = GST_VIDEO_FORMAT_P010_10LE;
      break;

    /* NOTE: Gstreamer have not defined video format for 12-bit NV12 yet.
       Hence, Currently WAR applied using GST_VIDEO_FORMAT_I420_12LE format. */
    case V4L2_PIX_FMT_P012:
    case V4L2_PIX_FMT_P012M:
      format = GST_VIDEO_FORMAT_I420_12LE;
      break;
    case V4L2_PIX_FMT_YUV444:
      format = GST_VIDEO_FORMAT_Y444;
      break;
    case V4L2_PIX_FMT_YUV444_10LE:
      format = GST_VIDEO_FORMAT_Y444_10LE;
      break;
    case V4L2_PIX_FMT_YUV444_12LE:
      format = GST_VIDEO_FORMAT_Y444_12LE;
      break;
#endif
    case V4L2_PIX_FMT_NV12MT:
      format = GST_VIDEO_FORMAT_NV12_64Z32;
      break;
    case V4L2_PIX_FMT_NV21:
    case V4L2_PIX_FMT_NV21M:
      format = GST_VIDEO_FORMAT_NV21;
      break;
    case V4L2_PIX_FMT_YVU410:
      format = GST_VIDEO_FORMAT_YVU9;
      break;
    case V4L2_PIX_FMT_YUV410:
      format = GST_VIDEO_FORMAT_YUV9;
      break;
    case V4L2_PIX_FMT_YUV420:
    case V4L2_PIX_FMT_YUV420M:
      format = GST_VIDEO_FORMAT_I420;
      break;
    case V4L2_PIX_FMT_YUYV:
      format = GST_VIDEO_FORMAT_YUY2;
      break;
    case V4L2_PIX_FMT_YVU420:
      format = GST_VIDEO_FORMAT_YV12;
      break;
    case V4L2_PIX_FMT_UYVY:
      format = GST_VIDEO_FORMAT_UYVY;
      break;
    case V4L2_PIX_FMT_YUV411P:
      format = GST_VIDEO_FORMAT_Y41B;
      break;
    case V4L2_PIX_FMT_YUV422P:
#ifdef USE_V4L2_TARGET_NV
    case V4L2_PIX_FMT_YUV422M:
#endif
      format = GST_VIDEO_FORMAT_Y42B;
      break;
    case V4L2_PIX_FMT_YVYU:
      format = GST_VIDEO_FORMAT_YVYU;
      break;
    case V4L2_PIX_FMT_NV16:
    case V4L2_PIX_FMT_NV16M:
      format = GST_VIDEO_FORMAT_NV16;
      break;
    case V4L2_PIX_FMT_NV61:
    case V4L2_PIX_FMT_NV61M:
      format = GST_VIDEO_FORMAT_NV61;
      break;
    case V4L2_PIX_FMT_NV24:
#ifdef USE_V4L2_TARGET_NV
    case V4L2_PIX_FMT_NV24M:
#endif
      format = GST_VIDEO_FORMAT_NV24;
      break;
    default:
      format = GST_VIDEO_FORMAT_UNKNOWN;
      break;
  }

  return format;
}
+
+static gboolean
+gst_v4l2_object_v4l2fourcc_is_rgb (guint32 fourcc)
+{
+ gboolean ret = FALSE;
+
+ switch (fourcc) {
+ case V4L2_PIX_FMT_XRGB555:
+ case V4L2_PIX_FMT_RGB555:
+ case V4L2_PIX_FMT_XRGB555X:
+ case V4L2_PIX_FMT_RGB555X:
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_XRGB32:
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_XBGR32:
+ case V4L2_PIX_FMT_BGR32:
+ case V4L2_PIX_FMT_ABGR32:
+ case V4L2_PIX_FMT_ARGB32:
+ ret = TRUE;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
/* Build a bare GstStructure (media type plus fixed fields, but no
 * width/height/framerate) describing @fourcc. Raw formats delegate to
 * gst_v4l2_object_v4l2fourcc_to_video_format(). Returns NULL for
 * fourccs that have no caps mapping; size/rate ranges are added later
 * by gst_v4l2_object_v4l2fourcc_to_structure(). */
static GstStructure *
gst_v4l2_object_v4l2fourcc_to_bare_struct (guint32 fourcc)
{
  GstStructure *structure = NULL;

  switch (fourcc) {
    case V4L2_PIX_FMT_MJPEG:   /* Motion-JPEG */
    case V4L2_PIX_FMT_PJPG:    /* Progressive-JPEG */
    case V4L2_PIX_FMT_JPEG:    /* JFIF JPEG */
      structure = gst_structure_new_empty ("image/jpeg");
      break;
    case V4L2_PIX_FMT_MPEG1:
      structure = gst_structure_new ("video/mpeg",
          "mpegversion", G_TYPE_INT, 1, NULL);
      break;
    case V4L2_PIX_FMT_MPEG2:
      structure = gst_structure_new ("video/mpeg",
          "mpegversion", G_TYPE_INT, 2, NULL);
      break;
    case V4L2_PIX_FMT_MPEG4:
    case V4L2_PIX_FMT_XVID:
      structure = gst_structure_new ("video/mpeg",
          "mpegversion", G_TYPE_INT, 4, "systemstream",
          G_TYPE_BOOLEAN, FALSE, NULL);
      break;
#ifdef USE_V4L2_TARGET_NV
    case V4L2_PIX_FMT_DIVX4:
      structure = gst_structure_new ("video/x-divx",
          "divxversion", G_TYPE_INT, 4, "systemstream",
          G_TYPE_BOOLEAN, FALSE, NULL);
      break;
    case V4L2_PIX_FMT_DIVX5:
      structure = gst_structure_new ("video/x-divx",
          "divxversion", G_TYPE_INT, 5, "systemstream",
          G_TYPE_BOOLEAN, FALSE, NULL);
      break;
#endif
    case V4L2_PIX_FMT_H263:
      structure = gst_structure_new ("video/x-h263",
          "variant", G_TYPE_STRING, "itu", NULL);
      break;
    case V4L2_PIX_FMT_H264:    /* H.264 */
      structure = gst_structure_new ("video/x-h264",
          "stream-format", G_TYPE_STRING, "byte-stream", "alignment",
          G_TYPE_STRING, "au", NULL);
      break;
    case V4L2_PIX_FMT_H264_NO_SC:
      structure = gst_structure_new ("video/x-h264",
          "stream-format", G_TYPE_STRING, "avc", "alignment",
          G_TYPE_STRING, "au", NULL);
      break;
#ifdef USE_V4L2_TARGET_NV
    case V4L2_PIX_FMT_H265:    /* H.265 */
      structure = gst_structure_new ("video/x-h265",
          "stream-format", G_TYPE_STRING, "byte-stream", "alignment",
          G_TYPE_STRING, "au", NULL);
      break;
    case V4L2_PIX_FMT_AV1:     /* AV1 */
      structure = gst_structure_new_empty ("video/x-av1");
      break;
#endif
    case V4L2_PIX_FMT_VC1_ANNEX_G:
    case V4L2_PIX_FMT_VC1_ANNEX_L:
      structure = gst_structure_new ("video/x-wmv",
          "wmvversion", G_TYPE_INT, 3, "format", G_TYPE_STRING, "WVC1", NULL);
      break;
    case V4L2_PIX_FMT_VP8:
      structure = gst_structure_new_empty ("video/x-vp8");
      break;
    case V4L2_PIX_FMT_VP9:
      structure = gst_structure_new_empty ("video/x-vp9");
      break;
    /* All raw formats below share the GstVideoFormat-based mapping at
     * the end of this case group. */
    case V4L2_PIX_FMT_GREY:    /* 8 Greyscale */
    case V4L2_PIX_FMT_Y16:
    case V4L2_PIX_FMT_Y16_BE:
    case V4L2_PIX_FMT_XRGB555:
    case V4L2_PIX_FMT_RGB555:
    case V4L2_PIX_FMT_XRGB555X:
    case V4L2_PIX_FMT_RGB555X:
    case V4L2_PIX_FMT_RGB565:
    case V4L2_PIX_FMT_RGB24:
    case V4L2_PIX_FMT_BGR24:
    case V4L2_PIX_FMT_RGB32:
    case V4L2_PIX_FMT_XRGB32:
    case V4L2_PIX_FMT_ARGB32:
    case V4L2_PIX_FMT_BGR32:
    case V4L2_PIX_FMT_XBGR32:
    case V4L2_PIX_FMT_ABGR32:
    case V4L2_PIX_FMT_NV12:    /* 12 Y/CbCr 4:2:0 */
    case V4L2_PIX_FMT_NV12M:
    case V4L2_PIX_FMT_NV12MT:
#ifdef USE_V4L2_TARGET_NV
    case V4L2_PIX_FMT_P010:    /* Y/CbCr 4:2:0, 10 bits per channel */
    case V4L2_PIX_FMT_P010M:
    case V4L2_PIX_FMT_P012:    /* Y/CbCr 4:2:0, 12 bits per channel */
    case V4L2_PIX_FMT_P012M:
#endif
    case V4L2_PIX_FMT_NV21:    /* 12 Y/CrCb 4:2:0 */
    case V4L2_PIX_FMT_NV21M:
    case V4L2_PIX_FMT_NV16:    /* 16 Y/CbCr 4:2:2 */
    case V4L2_PIX_FMT_NV16M:
    case V4L2_PIX_FMT_NV61:    /* 16 Y/CrCb 4:2:2 */
    case V4L2_PIX_FMT_NV61M:
#ifdef USE_V4L2_TARGET_NV
    case V4L2_PIX_FMT_NV24M:
    case V4L2_PIX_FMT_YUV444:
    case V4L2_PIX_FMT_YUV444_10LE:
    case V4L2_PIX_FMT_YUV444_12LE:
#endif
    case V4L2_PIX_FMT_NV24:    /* 24 Y/CrCb 4:4:4 */
    case V4L2_PIX_FMT_YVU410:
    case V4L2_PIX_FMT_YUV410:
    case V4L2_PIX_FMT_YUV420:  /* I420/IYUV */
    case V4L2_PIX_FMT_YUV420M:
    case V4L2_PIX_FMT_YUYV:
    case V4L2_PIX_FMT_YVU420:
    case V4L2_PIX_FMT_UYVY:
    case V4L2_PIX_FMT_YUV422P:
#ifdef USE_V4L2_TARGET_NV
    case V4L2_PIX_FMT_YUV422M:
#endif
    case V4L2_PIX_FMT_YVYU:
    case V4L2_PIX_FMT_YUV411P:{
      GstVideoFormat format;
      format = gst_v4l2_object_v4l2fourcc_to_video_format (fourcc);
      if (format != GST_VIDEO_FORMAT_UNKNOWN)
        structure = gst_structure_new ("video/x-raw",
            "format", G_TYPE_STRING, gst_video_format_to_string (format), NULL);
      break;
    }
    case V4L2_PIX_FMT_DV:
      structure =
          gst_structure_new ("video/x-dv", "systemstream", G_TYPE_BOOLEAN, TRUE,
          NULL);
      break;
    case V4L2_PIX_FMT_MPEG:    /* MPEG */
      structure = gst_structure_new ("video/mpegts",
          "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
      break;
    case V4L2_PIX_FMT_WNVA:    /* Winnov hw compres */
      break;
    case V4L2_PIX_FMT_SBGGR8:
    case V4L2_PIX_FMT_SGBRG8:
    case V4L2_PIX_FMT_SGRBG8:
    case V4L2_PIX_FMT_SRGGB8:
      structure = gst_structure_new ("video/x-bayer", "format", G_TYPE_STRING,
          fourcc == V4L2_PIX_FMT_SBGGR8 ? "bggr" :
          fourcc == V4L2_PIX_FMT_SGBRG8 ? "gbrg" :
          fourcc == V4L2_PIX_FMT_SGRBG8 ? "grbg" :
          /* fourcc == V4L2_PIX_FMT_SRGGB8 ? */ "rggb", NULL);
      break;
    case V4L2_PIX_FMT_SN9C10X:
      structure = gst_structure_new_empty ("video/x-sonix");
      break;
    case V4L2_PIX_FMT_PWC1:
      structure = gst_structure_new_empty ("video/x-pwc1");
      break;
    case V4L2_PIX_FMT_PWC2:
      structure = gst_structure_new_empty ("video/x-pwc2");
      break;
    case V4L2_PIX_FMT_RGB332:
    case V4L2_PIX_FMT_BGR666:
    case V4L2_PIX_FMT_ARGB555X:
    case V4L2_PIX_FMT_RGB565X:
    case V4L2_PIX_FMT_RGB444:
    case V4L2_PIX_FMT_YYUV:    /* 16 YUV 4:2:2 */
    case V4L2_PIX_FMT_HI240:   /* 8 8-bit color */
    case V4L2_PIX_FMT_Y4:
    case V4L2_PIX_FMT_Y6:
    case V4L2_PIX_FMT_Y10:
    case V4L2_PIX_FMT_Y12:
    case V4L2_PIX_FMT_Y10BPACK:
    case V4L2_PIX_FMT_YUV555:
    case V4L2_PIX_FMT_YUV565:
    case V4L2_PIX_FMT_Y41P:
    case V4L2_PIX_FMT_YUV32:
    case V4L2_PIX_FMT_NV12MT_16X16:
    case V4L2_PIX_FMT_NV42:
    case V4L2_PIX_FMT_H264_MVC:
    default:
      GST_DEBUG ("Unsupported fourcc 0x%08x %" GST_FOURCC_FORMAT,
          fourcc, GST_FOURCC_ARGS (fourcc));
      break;
  }

  return structure;
}
+
+GstStructure *
+gst_v4l2_object_v4l2fourcc_to_structure (guint32 fourcc)
+{
+ GstStructure *template;
+#ifndef USE_V4L2_TARGET_NV
+ gint i;
+#else
+ guint i;
+#endif
+
+ template = gst_v4l2_object_v4l2fourcc_to_bare_struct (fourcc);
+
+ if (template == NULL)
+ goto done;
+
+ for (i = 0; i < GST_V4L2_FORMAT_COUNT; i++) {
+ if (gst_v4l2_formats[i].format != fourcc)
+ continue;
+
+ if (gst_v4l2_formats[i].dimensions) {
+ gst_structure_set (template,
+ "width", GST_TYPE_INT_RANGE, 1, GST_V4L2_MAX_SIZE,
+ "height", GST_TYPE_INT_RANGE, 1, GST_V4L2_MAX_SIZE,
+ "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
+ }
+ break;
+ }
+
+done:
+ return template;
+}
+
+
+static GstCaps *
+gst_v4l2_object_get_caps_helper (GstV4L2FormatFlags flags)
+{
+ GstStructure *structure;
+ GstCaps *caps;
+ guint i;
+
+ caps = gst_caps_new_empty ();
+ for (i = 0; i < GST_V4L2_FORMAT_COUNT; i++) {
+
+ if ((gst_v4l2_formats[i].flags & flags) == 0)
+ continue;
+
+ structure =
+ gst_v4l2_object_v4l2fourcc_to_bare_struct (gst_v4l2_formats[i].format);
+
+ if (structure) {
+ GstStructure *alt_s = NULL;
+
+ if (gst_v4l2_formats[i].dimensions) {
+ gst_structure_set (structure,
+ "width", GST_TYPE_INT_RANGE, 1, GST_V4L2_MAX_SIZE,
+ "height", GST_TYPE_INT_RANGE, 1, GST_V4L2_MAX_SIZE,
+ "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
+ }
+
+ switch (gst_v4l2_formats[i].format) {
+ case V4L2_PIX_FMT_RGB32:
+ alt_s = gst_structure_copy (structure);
+ gst_structure_set (alt_s, "format", G_TYPE_STRING, "ARGB", NULL);
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ alt_s = gst_structure_copy (structure);
+ gst_structure_set (alt_s, "format", G_TYPE_STRING, "BGRA", NULL);
+ default:
+ break;
+ }
+
+ gst_caps_append_structure (caps, structure);
+
+ if (alt_s)
+ gst_caps_append_structure (caps, alt_s);
+ }
+ }
+
+ return gst_caps_simplify (caps);
+}
+
+GstCaps *
+gst_v4l2_object_get_all_caps (void)
+{
+ static GstCaps *caps = NULL;
+
+ if (g_once_init_enter (&caps)) {
+ GstCaps *all_caps = gst_v4l2_object_get_caps_helper (GST_V4L2_ALL);
+#ifndef USE_V4L2_GST_HEADER_VER_1_8
+ GST_MINI_OBJECT_FLAG_SET (all_caps, GST_MINI_OBJECT_FLAG_MAY_BE_LEAKED);
+#endif
+ g_once_init_leave (&caps, all_caps);
+ }
+
+ return caps;
+}
+
+GstCaps *
+gst_v4l2_object_get_raw_caps (void)
+{
+ static GstCaps *caps = NULL;
+
+ if (g_once_init_enter (&caps)) {
+ GstCaps *raw_caps = gst_v4l2_object_get_caps_helper (GST_V4L2_RAW);
+#ifndef USE_V4L2_GST_HEADER_VER_1_8
+ GST_MINI_OBJECT_FLAG_SET (raw_caps, GST_MINI_OBJECT_FLAG_MAY_BE_LEAKED);
+#endif
+ g_once_init_leave (&caps, raw_caps);
+ }
+
+ return caps;
+}
+
+GstCaps *
+gst_v4l2_object_get_codec_caps (void)
+{
+ static GstCaps *caps = NULL;
+
+ if (g_once_init_enter (&caps)) {
+ GstCaps *codec_caps = gst_v4l2_object_get_caps_helper (GST_V4L2_CODEC);
+#ifndef USE_V4L2_GST_HEADER_VER_1_8
+ GST_MINI_OBJECT_FLAG_SET (codec_caps, GST_MINI_OBJECT_FLAG_MAY_BE_LEAKED);
+#endif
+ g_once_init_leave (&caps, codec_caps);
+ }
+
+ return caps;
+}
+
+/* collect data for the given caps
+ * @caps: given input caps
+ * @format: location for the v4l format
+ * @w/@h: location for width and height
+ * @fps_n/@fps_d: location for framerate
+ * @size: location for expected size of the frame or 0 if unknown
+ */
+static gboolean
+gst_v4l2_object_get_caps_info (GstV4l2Object * v4l2object, GstCaps * caps,
+ struct v4l2_fmtdesc **format, GstVideoInfo * info)
+{
+ GstStructure *structure;
+ guint32 fourcc = 0, fourcc_nc = 0;
+ const gchar *mimetype;
+ struct v4l2_fmtdesc *fmt = NULL;
+
+ structure = gst_caps_get_structure (caps, 0);
+
+ mimetype = gst_structure_get_name (structure);
+
+ if (!gst_video_info_from_caps (info, caps))
+ goto invalid_format;
+
+ if (g_str_equal (mimetype, "video/x-raw")) {
+ switch (GST_VIDEO_INFO_FORMAT (info)) {
+ case GST_VIDEO_FORMAT_Y444:
+ fourcc = V4L2_PIX_FMT_YUV444;
+ break;
+ case GST_VIDEO_FORMAT_I420:
+ fourcc = V4L2_PIX_FMT_YUV420;
+ fourcc_nc = V4L2_PIX_FMT_YUV420M;
+ break;
+ case GST_VIDEO_FORMAT_YUY2:
+ fourcc = V4L2_PIX_FMT_YUYV;
+ break;
+ case GST_VIDEO_FORMAT_UYVY:
+ fourcc = V4L2_PIX_FMT_UYVY;
+ break;
+ case GST_VIDEO_FORMAT_YV12:
+ fourcc = V4L2_PIX_FMT_YVU420;
+ break;
+ case GST_VIDEO_FORMAT_Y41B:
+ fourcc = V4L2_PIX_FMT_YUV411P;
+ break;
+ case GST_VIDEO_FORMAT_Y42B:
+ fourcc = V4L2_PIX_FMT_YUV422P;
+ break;
+ case GST_VIDEO_FORMAT_NV12:
+ fourcc = V4L2_PIX_FMT_NV12;
+ fourcc_nc = V4L2_PIX_FMT_NV12M;
+ break;
+#ifdef USE_V4L2_TARGET_NV
+ case GST_VIDEO_FORMAT_P010_10LE:
+ fourcc = V4L2_PIX_FMT_P010;
+ fourcc_nc = V4L2_PIX_FMT_P010M;
+ break;
+ case GST_VIDEO_FORMAT_I420_12LE:
+ fourcc = V4L2_PIX_FMT_P012;
+ fourcc_nc = V4L2_PIX_FMT_P012M;
+ break;
+ case GST_VIDEO_FORMAT_Y444_10LE:
+ fourcc_nc = V4L2_PIX_FMT_YUV444_10LE;
+ break;
+ case GST_VIDEO_FORMAT_Y444_12LE:
+ fourcc_nc = V4L2_PIX_FMT_YUV444_12LE;
+ break;
+#endif
+ case GST_VIDEO_FORMAT_NV12_64Z32:
+ fourcc_nc = V4L2_PIX_FMT_NV12MT;
+ break;
+ case GST_VIDEO_FORMAT_NV21:
+ fourcc = V4L2_PIX_FMT_NV21;
+ fourcc_nc = V4L2_PIX_FMT_NV21M;
+ break;
+ case GST_VIDEO_FORMAT_NV16:
+ fourcc = V4L2_PIX_FMT_NV16;
+ fourcc_nc = V4L2_PIX_FMT_NV16M;
+ break;
+ case GST_VIDEO_FORMAT_NV61:
+ fourcc = V4L2_PIX_FMT_NV61;
+ fourcc_nc = V4L2_PIX_FMT_NV61M;
+ break;
+ case GST_VIDEO_FORMAT_NV24:
+ fourcc = V4L2_PIX_FMT_NV24;
+#ifdef USE_V4L2_TARGET_NV
+ fourcc_nc = V4L2_PIX_FMT_NV24M;
+#endif
+ break;
+ case GST_VIDEO_FORMAT_YVYU:
+ fourcc = V4L2_PIX_FMT_YVYU;
+ break;
+ case GST_VIDEO_FORMAT_RGB15:
+ fourcc = V4L2_PIX_FMT_RGB555;
+ fourcc_nc = V4L2_PIX_FMT_XRGB555;
+ break;
+ case GST_VIDEO_FORMAT_RGB16:
+ fourcc = V4L2_PIX_FMT_RGB565;
+ break;
+ case GST_VIDEO_FORMAT_RGB:
+ fourcc = V4L2_PIX_FMT_RGB24;
+ break;
+ case GST_VIDEO_FORMAT_BGR:
+ fourcc = V4L2_PIX_FMT_BGR24;
+ break;
+ case GST_VIDEO_FORMAT_xRGB:
+ fourcc = V4L2_PIX_FMT_RGB32;
+ fourcc_nc = V4L2_PIX_FMT_XRGB32;
+ break;
+ case GST_VIDEO_FORMAT_ARGB:
+ fourcc = V4L2_PIX_FMT_RGB32;
+ fourcc_nc = V4L2_PIX_FMT_ARGB32;
+ break;
+ case GST_VIDEO_FORMAT_BGRx:
+ fourcc = V4L2_PIX_FMT_BGR32;
+ fourcc_nc = V4L2_PIX_FMT_XBGR32;
+ break;
+ case GST_VIDEO_FORMAT_BGRA:
+ fourcc = V4L2_PIX_FMT_BGR32;
+ fourcc_nc = V4L2_PIX_FMT_ABGR32;
+ break;
+ case GST_VIDEO_FORMAT_GRAY8:
+ fourcc = V4L2_PIX_FMT_GREY;
+ break;
+ case GST_VIDEO_FORMAT_GRAY16_LE:
+ fourcc = V4L2_PIX_FMT_Y16;
+ break;
+ case GST_VIDEO_FORMAT_GRAY16_BE:
+ fourcc = V4L2_PIX_FMT_Y16_BE;
+ break;
+ default:
+ break;
+ }
+ } else {
+ if (g_str_equal (mimetype, "video/mpegts")) {
+ fourcc = V4L2_PIX_FMT_MPEG;
+#ifdef USE_V4L2_TARGET_NV
+ } else if (g_str_equal (mimetype, "video/x-divx")) {
+ fourcc = V4L2_PIX_FMT_MPEG4;
+#endif
+ } else if (g_str_equal (mimetype, "video/x-dv")) {
+ fourcc = V4L2_PIX_FMT_DV;
+ } else if (g_str_equal (mimetype, "image/jpeg")) {
+ fourcc = V4L2_PIX_FMT_JPEG;
+ } else if (g_str_equal (mimetype, "video/mpeg")) {
+ gint version;
+ if (gst_structure_get_int (structure, "mpegversion", &version)) {
+ switch (version) {
+ case 1:
+ fourcc = V4L2_PIX_FMT_MPEG1;
+ break;
+ case 2:
+ fourcc = V4L2_PIX_FMT_MPEG2;
+ break;
+ case 4:
+ fourcc = V4L2_PIX_FMT_MPEG4;
+ fourcc_nc = V4L2_PIX_FMT_XVID;
+ break;
+ default:
+ break;
+ }
+ }
+ } else if (g_str_equal (mimetype, "video/x-h263")) {
+ fourcc = V4L2_PIX_FMT_H263;
+ } else if (g_str_equal (mimetype, "video/x-h264")) {
+ const gchar *stream_format =
+ gst_structure_get_string (structure, "stream-format");
+ if (g_str_equal (stream_format, "avc"))
+ fourcc = V4L2_PIX_FMT_H264_NO_SC;
+ else
+ fourcc = V4L2_PIX_FMT_H264;
+ } else if (g_str_equal (mimetype, "video/x-vp8")) {
+ fourcc = V4L2_PIX_FMT_VP8;
+ } else if (g_str_equal (mimetype, "video/x-vp9")) {
+ fourcc = V4L2_PIX_FMT_VP9;
+#ifdef USE_V4L2_TARGET_NV
+ } else if (g_str_equal (mimetype, "video/x-av1")) {
+ fourcc = V4L2_PIX_FMT_AV1;
+ } else if (g_str_equal (mimetype, "video/x-h265")) {
+ fourcc = V4L2_PIX_FMT_H265;
+#endif
+ } else if (g_str_equal (mimetype, "video/x-bayer")) {
+ const gchar *format = gst_structure_get_string (structure, "format");
+ if (format) {
+ if (!g_ascii_strcasecmp (format, "bggr"))
+ fourcc = V4L2_PIX_FMT_SBGGR8;
+ else if (!g_ascii_strcasecmp (format, "gbrg"))
+ fourcc = V4L2_PIX_FMT_SGBRG8;
+ else if (!g_ascii_strcasecmp (format, "grbg"))
+ fourcc = V4L2_PIX_FMT_SGRBG8;
+ else if (!g_ascii_strcasecmp (format, "rggb"))
+ fourcc = V4L2_PIX_FMT_SRGGB8;
+ }
+ } else if (g_str_equal (mimetype, "video/x-sonix")) {
+ fourcc = V4L2_PIX_FMT_SN9C10X;
+ } else if (g_str_equal (mimetype, "video/x-pwc1")) {
+ fourcc = V4L2_PIX_FMT_PWC1;
+ } else if (g_str_equal (mimetype, "video/x-pwc2")) {
+ fourcc = V4L2_PIX_FMT_PWC2;
+ }
+ }
+
+
+ /* Prefer the non-contiguous if supported */
+ v4l2object->prefered_non_contiguous = TRUE;
+
+ if (fourcc_nc)
+ fmt = gst_v4l2_object_get_format_from_fourcc (v4l2object, fourcc_nc);
+ else if (fourcc == 0)
+ goto unhandled_format;
+
+ if (fmt == NULL) {
+ fmt = gst_v4l2_object_get_format_from_fourcc (v4l2object, fourcc);
+ v4l2object->prefered_non_contiguous = FALSE;
+ }
+
+ if (fmt == NULL)
+ goto unsupported_format;
+
+ *format = fmt;
+
+ return TRUE;
+
+ /* ERRORS */
+invalid_format:
+ {
+ GST_DEBUG_OBJECT (v4l2object, "invalid format");
+ return FALSE;
+ }
+unhandled_format:
+ {
+ GST_DEBUG_OBJECT (v4l2object, "unhandled format");
+ return FALSE;
+ }
+unsupported_format:
+ {
+ GST_DEBUG_OBJECT (v4l2object, "unsupported format");
+ return FALSE;
+ }
+}
+
+static gboolean
+gst_v4l2_object_get_nearest_size (GstV4l2Object * v4l2object,
+ guint32 pixelformat, gint * width, gint * height);
+
+static void
+gst_v4l2_object_add_aspect_ratio (GstV4l2Object * v4l2object, GstStructure * s)
+{
+ if (v4l2object->keep_aspect && v4l2object->par)
+ gst_structure_set_value (s, "pixel-aspect-ratio", v4l2object->par);
+}
+
+/* returns TRUE if the value was changed in place, otherwise FALSE */
+static gboolean
+gst_v4l2src_value_simplify (GValue * val)
+{
+ /* simplify list of one value to one value */
+ if (GST_VALUE_HOLDS_LIST (val) && gst_value_list_get_size (val) == 1) {
+ const GValue *list_val;
+ GValue new_val = G_VALUE_INIT;
+
+ list_val = gst_value_list_get_value (val, 0);
+ g_value_init (&new_val, G_VALUE_TYPE (list_val));
+ g_value_copy (list_val, &new_val);
+ g_value_unset (val);
+ *val = new_val;
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+static gboolean
+gst_v4l2_object_get_interlace_mode (enum v4l2_field field,
+ GstVideoInterlaceMode * interlace_mode)
+{
+ /* NB: If you add new return values, please fix mode_strings in
+ * gst_v4l2_object_add_interlace_mode */
+ switch (field) {
+ case V4L2_FIELD_ANY:
+ GST_ERROR
+ ("Driver bug detected - check driver with v4l2-compliance from http://git.linuxtv.org/v4l-utils.git\n");
+ /* fallthrough */
+ case V4L2_FIELD_NONE:
+ *interlace_mode = GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;
+ return TRUE;
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ *interlace_mode = GST_VIDEO_INTERLACE_MODE_INTERLEAVED;
+ return TRUE;
+ default:
+ GST_ERROR ("Unknown enum v4l2_field %d", field);
+ return FALSE;
+ }
+}
+
/* Translate the colorimetry reported in @fmt into a GstVideoColorimetry.
 * The defaults implied by the v4l2 colorspace are applied first, then
 * refined by any explicit quantization, ycbcr encoding and transfer
 * function values the driver reported.
 * Returns FALSE when the v4l2 colorspace value is unknown. */
static gboolean
gst_v4l2_object_get_colorspace (struct v4l2_format *fmt,
    GstVideoColorimetry * cinfo)
{
  gboolean is_rgb =
      gst_v4l2_object_v4l2fourcc_is_rgb (fmt->fmt.pix.pixelformat);
  enum v4l2_colorspace colorspace;
  enum v4l2_quantization range;
  enum v4l2_ycbcr_encoding matrix;
  enum v4l2_xfer_func transfer;
  gboolean ret = TRUE;

  /* pix and pix_mp carry the colorimetry fields at different offsets, so
   * read them through the matching union member. */
  if (V4L2_TYPE_IS_MULTIPLANAR (fmt->type)) {
    colorspace = fmt->fmt.pix_mp.colorspace;
    range = fmt->fmt.pix_mp.quantization;
    matrix = fmt->fmt.pix_mp.ycbcr_enc;
    transfer = fmt->fmt.pix_mp.xfer_func;
  } else {
    colorspace = fmt->fmt.pix.colorspace;
    range = fmt->fmt.pix.quantization;
    matrix = fmt->fmt.pix.ycbcr_enc;
    transfer = fmt->fmt.pix.xfer_func;
  }

  /* First step, set the defaults for each primaries */
  switch (colorspace) {
    case V4L2_COLORSPACE_SMPTE170M:
      cinfo->range = GST_VIDEO_COLOR_RANGE_16_235;
      cinfo->matrix = GST_VIDEO_COLOR_MATRIX_BT601;
      cinfo->transfer = GST_VIDEO_TRANSFER_BT709;
      cinfo->primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE170M;
      break;
    case V4L2_COLORSPACE_REC709:
      cinfo->range = GST_VIDEO_COLOR_RANGE_16_235;
      cinfo->matrix = GST_VIDEO_COLOR_MATRIX_BT709;
      cinfo->transfer = GST_VIDEO_TRANSFER_BT709;
      cinfo->primaries = GST_VIDEO_COLOR_PRIMARIES_BT709;
      break;
    case V4L2_COLORSPACE_SRGB:
    case V4L2_COLORSPACE_JPEG:
      cinfo->range = GST_VIDEO_COLOR_RANGE_0_255;
      cinfo->matrix = GST_VIDEO_COLOR_MATRIX_BT601;
      cinfo->transfer = GST_VIDEO_TRANSFER_SRGB;
      cinfo->primaries = GST_VIDEO_COLOR_PRIMARIES_BT709;
      break;
    case V4L2_COLORSPACE_ADOBERGB:
      cinfo->range = GST_VIDEO_COLOR_RANGE_16_235;
      cinfo->matrix = GST_VIDEO_COLOR_MATRIX_BT601;
      cinfo->transfer = GST_VIDEO_TRANSFER_ADOBERGB;
      cinfo->primaries = GST_VIDEO_COLOR_PRIMARIES_ADOBERGB;
      break;
    case V4L2_COLORSPACE_BT2020:
      cinfo->range = GST_VIDEO_COLOR_RANGE_16_235;
      cinfo->matrix = GST_VIDEO_COLOR_MATRIX_BT2020;
      cinfo->transfer = GST_VIDEO_TRANSFER_BT2020_12;
      cinfo->primaries = GST_VIDEO_COLOR_PRIMARIES_BT2020;
      break;
    case V4L2_COLORSPACE_SMPTE240M:
      cinfo->range = GST_VIDEO_COLOR_RANGE_16_235;
      cinfo->matrix = GST_VIDEO_COLOR_MATRIX_SMPTE240M;
      cinfo->transfer = GST_VIDEO_TRANSFER_SMPTE240M;
      cinfo->primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE240M;
      break;
    case V4L2_COLORSPACE_470_SYSTEM_M:
      cinfo->range = GST_VIDEO_COLOR_RANGE_16_235;
      cinfo->matrix = GST_VIDEO_COLOR_MATRIX_BT601;
      cinfo->transfer = GST_VIDEO_TRANSFER_BT709;
      cinfo->primaries = GST_VIDEO_COLOR_PRIMARIES_BT470M;
      break;
    case V4L2_COLORSPACE_470_SYSTEM_BG:
      cinfo->range = GST_VIDEO_COLOR_RANGE_16_235;
      cinfo->matrix = GST_VIDEO_COLOR_MATRIX_BT601;
      cinfo->transfer = GST_VIDEO_TRANSFER_BT709;
      cinfo->primaries = GST_VIDEO_COLOR_PRIMARIES_BT470BG;
      break;
    case V4L2_COLORSPACE_RAW:
      /* Explicitly unknown */
      cinfo->range = GST_VIDEO_COLOR_RANGE_UNKNOWN;
      cinfo->matrix = GST_VIDEO_COLOR_MATRIX_UNKNOWN;
      cinfo->transfer = GST_VIDEO_TRANSFER_UNKNOWN;
      cinfo->primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
      break;
    default:
      GST_DEBUG ("Unknown enum v4l2_colorspace %d", colorspace);
      ret = FALSE;
      break;
  }

  if (!ret)
    goto done;

  /* Second step, apply any custom variation */
  switch (range) {
    case V4L2_QUANTIZATION_FULL_RANGE:
      cinfo->range = GST_VIDEO_COLOR_RANGE_0_255;
      break;
    case V4L2_QUANTIZATION_LIM_RANGE:
      cinfo->range = GST_VIDEO_COLOR_RANGE_16_235;
      break;
    case V4L2_QUANTIZATION_DEFAULT:
      /* replicated V4L2_MAP_QUANTIZATION_DEFAULT macro behavior */
      if (is_rgb && colorspace == V4L2_COLORSPACE_BT2020)
        cinfo->range = GST_VIDEO_COLOR_RANGE_16_235;
      else if (is_rgb || matrix == V4L2_YCBCR_ENC_XV601
          || matrix == V4L2_YCBCR_ENC_XV709
          || colorspace == V4L2_COLORSPACE_JPEG)
        cinfo->range = GST_VIDEO_COLOR_RANGE_0_255;
      else
        cinfo->range = GST_VIDEO_COLOR_RANGE_16_235;
      break;
    default:
      GST_WARNING ("Unknown enum v4l2_quantization value %d", range);
      cinfo->range = GST_VIDEO_COLOR_RANGE_UNKNOWN;
      break;
  }

  switch (matrix) {
    case V4L2_YCBCR_ENC_XV601:
    case V4L2_YCBCR_ENC_SYCC:
      GST_FIXME ("XV601 and SYCC not defined, assuming 601");
      /* fallthrough */
    case V4L2_YCBCR_ENC_601:
      cinfo->matrix = GST_VIDEO_COLOR_MATRIX_BT601;
      break;
    case V4L2_YCBCR_ENC_XV709:
      GST_FIXME ("XV709 not defined, assuming 709");
      /* fallthrough */
    case V4L2_YCBCR_ENC_709:
      cinfo->matrix = GST_VIDEO_COLOR_MATRIX_BT709;
      break;
    case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
      GST_FIXME ("BT2020 with constant luma is not defined, assuming BT2020");
      /* fallthrough */
    case V4L2_YCBCR_ENC_BT2020:
      cinfo->matrix = GST_VIDEO_COLOR_MATRIX_BT2020;
      break;
    case V4L2_YCBCR_ENC_SMPTE240M:
      cinfo->matrix = GST_VIDEO_COLOR_MATRIX_SMPTE240M;
      break;
    case V4L2_YCBCR_ENC_DEFAULT:
      /* nothing, just use defaults for colorspace */
      break;
    default:
      GST_WARNING ("Unknown enum v4l2_ycbcr_encoding value %d", matrix);
      cinfo->matrix = GST_VIDEO_COLOR_MATRIX_UNKNOWN;
      break;
  }

  /* Set identity matrix for R'G'B' formats to avoid creating
   * confusion. This though is cosmetic as it's now properly ignored by
   * the video info API and videoconvert. */
  if (is_rgb)
    cinfo->matrix = GST_VIDEO_COLOR_MATRIX_RGB;

  switch (transfer) {
    case V4L2_XFER_FUNC_709:
      /* NOTE(review): reads fmt.pix.height even for multi-planar formats;
       * pix and pix_mp lay out width/height identically at the start of the
       * union, so this presumably works — confirm against struct
       * v4l2_format. */
      if (fmt->fmt.pix.height >= 2160)
        cinfo->transfer = GST_VIDEO_TRANSFER_BT2020_12;
      else
        cinfo->transfer = GST_VIDEO_TRANSFER_BT709;
      break;
    case V4L2_XFER_FUNC_SRGB:
      cinfo->transfer = GST_VIDEO_TRANSFER_SRGB;
      break;
    case V4L2_XFER_FUNC_ADOBERGB:
      cinfo->transfer = GST_VIDEO_TRANSFER_ADOBERGB;
      break;
    case V4L2_XFER_FUNC_SMPTE240M:
      cinfo->transfer = GST_VIDEO_TRANSFER_SMPTE240M;
      break;
    case V4L2_XFER_FUNC_NONE:
      cinfo->transfer = GST_VIDEO_TRANSFER_GAMMA10;
      break;
    case V4L2_XFER_FUNC_DEFAULT:
      /* nothing, just use defaults for colorspace */
      break;
    default:
      GST_WARNING ("Unknown enum v4l2_xfer_func value %d", transfer);
      cinfo->transfer = GST_VIDEO_TRANSFER_UNKNOWN;
      break;
  }

done:
  return ret;
}
+
+static int
+gst_v4l2_object_try_fmt (GstV4l2Object * v4l2object,
+ struct v4l2_format *try_fmt)
+{
+ int fd = v4l2object->video_fd;
+ struct v4l2_format fmt;
+ int r;
+
+ memcpy (&fmt, try_fmt, sizeof (fmt));
+ r = v4l2object->ioctl (fd, VIDIOC_TRY_FMT, &fmt);
+
+ if (r < 0 && errno == ENOTTY) {
+ /* The driver might not implement TRY_FMT, in which case we will try
+ S_FMT to probe */
+ if (GST_V4L2_IS_ACTIVE (v4l2object))
+ goto error;
+
+ memcpy (&fmt, try_fmt, sizeof (fmt));
+ r = v4l2object->ioctl (fd, VIDIOC_S_FMT, &fmt);
+ }
+ memcpy (try_fmt, &fmt, sizeof (fmt));
+
+ return r;
+
+error:
+ memcpy (try_fmt, &fmt, sizeof (fmt));
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Unable to try format: %s", g_strerror (errno));
+ return r;
+}
+
+
+static void
+gst_v4l2_object_add_interlace_mode (GstV4l2Object * v4l2object,
+ GstStructure * s, guint32 width, guint32 height, guint32 pixelformat)
+{
+ struct v4l2_format fmt;
+ GValue interlace_formats = { 0, };
+#ifndef USE_V4L2_TARGET_NV
+ GstVideoInterlaceMode interlace_mode, prev = -1;
+#else
+ GstVideoInterlaceMode interlace_mode = -1, prev = -1;
+#endif
+
+ const gchar *mode_strings[] = { "progressive",
+ "interleaved",
+ "mixed"
+ };
+
+ if (!g_str_equal (gst_structure_get_name (s), "video/x-raw"))
+ return;
+
+ if (v4l2object->never_interlaced) {
+ gst_structure_set (s, "interlace-mode", G_TYPE_STRING, "progressive", NULL);
+ return;
+ }
+
+ g_value_init (&interlace_formats, GST_TYPE_LIST);
+
+ /* Try twice - once for NONE, once for INTERLACED. */
+ memset (&fmt, 0, sizeof (fmt));
+ fmt.type = v4l2object->type;
+ fmt.fmt.pix.width = width;
+ fmt.fmt.pix.height = height;
+ fmt.fmt.pix.pixelformat = pixelformat;
+ fmt.fmt.pix.field = V4L2_FIELD_NONE;
+
+ if (gst_v4l2_object_try_fmt (v4l2object, &fmt) == 0 &&
+ gst_v4l2_object_get_interlace_mode (fmt.fmt.pix.field, &interlace_mode)) {
+ GValue interlace_enum = { 0, };
+ g_value_init (&interlace_enum, G_TYPE_STRING);
+ g_value_set_string (&interlace_enum, mode_strings[interlace_mode]);
+ gst_value_list_append_and_take_value (&interlace_formats, &interlace_enum);
+ prev = interlace_mode;
+ }
+
+ memset (&fmt, 0, sizeof (fmt));
+ fmt.type = v4l2object->type;
+ fmt.fmt.pix.width = width;
+ fmt.fmt.pix.height = height;
+ fmt.fmt.pix.pixelformat = pixelformat;
+ fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
+
+ if (gst_v4l2_object_try_fmt (v4l2object, &fmt) == 0 &&
+ gst_v4l2_object_get_interlace_mode (fmt.fmt.pix.field, &interlace_mode) &&
+ prev != interlace_mode) {
+ GValue interlace_enum = { 0, };
+ g_value_init (&interlace_enum, G_TYPE_STRING);
+ g_value_set_string (&interlace_enum, mode_strings[interlace_mode]);
+ gst_value_list_append_and_take_value (&interlace_formats, &interlace_enum);
+ }
+
+ if (gst_v4l2src_value_simplify (&interlace_formats)
+ || gst_value_list_get_size (&interlace_formats) > 0)
+ gst_structure_take_value (s, "interlace-mode", &interlace_formats);
+ else
+ GST_WARNING_OBJECT (v4l2object, "Failed to determine interlace mode");
+
+ g_value_unset(&interlace_formats);
+ return;
+}
+
+static void
+gst_v4l2_object_fill_colorimetry_list (GValue * list,
+ GstVideoColorimetry * cinfo)
+{
+ GValue colorimetry = G_VALUE_INIT;
+ guint size;
+ guint i;
+ gboolean found = FALSE;
+
+ g_value_init (&colorimetry, G_TYPE_STRING);
+ g_value_take_string (&colorimetry, gst_video_colorimetry_to_string (cinfo));
+
+ /* only insert if no duplicate */
+ size = gst_value_list_get_size (list);
+ for (i = 0; i < size; i++) {
+ const GValue *tmp;
+
+ tmp = gst_value_list_get_value (list, i);
+ if (gst_value_compare (&colorimetry, tmp) == GST_VALUE_EQUAL) {
+ found = TRUE;
+ break;
+ }
+ }
+
+ if (!found)
+ gst_value_list_append_and_take_value (list, &colorimetry);
+ else
+ g_value_unset (&colorimetry);
+}
+
/* Probe the colorimetries the device accepts for @pixelformat at
 * @width x @height and set them as a "colorimetry" list on @s.  The
 * device's default is inserted first so downstream prefers it.  Leaves
 * @s untouched when no colorimetry could be determined. */
static void
gst_v4l2_object_add_colorspace (GstV4l2Object * v4l2object, GstStructure * s,
    guint32 width, guint32 height, guint32 pixelformat)
{
  struct v4l2_format fmt;
  GValue list = G_VALUE_INIT;
  GstVideoColorimetry cinfo;
  enum v4l2_colorspace req_cspace;

  memset (&fmt, 0, sizeof (fmt));
  fmt.type = v4l2object->type;
  fmt.fmt.pix.width = width;
  fmt.fmt.pix.height = height;
  fmt.fmt.pix.pixelformat = pixelformat;

  g_value_init (&list, GST_TYPE_LIST);

  /* step 1: get device default colorspace and insert it first as
   * it should be the preferred one */
  if (gst_v4l2_object_try_fmt (v4l2object, &fmt) == 0) {
    if (gst_v4l2_object_get_colorspace (&fmt, &cinfo))
      gst_v4l2_object_fill_colorimetry_list (&list, &cinfo);
  }

  /* step 2: probe all colorspace other than default
   * We don't probe all colorspace, range, matrix and transfer combination to
   * avoid ioctl flooding which could greatly increase initialization time
   * with low-speed devices (UVC...) */
  for (req_cspace = V4L2_COLORSPACE_SMPTE170M;
      req_cspace <= V4L2_COLORSPACE_RAW; req_cspace++) {
    /* V4L2_COLORSPACE_BT878 is deprecated and shall not be used, so skip */
    if (req_cspace == V4L2_COLORSPACE_BT878)
      continue;

    if (V4L2_TYPE_IS_MULTIPLANAR (v4l2object->type))
      fmt.fmt.pix_mp.colorspace = req_cspace;
    else
      fmt.fmt.pix.colorspace = req_cspace;

    if (gst_v4l2_object_try_fmt (v4l2object, &fmt) == 0) {
      enum v4l2_colorspace colorspace;

      if (V4L2_TYPE_IS_MULTIPLANAR (v4l2object->type))
        colorspace = fmt.fmt.pix_mp.colorspace;
      else
        colorspace = fmt.fmt.pix.colorspace;

      /* the driver echoes back the colorspace it accepted; anything else
       * means the request was remapped and is not genuinely supported */
      if (colorspace == req_cspace) {
        if (gst_v4l2_object_get_colorspace (&fmt, &cinfo))
          gst_v4l2_object_fill_colorimetry_list (&list, &cinfo);
      }
    }
  }

  /* the structure takes ownership of the list on success; otherwise the
   * empty list must be released here */
  if (gst_value_list_get_size (&list) > 0)
    gst_structure_take_value (s, "colorimetry", &list);
  else
    g_value_unset (&list);

  return;
}
+
/* The frame interval enumeration code first appeared in Linux 2.6.19. */
/* Probe the frame rates the device supports for @pixelformat at
 * @width x @height via VIDIOC_ENUM_FRAMEINTERVALS and return a copy of
 * @template with width, height and (when probing succeeded) framerate
 * filled in.  If the driver cannot enumerate intervals, the template's
 * default framerate range is kept.  Returns NULL only when the driver
 * reports an unknown frame-interval type. */
static GstStructure *
gst_v4l2_object_probe_caps_for_format_and_size (GstV4l2Object * v4l2object,
    guint32 pixelformat,
    guint32 width, guint32 height, const GstStructure * template)
{
  gint fd = v4l2object->video_fd;
  struct v4l2_frmivalenum ival;
  guint32 num, denom;
  GstStructure *s;
  GValue rates = { 0, };

  memset (&ival, 0, sizeof (struct v4l2_frmivalenum));
  ival.index = 0;
  ival.pixel_format = pixelformat;
  ival.width = width;
  ival.height = height;

  GST_LOG_OBJECT (v4l2object->dbg_obj,
      "get frame interval for %ux%u, %" GST_FOURCC_FORMAT, width, height,
      GST_FOURCC_ARGS (pixelformat));

  /* keep in mind that v4l2 gives us frame intervals (durations); we invert the
   * fraction to get framerate */
  if (v4l2object->ioctl (fd, VIDIOC_ENUM_FRAMEINTERVALS, &ival) < 0)
    goto enum_frameintervals_failed;

  if (ival.type == V4L2_FRMIVAL_TYPE_DISCRETE) {
    GValue rate = { 0, };

    g_value_init (&rates, GST_TYPE_LIST);
    g_value_init (&rate, GST_TYPE_FRACTION);

    /* enumerate every discrete interval the driver reports */
    do {
      num = ival.discrete.numerator;
      denom = ival.discrete.denominator;

      if (num > G_MAXINT || denom > G_MAXINT) {
        /* let us hope we don't get here... */
        num >>= 1;
        denom >>= 1;
      }

      GST_LOG_OBJECT (v4l2object->dbg_obj, "adding discrete framerate: %d/%d",
          denom, num);

      /* swap to get the framerate */
      gst_value_set_fraction (&rate, denom, num);
      gst_value_list_append_value (&rates, &rate);

      ival.index++;
    } while (v4l2object->ioctl (fd, VIDIOC_ENUM_FRAMEINTERVALS, &ival) >= 0);
  } else if (ival.type == V4L2_FRMIVAL_TYPE_STEPWISE) {
    GValue min = { 0, };
    GValue step = { 0, };
    GValue max = { 0, };
    gboolean added = FALSE;
    guint32 minnum, mindenom;
    guint32 maxnum, maxdenom;

    g_value_init (&rates, GST_TYPE_LIST);

    g_value_init (&min, GST_TYPE_FRACTION);
    g_value_init (&step, GST_TYPE_FRACTION);
    g_value_init (&max, GST_TYPE_FRACTION);

    /* get the min */
    minnum = ival.stepwise.min.numerator;
    mindenom = ival.stepwise.min.denominator;
    if (minnum > G_MAXINT || mindenom > G_MAXINT) {
      minnum >>= 1;
      mindenom >>= 1;
    }
    GST_LOG_OBJECT (v4l2object->dbg_obj, "stepwise min frame interval: %d/%d",
        minnum, mindenom);
    gst_value_set_fraction (&min, minnum, mindenom);

    /* get the max */
    maxnum = ival.stepwise.max.numerator;
    maxdenom = ival.stepwise.max.denominator;
    if (maxnum > G_MAXINT || maxdenom > G_MAXINT) {
      maxnum >>= 1;
      maxdenom >>= 1;
    }

    GST_LOG_OBJECT (v4l2object->dbg_obj, "stepwise max frame interval: %d/%d",
        maxnum, maxdenom);
    gst_value_set_fraction (&max, maxnum, maxdenom);

    /* get the step */
    num = ival.stepwise.step.numerator;
    denom = ival.stepwise.step.denominator;
    if (num > G_MAXINT || denom > G_MAXINT) {
      num >>= 1;
      denom >>= 1;
    }

    if (num == 0 || denom == 0) {
      /* in this case we have a wrong fraction or no step, set the step to max
       * so that we only add the min value in the loop below */
      num = maxnum;
      denom = maxdenom;
    }

    /* since we only have gst_value_fraction_subtract and not add, negate the
     * numerator */
    GST_LOG_OBJECT (v4l2object->dbg_obj, "stepwise step frame interval: %d/%d",
        num, denom);
    gst_value_set_fraction (&step, -num, denom);

    /* walk from min interval to max interval, emitting each as a rate */
    while (gst_value_compare (&min, &max) != GST_VALUE_GREATER_THAN) {
      GValue rate = { 0, };

      num = gst_value_get_fraction_numerator (&min);
      denom = gst_value_get_fraction_denominator (&min);
      GST_LOG_OBJECT (v4l2object->dbg_obj, "adding stepwise framerate: %d/%d",
          denom, num);

      /* invert to get the framerate */
      g_value_init (&rate, GST_TYPE_FRACTION);
      gst_value_set_fraction (&rate, denom, num);
      gst_value_list_append_value (&rates, &rate);
      added = TRUE;

      /* we're actually adding because step was negated above. This is because
       * there is no _add function... */
      if (!gst_value_fraction_subtract (&min, &min, &step)) {
        GST_WARNING_OBJECT (v4l2object->dbg_obj, "could not step fraction!");
        break;
      }
    }
    if (!added) {
      /* no range was added, leave the default range from the template */
      GST_WARNING_OBJECT (v4l2object->dbg_obj,
          "no range added, leaving default");
      g_value_unset (&rates);
    }
  } else if (ival.type == V4L2_FRMIVAL_TYPE_CONTINUOUS) {
    guint32 maxnum, maxdenom;

    g_value_init (&rates, GST_TYPE_FRACTION_RANGE);

    num = ival.stepwise.min.numerator;
    denom = ival.stepwise.min.denominator;
    if (num > G_MAXINT || denom > G_MAXINT) {
      num >>= 1;
      denom >>= 1;
    }

    maxnum = ival.stepwise.max.numerator;
    maxdenom = ival.stepwise.max.denominator;
    if (maxnum > G_MAXINT || maxdenom > G_MAXINT) {
      maxnum >>= 1;
      maxdenom >>= 1;
    }

    GST_LOG_OBJECT (v4l2object->dbg_obj,
        "continuous frame interval %d/%d to %d/%d", maxdenom, maxnum, denom,
        num);

    /* min/max intervals invert into max/min framerates */
    gst_value_set_fraction_range_full (&rates, maxdenom, maxnum, denom, num);
  } else {
    goto unknown_type;
  }

return_data:
  s = gst_structure_copy (template);
  gst_structure_set (s, "width", G_TYPE_INT, (gint) width,
      "height", G_TYPE_INT, (gint) height, NULL);

  gst_v4l2_object_add_aspect_ratio (v4l2object, s);

  if (!v4l2object->skip_try_fmt_probes) {
    gst_v4l2_object_add_interlace_mode (v4l2object, s, width, height,
        pixelformat);
    gst_v4l2_object_add_colorspace (v4l2object, s, width, height, pixelformat);
  }

  if (G_IS_VALUE (&rates)) {
    gst_v4l2src_value_simplify (&rates);
    /* only change the framerate on the template when we have a valid probed new
     * value */
    gst_structure_take_value (s, "framerate", &rates);
  } else if (v4l2object->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
      v4l2object->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
    gst_structure_set (s, "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT,
        1, NULL);
  }
  return s;

  /* ERRORS */
enum_frameintervals_failed:
  {
    GST_DEBUG_OBJECT (v4l2object->dbg_obj,
        "Unable to enumerate intervals for %" GST_FOURCC_FORMAT "@%ux%u",
        GST_FOURCC_ARGS (pixelformat), width, height);
    goto return_data;
  }
unknown_type:
  {
    /* I don't see how this is actually an error, we ignore the format then */
    GST_WARNING_OBJECT (v4l2object->dbg_obj,
        "Unknown frame interval type at %" GST_FOURCC_FORMAT "@%ux%u: %u",
        GST_FOURCC_ARGS (pixelformat), width, height, ival.type);
    return NULL;
  }
}
+
+static gint
+sort_by_frame_size (GstStructure * s1, GstStructure * s2)
+{
+ int w1, h1, w2, h2;
+
+ gst_structure_get_int (s1, "width", &w1);
+ gst_structure_get_int (s1, "height", &h1);
+ gst_structure_get_int (s2, "width", &w2);
+ gst_structure_get_int (s2, "height", &h2);
+
+ /* I think it's safe to assume that this won't overflow for a while */
+ return ((w2 * h2) - (w1 * h1));
+}
+
+static void
+gst_v4l2_object_update_and_append (GstV4l2Object * v4l2object,
+ guint32 format, GstCaps * caps, GstStructure * s)
+{
+ GstStructure *alt_s = NULL;
+
+ /* Encoded stream on output buffer need to be parsed */
+ if (v4l2object->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ v4l2object->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+#ifndef USE_V4L2_TARGET_NV
+ gint i = 0;
+#else
+ guint i = 0;
+#endif
+
+ for (; i < GST_V4L2_FORMAT_COUNT; i++) {
+ if (format == gst_v4l2_formats[i].format &&
+ gst_v4l2_formats[i].flags & GST_V4L2_CODEC &&
+ !(gst_v4l2_formats[i].flags & GST_V4L2_NO_PARSE)) {
+ gst_structure_set (s, "parsed", G_TYPE_BOOLEAN, TRUE, NULL);
+ break;
+ }
+ }
+ }
+
+ if (v4l2object->has_alpha_component &&
+ (v4l2object->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ v4l2object->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
+ switch (format) {
+ case V4L2_PIX_FMT_RGB32:
+ alt_s = gst_structure_copy (s);
+ gst_structure_set (alt_s, "format", G_TYPE_STRING, "ARGB", NULL);
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ alt_s = gst_structure_copy (s);
+ gst_structure_set (alt_s, "format", G_TYPE_STRING, "BGRA", NULL);
+ break;
+ default:
+ break;
+ }
+ }
+
+ gst_caps_append_structure (caps, s);
+
+ if (alt_s)
+ gst_caps_append_structure (caps, alt_s);
+}
+
+/* Probe the caps the device supports for @pixelformat by enumerating
+ * frame sizes with VIDIOC_ENUM_FRAMESIZES (discrete, stepwise or
+ * continuous), falling back to TRY_FMT-based min/max probing when the
+ * enumeration is unsupported or yields nothing. Returns new caps,
+ * never NULL (may be empty only transiently; fallback always appends). */
+static GstCaps *
+gst_v4l2_object_probe_caps_for_format (GstV4l2Object * v4l2object,
+ guint32 pixelformat, const GstStructure * template)
+{
+ GstCaps *ret = gst_caps_new_empty ();
+ GstStructure *tmp;
+ gint fd = v4l2object->video_fd;
+ struct v4l2_frmsizeenum size;
+ GList *results = NULL;
+ guint32 w, h;
+
+ if (pixelformat == GST_MAKE_FOURCC ('M', 'P', 'E', 'G')) {
+ gst_caps_append_structure (ret, gst_structure_copy (template));
+ return ret;
+ }
+
+ memset (&size, 0, sizeof (struct v4l2_frmsizeenum));
+ size.index = 0;
+ size.pixel_format = pixelformat;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj,
+ "Enumerating frame sizes for %" GST_FOURCC_FORMAT,
+ GST_FOURCC_ARGS (pixelformat));
+
+ if (v4l2object->ioctl (fd, VIDIOC_ENUM_FRAMESIZES, &size) < 0)
+ goto enum_framesizes_failed;
+
+ if (size.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
+ do {
+ GST_LOG_OBJECT (v4l2object->dbg_obj, "got discrete frame size %dx%d",
+ size.discrete.width, size.discrete.height);
+
+ /* Clamp to G_MAXINT as caps widths/heights are gint */
+ w = MIN (size.discrete.width, G_MAXINT);
+ h = MIN (size.discrete.height, G_MAXINT);
+
+ if (w && h) {
+ tmp =
+ gst_v4l2_object_probe_caps_for_format_and_size (v4l2object,
+ pixelformat, w, h, template);
+
+ if (tmp)
+ results = g_list_prepend (results, tmp);
+ }
+
+ size.index++;
+ } while (v4l2object->ioctl (fd, VIDIOC_ENUM_FRAMESIZES, &size) >= 0);
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj,
+ "done iterating discrete frame sizes");
+ } else if (size.type == V4L2_FRMSIZE_TYPE_STEPWISE) {
+ guint32 maxw, maxh, step_w, step_h;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "we have stepwise frame sizes:");
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "min width: %d",
+ size.stepwise.min_width);
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "min height: %d",
+ size.stepwise.min_height);
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "max width: %d",
+ size.stepwise.max_width);
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "max height: %d",
+ size.stepwise.max_height);
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "step width: %d",
+ size.stepwise.step_width);
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "step height: %d",
+ size.stepwise.step_height);
+
+ w = MAX (size.stepwise.min_width, 1);
+ h = MAX (size.stepwise.min_height, 1);
+ maxw = MIN (size.stepwise.max_width, G_MAXINT);
+ maxh = MIN (size.stepwise.max_height, G_MAXINT);
+
+ step_w = MAX (size.stepwise.step_width, 1);
+ step_h = MAX (size.stepwise.step_height, 1);
+
+ /* FIXME: check for sanity and that min/max are multiples of the steps */
+
+ /* we only query details for the max width/height since it's likely the
+ * most restricted if there are any resolution-dependent restrictions */
+ tmp = gst_v4l2_object_probe_caps_for_format_and_size (v4l2object,
+ pixelformat, maxw, maxh, template);
+
+ if (tmp) {
+ GValue step_range = G_VALUE_INIT;
+
+ g_value_init (&step_range, GST_TYPE_INT_RANGE);
+ gst_value_set_int_range_step (&step_range, w, maxw, step_w);
+ gst_structure_set_value (tmp, "width", &step_range);
+
+ gst_value_set_int_range_step (&step_range, h, maxh, step_h);
+ gst_structure_take_value (tmp, "height", &step_range);
+
+ /* no point using the results list here, since there's only one struct */
+ gst_v4l2_object_update_and_append (v4l2object, pixelformat, ret, tmp);
+ }
+ } else if (size.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) {
+ guint32 maxw, maxh;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "we have continuous frame sizes:");
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "min width: %d",
+ size.stepwise.min_width);
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "min height: %d",
+ size.stepwise.min_height);
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "max width: %d",
+ size.stepwise.max_width);
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "max height: %d",
+ size.stepwise.max_height);
+
+ w = MAX (size.stepwise.min_width, 1);
+ h = MAX (size.stepwise.min_height, 1);
+ maxw = MIN (size.stepwise.max_width, G_MAXINT);
+ maxh = MIN (size.stepwise.max_height, G_MAXINT);
+
+ tmp =
+ gst_v4l2_object_probe_caps_for_format_and_size (v4l2object, pixelformat,
+ w, h, template);
+ if (tmp) {
+ gst_structure_set (tmp, "width", GST_TYPE_INT_RANGE, (gint) w,
+ (gint) maxw, "height", GST_TYPE_INT_RANGE, (gint) h, (gint) maxh,
+ NULL);
+
+ /* no point using the results list here, since there's only one struct */
+ gst_v4l2_object_update_and_append (v4l2object, pixelformat, ret, tmp);
+ }
+ } else {
+ goto unknown_type;
+ }
+
+ /* we use an intermediary list to store and then sort the results of the
+ * probing because we can't make any assumptions about the order in which
+ * the driver will give us the sizes, but we want the final caps to contain
+ * the results starting with the highest resolution and having the lowest
+ * resolution last, since order in caps matters for things like fixation. */
+ results = g_list_sort (results, (GCompareFunc) sort_by_frame_size);
+ while (results != NULL) {
+ gst_v4l2_object_update_and_append (v4l2object, pixelformat, ret,
+ results->data);
+ results = g_list_delete_link (results, results);
+ }
+
+ if (gst_caps_is_empty (ret))
+ goto enum_framesizes_no_results;
+
+ return ret;
+
+ /* ERRORS */
+enum_framesizes_failed:
+ {
+ /* I don't see how this is actually an error */
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj,
+ "Failed to enumerate frame sizes for pixelformat %" GST_FOURCC_FORMAT
+ " (%s)", GST_FOURCC_ARGS (pixelformat), g_strerror (errno));
+ goto default_frame_sizes;
+ }
+enum_framesizes_no_results:
+ {
+ /* it's possible that VIDIOC_ENUM_FRAMESIZES is defined but the driver in
+ * question doesn't actually support it yet */
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj,
+ "No results for pixelformat %" GST_FOURCC_FORMAT
+ " enumerating frame sizes, trying fallback",
+ GST_FOURCC_ARGS (pixelformat));
+ goto default_frame_sizes;
+ }
+unknown_type:
+ {
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Unknown frame sizeenum type for pixelformat %" GST_FOURCC_FORMAT
+ ": %u", GST_FOURCC_ARGS (pixelformat), size.type);
+ goto default_frame_sizes;
+ }
+
+default_frame_sizes:
+ {
+ gint min_w, max_w, min_h, max_h, fix_num = 0, fix_denom = 0;
+
+ /* This code is for Linux < 2.6.19 */
+ min_w = min_h = 1;
+ max_w = max_h = GST_V4L2_MAX_SIZE;
+ if (!gst_v4l2_object_get_nearest_size (v4l2object, pixelformat, &min_w,
+ &min_h)) {
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Could not probe minimum capture size for pixelformat %"
+ GST_FOURCC_FORMAT, GST_FOURCC_ARGS (pixelformat));
+ }
+ if (!gst_v4l2_object_get_nearest_size (v4l2object, pixelformat, &max_w,
+ &max_h)) {
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Could not probe maximum capture size for pixelformat %"
+ GST_FOURCC_FORMAT, GST_FOURCC_ARGS (pixelformat));
+ }
+
+#ifndef USE_V4L2_TARGET_NV
+ /* Since we can't get framerate directly, try to use the current norm */
+ if (v4l2object->tv_norm && v4l2object->norms) {
+ GList *norms;
+ GstTunerNorm *norm = NULL;
+ GstTunerNorm *current =
+ gst_v4l2_tuner_get_norm_by_std_id (v4l2object, v4l2object->tv_norm);
+
+ for (norms = v4l2object->norms; norms != NULL; norms = norms->next) {
+ norm = (GstTunerNorm *) norms->data;
+ if (!strcmp (norm->label, current->label))
+ break;
+ }
+ /* If it's possible, set framerate to that (discrete) value */
+ if (norm) {
+ fix_num = gst_value_get_fraction_numerator (&norm->framerate);
+ fix_denom = gst_value_get_fraction_denominator (&norm->framerate);
+ }
+ }
+#endif
+
+ tmp = gst_structure_copy (template);
+ if (fix_num) {
+ gst_structure_set (tmp, "framerate", GST_TYPE_FRACTION, fix_num,
+ fix_denom, NULL);
+ } else if (v4l2object->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ v4l2object->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ /* if norm can't be used, copy the template framerate */
+ gst_structure_set (tmp, "framerate", GST_TYPE_FRACTION_RANGE, 0, 1,
+ G_MAXINT, 1, NULL);
+ }
+
+ if (min_w == max_w)
+ gst_structure_set (tmp, "width", G_TYPE_INT, max_w, NULL);
+ else
+ gst_structure_set (tmp, "width", GST_TYPE_INT_RANGE, min_w, max_w, NULL);
+
+ if (min_h == max_h)
+ gst_structure_set (tmp, "height", G_TYPE_INT, max_h, NULL);
+ else
+ gst_structure_set (tmp, "height", GST_TYPE_INT_RANGE, min_h, max_h, NULL);
+
+ gst_v4l2_object_add_aspect_ratio (v4l2object, tmp);
+
+ if (!v4l2object->skip_try_fmt_probes) {
+ /* We could consider setting interlace mode from min and max. */
+ gst_v4l2_object_add_interlace_mode (v4l2object, tmp, max_w, max_h,
+ pixelformat);
+ /* We could consider to check colorspace for min too, in case it depends on
+ * the size. But in this case, min and max could not be enough */
+ gst_v4l2_object_add_colorspace (v4l2object, tmp, max_w, max_h,
+ pixelformat);
+ }
+
+ gst_v4l2_object_update_and_append (v4l2object, pixelformat, ret, tmp);
+ return ret;
+ }
+}
+
+/* Ask the driver (VIDIOC_TRY_FMT) for the nearest supported size to
+ * *width x *height for @pixelformat, updating *width/*height in place.
+ * Returns FALSE when TRY_FMT fails or the returned field type cannot be
+ * mapped to a GstVideoInterlaceMode. */
+static gboolean
+gst_v4l2_object_get_nearest_size (GstV4l2Object * v4l2object,
+ guint32 pixelformat, gint * width, gint * height)
+{
+ struct v4l2_format fmt;
+ gboolean ret = FALSE;
+ GstVideoInterlaceMode interlace_mode;
+
+ g_return_val_if_fail (width != NULL, FALSE);
+ g_return_val_if_fail (height != NULL, FALSE);
+
+ GST_LOG_OBJECT (v4l2object->dbg_obj,
+ "getting nearest size to %dx%d with format %" GST_FOURCC_FORMAT,
+ *width, *height, GST_FOURCC_ARGS (pixelformat));
+
+ /* get size delimiters */
+ memset (&fmt, 0, sizeof (fmt));
+ fmt.type = v4l2object->type;
+ fmt.fmt.pix.width = *width;
+ fmt.fmt.pix.height = *height;
+ fmt.fmt.pix.pixelformat = pixelformat;
+ fmt.fmt.pix.field = V4L2_FIELD_ANY;
+
+ if (gst_v4l2_object_try_fmt (v4l2object, &fmt) < 0)
+ goto error;
+
+ GST_LOG_OBJECT (v4l2object->dbg_obj,
+ "got nearest size %dx%d", fmt.fmt.pix.width, fmt.fmt.pix.height);
+
+ *width = fmt.fmt.pix.width;
+ *height = fmt.fmt.pix.height;
+
+ if (!gst_v4l2_object_get_interlace_mode (fmt.fmt.pix.field, &interlace_mode)) {
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Unsupported field type for %" GST_FOURCC_FORMAT "@%ux%u: %u",
+ GST_FOURCC_ARGS (pixelformat), *width, *height, fmt.fmt.pix.field);
+ goto error;
+ }
+
+ ret = TRUE;
+
+error:
+ if (!ret) {
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Unable to try format: %s", g_strerror (errno));
+ }
+
+ return ret;
+}
+
+#ifndef USE_V4L2_TARGET_NV
+/* Probe whether the driver implements VIDIOC_EXPBUF (DMABUF export).
+ * The ioctl is issued with invalid index/plane, so it is expected to
+ * fail; an ENOTTY failure specifically means "not implemented". */
+static gboolean
+gst_v4l2_object_is_dmabuf_supported (GstV4l2Object * v4l2object)
+{
+ gboolean ret = TRUE;
+ struct v4l2_exportbuffer expbuf = {
+ .type = v4l2object->type,
+ .index = -1,
+ .plane = -1,
+ .flags = O_CLOEXEC | O_RDWR,
+ };
+
+ /* Expected to fail, but ENOTTY tells us that it is not implemented.
+ * Only inspect errno when the ioctl actually failed, so a stale ENOTTY
+ * left by an earlier call cannot produce a false negative. */
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_EXPBUF, &expbuf) < 0
+ && errno == ENOTTY)
+ ret = FALSE;
+
+ return ret;
+}
+#endif
+
+/* Select the I/O transport (RW / MMAP / DMABUF / DMABUF_IMPORT / USERPTR)
+ * for @v4l2object based on the device capabilities, the requested mode and
+ * the NV-specific device/plane combination, then create the buffer pool
+ * for @caps and mark the object active. Returns FALSE (with an element
+ * error posted) when no usable method exists or pool creation fails. */
+static gboolean
+gst_v4l2_object_setup_pool (GstV4l2Object * v4l2object, GstCaps * caps)
+{
+ GstV4l2IOMode mode;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "initializing the %s system",
+ V4L2_TYPE_IS_OUTPUT (v4l2object->type) ? "output" : "capture");
+
+ GST_V4L2_CHECK_OPEN (v4l2object);
+ GST_V4L2_CHECK_NOT_ACTIVE (v4l2object);
+
+ /* find transport */
+ mode = v4l2object->req_mode;
+
+ if (v4l2object->device_caps & V4L2_CAP_READWRITE) {
+ if (v4l2object->req_mode == GST_V4L2_IO_AUTO)
+ mode = GST_V4L2_IO_RW;
+ } else if (v4l2object->req_mode == GST_V4L2_IO_RW)
+ goto method_not_supported;
+
+#ifndef USE_V4L2_TARGET_NV
+ if (v4l2object->device_caps & V4L2_CAP_STREAMING) {
+ if (v4l2object->req_mode == GST_V4L2_IO_AUTO) {
+ if (!V4L2_TYPE_IS_OUTPUT (v4l2object->type) &&
+ gst_v4l2_object_is_dmabuf_supported (v4l2object)) {
+ mode = GST_V4L2_IO_DMABUF;
+ } else {
+ mode = GST_V4L2_IO_MMAP;
+ }
+ }
+ } else if (v4l2object->req_mode == GST_V4L2_IO_MMAP)
+ goto method_not_supported;
+#else
+ /* TODO : Remove forced mode setting once supported */
+ /* NV path: the mode is forced per device node (encoder vs decoder),
+ * per plane direction, and per backend (is_cuvid: dGPU vs Tegra). */
+ if (v4l2object->device_caps & V4L2_CAP_STREAMING) {
+ if (v4l2object->req_mode == GST_V4L2_IO_AUTO) {
+ if ((V4L2_TYPE_IS_OUTPUT (v4l2object->type)) &&
+ (!strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVENC)
+ || !strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVENC_ALT))) {
+ /* Currently, DMABUF_IMPORT io mode is used on encoder
+ output plane, when default mode V4L2_IO_AUTO is set */
+
+ if (is_cuvid == TRUE) {
+ mode = GST_V4L2_IO_MMAP; //TODO make this default to dmabuf_import
+ } else if (is_cuvid == FALSE) {
+ mode = GST_V4L2_IO_DMABUF_IMPORT;
+ }
+ } else {
+ if (is_cuvid == TRUE){
+ mode = GST_V4L2_IO_MMAP;
+ } else {
+ /* Currently, MMAP io mode is used on encoder
+ capture plane, when default mode V4L2_IO_AUTO is set.
+ Currently, MMAP io mode is used on decoder
+ capture plane and USERPTR io mode is used on decoder
+ output plane, when default mode V4L2_IO_AUTO is set. */
+ if (V4L2_TYPE_IS_OUTPUT (v4l2object->type)) {
+ mode = GST_V4L2_IO_USERPTR;
+ if((GST_V4L2_PIXELFORMAT(v4l2object) == V4L2_PIX_FMT_MJPEG) ||
+ (GST_V4L2_PIXELFORMAT(v4l2object) == V4L2_PIX_FMT_JPEG))
+ /* Note: Currently, MMAP io mode is used on JPEG decoder output and
+ * capture plane, when default mode V4L2_IO_AUTO is set.
+ * */
+ mode = GST_V4L2_IO_MMAP; //TODO: Support userptr mode for JPEG
+ } else {
+ mode = GST_V4L2_IO_MMAP;
+ }
+ }
+ }
+ } else if (v4l2object->req_mode == GST_V4L2_IO_USERPTR) {
+ /* Currently, USERPTR io mode only supported on decoder
+ output plane */
+ mode = GST_V4L2_IO_USERPTR;
+ } else if (v4l2object->req_mode == GST_V4L2_IO_MMAP) {
+ mode = GST_V4L2_IO_MMAP;
+ } else if (v4l2object->req_mode == GST_V4L2_IO_DMABUF_IMPORT) {
+ /* Currently, DMABUF_IMPORT io mode only supported on encoder
+ output plane */
+ mode = GST_V4L2_IO_DMABUF_IMPORT;
+ }
+ } else if (v4l2object->req_mode == GST_V4L2_IO_MMAP)
+ goto method_not_supported;
+#endif
+
+ /* if still no transport selected, error out */
+ if (mode == GST_V4L2_IO_AUTO)
+ goto no_supported_capture_method;
+
+ GST_INFO_OBJECT (v4l2object->dbg_obj, "accessing buffers via mode %d", mode);
+ v4l2object->mode = mode;
+
+ /* If min_buffers is not set, the driver either does not support the control or
+ it has not been asked yet via propose_allocation/decide_allocation. */
+ if (!v4l2object->min_buffers)
+ gst_v4l2_get_driver_min_buffers (v4l2object);
+
+ /* Map the buffers */
+ GST_LOG_OBJECT (v4l2object->dbg_obj, "initiating buffer pool");
+
+ if (!(v4l2object->pool = gst_v4l2_buffer_pool_new (v4l2object, caps)))
+ goto buffer_pool_new_failed;
+
+ GST_V4L2_SET_ACTIVE (v4l2object);
+
+ return TRUE;
+
+ /* ERRORS */
+buffer_pool_new_failed:
+ {
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, READ,
+ (_("Could not map buffers from device '%s'"),
+ v4l2object->videodev),
+ ("Failed to create buffer pool: %s", g_strerror (errno)));
+ return FALSE;
+ }
+method_not_supported:
+ {
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, READ,
+ (_("The driver of device '%s' does not support the IO method %d"),
+ v4l2object->videodev, mode), (NULL));
+ return FALSE;
+ }
+no_supported_capture_method:
+ {
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, READ,
+ (_("The driver of device '%s' does not support any known IO "
+ "method."), v4l2object->videodev), (NULL));
+ return FALSE;
+ }
+}
+
+/* Store @stride for @plane in @info. For tiled pixel formats the stride
+ * slot carries an encoded tile grid (x_tiles, y_tiles) instead of a plain
+ * byte stride, so derive the tile counts from the stride and the padded
+ * plane height. */
+static void
+gst_v4l2_object_set_stride (GstVideoInfo * info, GstVideoAlignment * align,
+ gint plane, gint stride)
+{
+ const GstVideoFormatInfo *fmt_info = info->finfo;
+ gint ws, hs, tile_h, plane_height, tiles_x, tiles_y;
+
+ if (!GST_VIDEO_FORMAT_INFO_IS_TILED (fmt_info)) {
+ /* Plain (non-tiled) formats: the stride is simply bytes per line */
+ info->stride[plane] = stride;
+ return;
+ }
+
+ ws = GST_VIDEO_FORMAT_INFO_TILE_WS (fmt_info);
+ hs = GST_VIDEO_FORMAT_INFO_TILE_HS (fmt_info);
+ tile_h = 1 << hs;
+
+ /* Scale the padded picture height to this plane and round it up to a
+ * whole number of tiles */
+ plane_height = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (fmt_info, plane,
+ info->height + align->padding_top + align->padding_bottom);
+ plane_height = GST_ROUND_UP_N (plane_height, tile_h);
+
+ tiles_x = stride >> ws;
+ tiles_y = plane_height >> hs;
+ info->stride[plane] = GST_VIDEO_TILE_MAKE_STRIDE (tiles_x, tiles_y);
+}
+
+/* Fill in per-plane strides and offsets of @info when the driver exposes
+ * a planar format through a single v4l2 plane: each GStreamer plane's
+ * stride is extrapolated from the base @stride of the first plane, and
+ * offsets are accumulated from the padded plane sizes. Only valid when
+ * n_v4l2_planes == 1 (asserted below). */
+static void
+gst_v4l2_object_extrapolate_info (GstV4l2Object * v4l2object,
+ GstVideoInfo * info, GstVideoAlignment * align, gint stride)
+{
+ const GstVideoFormatInfo *finfo = info->finfo;
+#ifndef USE_V4L2_TARGET_NV
+ gint i, estride, padded_height;
+#else
+ guint i, estride, padded_height;
+#endif
+ gsize offs = 0;
+
+ g_return_if_fail (v4l2object->n_v4l2_planes == 1);
+
+ padded_height = info->height + align->padding_top + align->padding_bottom;
+
+ for (i = 0; i < finfo->n_planes; i++) {
+ estride = gst_v4l2_object_extrapolate_stride (finfo, i, stride);
+
+ gst_v4l2_object_set_stride (info, align, i, estride);
+
+ /* Planes are laid out back-to-back: this plane starts where the
+ * previous one ended */
+ info->offset[i] = offs;
+ offs += estride *
+ GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (finfo, i, padded_height);
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj,
+ "Extrapolated for plane %d with base stride %d: "
+ "stride %d, offset %" G_GSIZE_FORMAT, i, stride, info->stride[i],
+ info->offset[i]);
+ }
+
+ /* Update the image size according the amount of data we are going to
+ * read/write. This workaround bugs in driver where the sizeimage provided
+ * by TRY/S_FMT represent the buffer length (maximum size) rather then the expected
+ * bytesused (buffer size). */
+ if (offs < info->size)
+ info->size = offs;
+}
+
+/* Record the format negotiated with the driver into @v4l2object: computes
+ * right/bottom padding from the driver-returned stride and height, fills
+ * in per-plane strides/offsets of @info (extrapolating when planar data
+ * sits in one v4l2 plane), decides whether GstVideoMeta is required, and
+ * stores info/align/format/fmtdesc plus the pre-computed frame duration. */
+static void
+gst_v4l2_object_save_format (GstV4l2Object * v4l2object,
+ struct v4l2_fmtdesc *fmtdesc, struct v4l2_format *format,
+ GstVideoInfo * info, GstVideoAlignment * align)
+{
+ const GstVideoFormatInfo *finfo = info->finfo;
+ gboolean standard_stride = TRUE;
+#ifndef USE_V4L2_TARGET_NV
+ gint stride, pstride, padded_width, padded_height, i;
+#else
+ gint stride, pstride;
+ guint padded_width, padded_height, i;
+#endif
+
+ if (GST_VIDEO_INFO_FORMAT (info) == GST_VIDEO_FORMAT_ENCODED) {
+ v4l2object->n_v4l2_planes = 1;
+ info->size = format->fmt.pix.sizeimage;
+ goto store_info;
+ }
+
+ /* adjust right padding */
+ if (V4L2_TYPE_IS_MULTIPLANAR (v4l2object->type))
+ stride = format->fmt.pix_mp.plane_fmt[0].bytesperline;
+ else
+ stride = format->fmt.pix.bytesperline;
+
+ pstride = GST_VIDEO_FORMAT_INFO_PSTRIDE (finfo, 0);
+ if (pstride) {
+ padded_width = stride / pstride;
+ } else {
+ /* pstride can be 0 for complex formats */
+ GST_WARNING_OBJECT (v4l2object->element,
+ "format %s has a pstride of 0, cannot compute padded width",
+ gst_video_format_to_string (GST_VIDEO_INFO_FORMAT (info)));
+ padded_width = stride;
+ }
+
+ if (padded_width < format->fmt.pix.width)
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Driver bug detected, stride (%d) is too small for the width (%d)",
+ padded_width, format->fmt.pix.width);
+
+ align->padding_right = padded_width - info->width - align->padding_left;
+
+ /* adjust bottom padding */
+ padded_height = format->fmt.pix.height;
+
+ if (GST_VIDEO_FORMAT_INFO_IS_TILED (finfo)) {
+ guint hs, tile_height;
+
+ hs = GST_VIDEO_FORMAT_INFO_TILE_HS (finfo);
+ tile_height = 1 << hs;
+
+ padded_height = GST_ROUND_UP_N (padded_height, tile_height);
+ }
+
+ align->padding_bottom = padded_height - info->height - align->padding_top;
+
+ /* setup the strides and offset */
+ if (V4L2_TYPE_IS_MULTIPLANAR (v4l2object->type)) {
+ struct v4l2_pix_format_mplane *pix_mp = &format->fmt.pix_mp;
+
+ /* figure out the frame layout */
+ v4l2object->n_v4l2_planes = MAX (1, pix_mp->num_planes);
+ info->size = 0;
+#ifndef USE_V4L2_TARGET_NV
+ for (i = 0; i < v4l2object->n_v4l2_planes; i++) {
+#else
+ for (i = 0; i < (guint) v4l2object->n_v4l2_planes; i++) {
+#endif
+ stride = pix_mp->plane_fmt[i].bytesperline;
+
+ if (info->stride[i] != stride)
+ standard_stride = FALSE;
+
+ gst_v4l2_object_set_stride (info, align, i, stride);
+ info->offset[i] = info->size;
+ info->size += pix_mp->plane_fmt[i].sizeimage;
+ }
+
+ /* Extrapolate stride if planar format are being set in 1 v4l2 plane */
+#ifndef USE_V4L2_TARGET_NV
+ if (v4l2object->n_v4l2_planes < finfo->n_planes) {
+#else
+ if ((guint) v4l2object->n_v4l2_planes < finfo->n_planes) {
+#endif
+ stride = format->fmt.pix_mp.plane_fmt[0].bytesperline;
+ gst_v4l2_object_extrapolate_info (v4l2object, info, align, stride);
+ }
+ } else {
+ /* only one plane in non-MPLANE mode */
+ v4l2object->n_v4l2_planes = 1;
+ info->size = format->fmt.pix.sizeimage;
+ stride = format->fmt.pix.bytesperline;
+
+ if (info->stride[0] != stride)
+ standard_stride = FALSE;
+
+ gst_v4l2_object_extrapolate_info (v4l2object, info, align, stride);
+ }
+
+ /* adjust the offset to take into account left and top */
+ if (GST_VIDEO_FORMAT_INFO_IS_TILED (finfo)) {
+ if ((align->padding_left + align->padding_top) > 0)
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Left and top padding is not permitted for tiled formats");
+ } else {
+ for (i = 0; i < finfo->n_planes; i++) {
+ gint vedge, hedge;
+
+ /* FIXME we assume plane as component as this is true for all supported
+ * format we support. */
+
+ hedge = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (finfo, i, align->padding_left);
+ vedge = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (finfo, i, align->padding_top);
+
+ info->offset[i] += (vedge * info->stride[i]) +
+ (hedge * GST_VIDEO_INFO_COMP_PSTRIDE (info, i));
+ }
+ }
+
+store_info:
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "Got sizeimage %" G_GSIZE_FORMAT,
+ info->size);
+
+ /* to avoid copies we need video meta if there is padding */
+ v4l2object->need_video_meta =
+ ((align->padding_top + align->padding_left + align->padding_right +
+ align->padding_bottom) != 0);
+
+ /* ... or if stride is non "standard" */
+ if (!standard_stride)
+ v4l2object->need_video_meta = TRUE;
+
+ /* ... or also video meta if we use multiple, non-contiguous, planes */
+ if (v4l2object->n_v4l2_planes > 1)
+ v4l2object->need_video_meta = TRUE;
+
+ v4l2object->info = *info;
+ v4l2object->align = *align;
+ v4l2object->format = *format;
+ v4l2object->fmtdesc = fmtdesc;
+
+ /* if we have a framerate pre-calculate duration */
+ if (info->fps_n > 0 && info->fps_d > 0) {
+ v4l2object->duration = gst_util_uint64_scale_int (GST_SECOND, info->fps_d,
+ info->fps_n);
+ } else {
+ v4l2object->duration = GST_CLOCK_TIME_NONE;
+ }
+}
+
+/* Derive the stride of @plane from the stride of the first plane,
+ * applying the format's horizontal subsampling. Semi-planar layouts
+ * (NVxx and, on NV targets, P010/I420_12LE) pack two chroma components
+ * per sample on secondary planes, hence the extra factor of 2. */
+gint
+gst_v4l2_object_extrapolate_stride (const GstVideoFormatInfo * finfo,
+ gint plane, gint stride)
+{
+ gboolean semi_planar;
+ gint estride;
+
+ switch (finfo->format) {
+ case GST_VIDEO_FORMAT_NV12:
+ case GST_VIDEO_FORMAT_NV12_64Z32:
+ case GST_VIDEO_FORMAT_NV21:
+ case GST_VIDEO_FORMAT_NV16:
+ case GST_VIDEO_FORMAT_NV61:
+ case GST_VIDEO_FORMAT_NV24:
+#ifdef USE_V4L2_TARGET_NV
+ case GST_VIDEO_FORMAT_P010_10LE:
+ case GST_VIDEO_FORMAT_I420_12LE:
+#endif
+ semi_planar = TRUE;
+ break;
+ default:
+ semi_planar = FALSE;
+ break;
+ }
+
+ estride = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (finfo, plane, stride);
+ if (semi_planar && plane != 0)
+ estride *= 2;
+
+ return estride;
+}
+
+static gboolean
+gst_v4l2_object_set_format_full (GstV4l2Object * v4l2object, GstCaps * caps,
+ gboolean try_only, GstV4l2Error * error)
+{
+ gint fd = v4l2object->video_fd;
+ struct v4l2_format format;
+ struct v4l2_streamparm streamparm;
+ enum v4l2_field field;
+ guint32 pixelformat;
+ struct v4l2_fmtdesc *fmtdesc;
+ GstVideoInfo info;
+ GstVideoAlignment align;
+#ifndef USE_V4L2_TARGET_NV
+ gint width, height, fps_n, fps_d;
+#else
+ struct v4l2_ext_control ctl;
+ struct v4l2_ext_controls ctrls;
+ gint ret;
+ guint width, height, fps_n, fps_d;
+ GstV4l2VideoEnc *videoenc = NULL;
+ GstV4l2VideoDec *videodec = NULL;
+ if (!strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVENC)
+ || !strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVENC_ALT)) {
+ videoenc = GST_V4L2_VIDEO_ENC (v4l2object->element);
+ }
+ if (!strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVDEC_MCCOY)) {
+ videodec = GST_V4L2_VIDEO_DEC (v4l2object->element);
+ }
+ GstV4l2VideoEncClass *klass = NULL;
+ if (is_cuvid == FALSE) {
+ if (!strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVENC)
+ || !strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVENC_ALT)) {
+ klass = GST_V4L2_VIDEO_ENC_GET_CLASS (v4l2object->element);
+ }
+ }
+#endif
+ gint n_v4l_planes;
+ gint i = 0;
+ gboolean is_mplane;
+ enum v4l2_colorspace colorspace = 0;
+ enum v4l2_quantization range = 0;
+ enum v4l2_ycbcr_encoding matrix = 0;
+ enum v4l2_xfer_func transfer = 0;
+ GstStructure *s;
+
+ g_return_val_if_fail (!v4l2object->skip_try_fmt_probes ||
+ gst_caps_is_writable (caps), FALSE);
+
+ GST_V4L2_CHECK_OPEN (v4l2object);
+ if (!try_only)
+ GST_V4L2_CHECK_NOT_ACTIVE (v4l2object);
+
+ is_mplane = V4L2_TYPE_IS_MULTIPLANAR (v4l2object->type);
+
+ gst_video_info_init (&info);
+ gst_video_alignment_reset (&align);
+
+ if (!gst_v4l2_object_get_caps_info (v4l2object, caps, &fmtdesc, &info))
+ goto invalid_caps;
+
+ pixelformat = fmtdesc->pixelformat;
+ width = GST_VIDEO_INFO_WIDTH (&info);
+ height = GST_VIDEO_INFO_HEIGHT (&info);
+ fps_n = GST_VIDEO_INFO_FPS_N (&info);
+ fps_d = GST_VIDEO_INFO_FPS_D (&info);
+
+ /* if encoded format (GST_VIDEO_INFO_N_PLANES return 0)
+ * or if contiguous is prefered */
+ n_v4l_planes = GST_VIDEO_INFO_N_PLANES (&info);
+ if (!n_v4l_planes || !v4l2object->prefered_non_contiguous)
+ n_v4l_planes = 1;
+
+ if (GST_VIDEO_INFO_IS_INTERLACED (&info)) {
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "interlaced video");
+ /* ideally we would differentiate between types of interlaced video
+ * but there is not sufficient information in the caps..
+ */
+ field = V4L2_FIELD_INTERLACED;
+ } else {
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "progressive video");
+ field = V4L2_FIELD_NONE;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT (v4l2object->type)) {
+ /* We first pick th main colorspace from the primaries */
+ switch (info.colorimetry.primaries) {
+ case GST_VIDEO_COLOR_PRIMARIES_BT709:
+ /* There is two colorspaces using these primaries, use the range to
+ * differentiate */
+ if (info.colorimetry.range == GST_VIDEO_COLOR_RANGE_16_235)
+ colorspace = V4L2_COLORSPACE_REC709;
+ else
+ colorspace = V4L2_COLORSPACE_SRGB;
+ break;
+ case GST_VIDEO_COLOR_PRIMARIES_BT2020:
+ colorspace = V4L2_COLORSPACE_BT2020;
+ break;
+ case GST_VIDEO_COLOR_PRIMARIES_BT470M:
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+ break;
+ case GST_VIDEO_COLOR_PRIMARIES_BT470BG:
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+ break;
+ case GST_VIDEO_COLOR_PRIMARIES_SMPTE170M:
+ colorspace = V4L2_COLORSPACE_SMPTE170M;
+ break;
+ case GST_VIDEO_COLOR_PRIMARIES_SMPTE240M:
+ colorspace = V4L2_COLORSPACE_SMPTE240M;
+ break;
+
+ case GST_VIDEO_COLOR_PRIMARIES_FILM:
+ case GST_VIDEO_COLOR_PRIMARIES_UNKNOWN:
+ /* We don't know, we will guess */
+ break;
+
+ default:
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Unknown colorimetry primaries %d", info.colorimetry.primaries);
+ break;
+ }
+
+ switch (info.colorimetry.range) {
+ case GST_VIDEO_COLOR_RANGE_0_255:
+ range = V4L2_QUANTIZATION_FULL_RANGE;
+ break;
+ case GST_VIDEO_COLOR_RANGE_16_235:
+ range = V4L2_QUANTIZATION_LIM_RANGE;
+ break;
+ case GST_VIDEO_COLOR_RANGE_UNKNOWN:
+ /* We let the driver pick a default one */
+ break;
+ default:
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Unknown colorimetry range %d", info.colorimetry.range);
+ break;
+ }
+
+ switch (info.colorimetry.matrix) {
+ case GST_VIDEO_COLOR_MATRIX_RGB:
+ /* Unspecified, leave to default */
+ break;
+ /* FCC is about the same as BT601 with less digit */
+ case GST_VIDEO_COLOR_MATRIX_FCC:
+ case GST_VIDEO_COLOR_MATRIX_BT601:
+ matrix = V4L2_YCBCR_ENC_601;
+ break;
+ case GST_VIDEO_COLOR_MATRIX_BT709:
+ matrix = V4L2_YCBCR_ENC_709;
+ break;
+ case GST_VIDEO_COLOR_MATRIX_SMPTE240M:
+ matrix = V4L2_YCBCR_ENC_SMPTE240M;
+ break;
+ case GST_VIDEO_COLOR_MATRIX_BT2020:
+ matrix = V4L2_YCBCR_ENC_BT2020;
+ break;
+ case GST_VIDEO_COLOR_MATRIX_UNKNOWN:
+ /* We let the driver pick a default one */
+ break;
+ default:
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Unknown colorimetry matrix %d", info.colorimetry.matrix);
+ break;
+ }
+
+ switch (info.colorimetry.transfer) {
+ case GST_VIDEO_TRANSFER_GAMMA18:
+ case GST_VIDEO_TRANSFER_GAMMA20:
+ case GST_VIDEO_TRANSFER_GAMMA22:
+ case GST_VIDEO_TRANSFER_GAMMA28:
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "GAMMA 18, 20, 22, 28 transfer functions not supported");
+ /* fallthrough */
+ case GST_VIDEO_TRANSFER_GAMMA10:
+ transfer = V4L2_XFER_FUNC_NONE;
+ break;
+ case GST_VIDEO_TRANSFER_BT2020_12:
+ case GST_VIDEO_TRANSFER_BT709:
+ transfer = V4L2_XFER_FUNC_709;
+ break;
+ case GST_VIDEO_TRANSFER_SMPTE240M:
+ transfer = V4L2_XFER_FUNC_SMPTE240M;
+ break;
+ case GST_VIDEO_TRANSFER_SRGB:
+ transfer = V4L2_XFER_FUNC_SRGB;
+ break;
+ case GST_VIDEO_TRANSFER_LOG100:
+ case GST_VIDEO_TRANSFER_LOG316:
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "LOG 100, 316 transfer functions not supported");
+ /* FIXME No known sensible default, maybe AdobeRGB ? */
+ break;
+ case GST_VIDEO_TRANSFER_UNKNOWN:
+ /* We let the driver pick a default one */
+ break;
+ default:
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Unknown colorimetry tranfer %d", info.colorimetry.transfer);
+ break;
+ }
+
+ if (colorspace == 0) {
+ /* Try to guess colorspace according to pixelformat and size */
+ if (GST_VIDEO_INFO_IS_YUV (&info)) {
+ /* SD streams likely use SMPTE170M and HD streams REC709 */
+ if (width <= 720 && height <= 576)
+ colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else
+ colorspace = V4L2_COLORSPACE_REC709;
+ } else if (GST_VIDEO_INFO_IS_RGB (&info)) {
+ colorspace = V4L2_COLORSPACE_SRGB;
+ transfer = V4L2_XFER_FUNC_NONE;
+ }
+ }
+ }
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "Desired format %dx%d, format "
+ "%" GST_FOURCC_FORMAT " stride: %d", width, height,
+ GST_FOURCC_ARGS (pixelformat), GST_VIDEO_INFO_PLANE_STRIDE (&info, 0));
+
+ memset (&format, 0x00, sizeof (struct v4l2_format));
+ format.type = v4l2object->type;
+
+ if (is_mplane) {
+ format.type = v4l2object->type;
+ format.fmt.pix_mp.pixelformat = pixelformat;
+ format.fmt.pix_mp.width = width;
+ format.fmt.pix_mp.height = height;
+ format.fmt.pix_mp.field = field;
+ format.fmt.pix_mp.num_planes = n_v4l_planes;
+
+ /* try to ask our prefered stride but it's not a failure if not
+ * accepted */
+ for (i = 0; i < n_v4l_planes; i++) {
+ gint stride = GST_VIDEO_INFO_PLANE_STRIDE (&info, i);
+
+ if (GST_VIDEO_FORMAT_INFO_IS_TILED (info.finfo))
+ stride = GST_VIDEO_TILE_X_TILES (stride) <<
+ GST_VIDEO_FORMAT_INFO_TILE_WS (info.finfo);
+
+ format.fmt.pix_mp.plane_fmt[i].bytesperline = stride;
+ }
+
+ if (GST_VIDEO_INFO_FORMAT (&info) == GST_VIDEO_FORMAT_ENCODED)
+ format.fmt.pix_mp.plane_fmt[0].sizeimage = ENCODED_BUFFER_SIZE;
+ } else {
+ gint stride = GST_VIDEO_INFO_PLANE_STRIDE (&info, 0);
+
+ format.type = v4l2object->type;
+
+ format.fmt.pix.width = width;
+ format.fmt.pix.height = height;
+ format.fmt.pix.pixelformat = pixelformat;
+ format.fmt.pix.field = field;
+
+ if (GST_VIDEO_FORMAT_INFO_IS_TILED (info.finfo))
+ stride = GST_VIDEO_TILE_X_TILES (stride) <<
+ GST_VIDEO_FORMAT_INFO_TILE_WS (info.finfo);
+
+ /* try to ask our prefered stride */
+ format.fmt.pix.bytesperline = stride;
+
+ if (GST_VIDEO_INFO_FORMAT (&info) == GST_VIDEO_FORMAT_ENCODED)
+ format.fmt.pix.sizeimage = ENCODED_BUFFER_SIZE;
+ }
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "Desired format is %dx%d, format "
+ "%" GST_FOURCC_FORMAT ", nb planes %d", format.fmt.pix.width,
+ format.fmt.pix_mp.height,
+ GST_FOURCC_ARGS (format.fmt.pix.pixelformat),
+ is_mplane ? format.fmt.pix_mp.num_planes : 1);
+
+#ifndef GST_DISABLE_GST_DEBUG
+ if (is_mplane) {
+ for (i = 0; i < format.fmt.pix_mp.num_planes; i++)
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, " stride %d",
+ format.fmt.pix_mp.plane_fmt[i].bytesperline);
+ } else {
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, " stride %d",
+ format.fmt.pix.bytesperline);
+ }
+#endif
+
+ if (V4L2_TYPE_IS_OUTPUT (v4l2object->type)) {
+ if (is_mplane) {
+ format.fmt.pix_mp.colorspace = colorspace;
+ format.fmt.pix_mp.quantization = range;
+ format.fmt.pix_mp.ycbcr_enc = matrix;
+ format.fmt.pix_mp.xfer_func = transfer;
+ } else {
+ format.fmt.pix.colorspace = colorspace;
+ format.fmt.pix.quantization = range;
+ format.fmt.pix.ycbcr_enc = matrix;
+ format.fmt.pix.xfer_func = transfer;
+ }
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "Desired colorspace is %d:%d:%d:%d",
+ colorspace, range, matrix, transfer);
+ }
+
+#ifdef USE_V4L2_TARGET_NV
+ if ((((!strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVDEC)) && (is_cuvid == FALSE)) ||
+ ((!strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVDEC_ALT)) && (is_cuvid == FALSE)) ||
+ ((!strcmp (v4l2object->videodev, V4L2_DEVICE_PATH_NVDEC_MCCOY)) && (is_cuvid == TRUE))) &&
+ (v4l2object->open_mjpeg_block == TRUE) &&
+ (g_str_equal(gst_structure_get_name(gst_caps_get_structure (caps, 0)), "image/jpeg")))
+ format.fmt.pix_mp.pixelformat = pixelformat = V4L2_PIX_FMT_MJPEG;
+#endif
+
+#ifdef USE_V4L2_TARGET_NV
+ if (is_cuvid == true) {
+ if (videodec) {
+ /*
+ Video sequences without B-frames i.e., All-Intra frames and IPPP... frames
+ should not have any decoding/display latency.
+ nvcuvid decoder has inherent display latency for some video contents
+ which do not have num_reorder_frames=0 in the VUI.
+ Strictly adhering to the standard, this display latency is expected.
+ In case, the user wants to force zero display latency for such contents, we set
+ bForce_zero_latency.
+
+ This CL adds the necessary support for zero display latency, if the video stream
+ is All-Intra or IPPP...
+ */
+ ctl.id = V4L2_CID_MPEG_VIDEO_CUDA_LOW_LATENCY;
+ ctl.value = videodec->cudadec_low_latency;
+ ctrls.count = 1;
+ ctrls.controls = &ctl ;
+ ret = v4l2object->ioctl (fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ if (ret)
+ goto invalid_ctrl;
+ }
+ }
+#endif
+
+ if (try_only) {
+ if (v4l2object->ioctl (fd, VIDIOC_TRY_FMT, &format) < 0)
+ goto try_fmt_failed;
+ } else {
+ if (v4l2object->ioctl (fd, VIDIOC_S_FMT, &format) < 0)
+ goto set_fmt_failed;
+ }
+
+#ifdef USE_V4L2_TARGET_NV
+ struct v4l2_event_subscription sub;
+
+ /*Subscribe eos event*/
+ memset(&sub, 0, sizeof(struct v4l2_event_subscription));
+ sub.type = V4L2_EVENT_EOS;
+
+ ret = v4l2object->ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
+ if (ret < 0)
+ goto invalid_ctrl;
+
+ memset(&sub, 0, sizeof(struct v4l2_event_subscription));
+ sub.type = V4L2_EVENT_RESOLUTION_CHANGE;
+ sub.id = 0;
+ sub.flags = 0;
+
+ ret = v4l2object->ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
+ if (ret < 0)
+ goto invalid_ctrl;
+
+ if (is_cuvid == FALSE) {
+ if (videoenc) {
+ if (V4L2_TYPE_IS_OUTPUT (v4l2object->type)) {
+ if (strcmp (klass->codec_name, "H264") == 0
+ || strcmp (klass->codec_name, "H265") == 0
+ || strcmp (klass->codec_name, "AV1") == 0) {
+ if (!klass->set_encoder_properties (&videoenc->parent)) {
+ g_print ("set_encoder_properties failed\n");
+ return FALSE;
+ }
+ }
+
+ if (!klass->set_video_encoder_properties (&videoenc->parent)) {
+ g_print ("set_video_encoder_properties failed\n");
+ return FALSE;
+ }
+ }
+ }
+ }
+
+ if (is_cuvid == FALSE) {
+ if (videoenc) {
+ if (pixelformat == V4L2_PIX_FMT_VP8 || pixelformat == V4L2_PIX_FMT_VP9) {
+ set_v4l2_video_mpeg_class (videoenc->v4l2capture,
+ V4L2_CID_MPEG_VIDEOENC_VPX_HEADERS_WITH_FRAME, videoenc->v4l2capture->Enable_headers);
+ }
+ if (pixelformat == V4L2_PIX_FMT_AV1) {
+ set_v4l2_video_mpeg_class (videoenc->v4l2capture,
+ V4L2_CID_MPEG_VIDEOENC_AV1_HEADERS_WITH_FRAME, videoenc->v4l2capture->Enable_headers);
+ }
+ }
+ }
+ else if (is_cuvid == TRUE) {
+ if (videoenc) {
+ ctl.id = V4L2_CID_MPEG_VIDEO_CUDA_GPU_ID;
+ ctl.value = videoenc->cudaenc_gpu_id;
+ ctrls.count = 1;
+ ctrls.controls = &ctl ;
+ ret = v4l2object->ioctl (fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ if (ret)
+ goto invalid_ctrl;
+
+ ctl.id = V4L2_CID_MPEG_VIDEOENC_CUDA_PRESET_ID;
+ ctl.value = videoenc->cudaenc_preset_id;
+ ctrls.count = 1;
+ ctrls.controls = &ctl ;
+ ret = v4l2object->ioctl (fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ if (ret)
+ goto invalid_ctrl;
+
+ v4l2_ctrl_video_constqp constqp;
+ constqp.constQpI = videoenc->constQpI;
+ constqp.constQpP = videoenc->constQpP;
+ constqp.constQpB = videoenc->constQpB;
+ ctrls.count = 1;
+ ctrls.controls = &ctl ;
+ ctl.id = V4L2_CID_MPEG_VIDEOENC_CUDA_CONSTQP;
+ ctl.string = (gchar *) &constqp;
+ ret = v4l2object->ioctl (fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ if (ret)
+ goto invalid_ctrl;
+
+ v4l2_ctrl_video_init_qp init_qp;
+ init_qp.IInitQP = videoenc->IInitQP;
+ init_qp.PInitQP = videoenc->PInitQP;
+ init_qp.BInitQP = videoenc->BInitQP;
+ ctrls.count = 1;
+ ctrls.controls = &ctl ;
+ ctl.id = V4L2_CID_MPEG_VIDEOENC_INIT_FRAME_QP;
+ ctl.string = (gchar *) &init_qp;
+
+ ret = v4l2object->ioctl (fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ if (ret)
+ goto invalid_ctrl;
+
+
+ ctl.id = V4L2_CID_MPEG_VIDEOENC_CUDA_TUNING_INFO;
+ ctl.value = videoenc->cudaenc_tuning_info_id;
+ ctrls.count = 1;
+ ctrls.controls = &ctl ;
+ ret = v4l2object->ioctl (fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ if (ret)
+ goto invalid_ctrl;
+ }
+ }
+#endif
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "Got format of %dx%d, format "
+ "%" GST_FOURCC_FORMAT ", nb planes %d, colorspace %d",
+ format.fmt.pix.width, format.fmt.pix_mp.height,
+ GST_FOURCC_ARGS (format.fmt.pix.pixelformat),
+ is_mplane ? format.fmt.pix_mp.num_planes : 1,
+ is_mplane ? format.fmt.pix_mp.colorspace : format.fmt.pix.colorspace);
+
+#ifndef GST_DISABLE_GST_DEBUG
+ if (is_mplane) {
+ for (i = 0; i < format.fmt.pix_mp.num_planes; i++)
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, " stride %d, sizeimage %d",
+ format.fmt.pix_mp.plane_fmt[i].bytesperline,
+ format.fmt.pix_mp.plane_fmt[i].sizeimage);
+ } else {
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, " stride %d, sizeimage %d",
+ format.fmt.pix.bytesperline, format.fmt.pix.sizeimage);
+ }
+#endif
+
+ if (format.fmt.pix.pixelformat != pixelformat)
+ goto invalid_pixelformat;
+
+ /* Only negotiate size with raw data.
+ * For some codecs the dimensions are *not* in the bitstream, IIRC VC1
+ * in ASF mode for example, there is also not reason for a driver to
+ * change the size. */
+ if (info.finfo->format != GST_VIDEO_FORMAT_ENCODED) {
+ /* We can crop larger images */
+ if (format.fmt.pix.width < width || format.fmt.pix.height < height)
+ goto invalid_dimensions;
+
+ /* Note, this will be adjusted if upstream has non-centered cropping. */
+ align.padding_top = 0;
+ align.padding_bottom = format.fmt.pix.height - height;
+ align.padding_left = 0;
+ align.padding_right = format.fmt.pix.width - width;
+ }
+
+ if (is_mplane && format.fmt.pix_mp.num_planes != n_v4l_planes)
+ goto invalid_planes;
+
+ if ((is_mplane && format.fmt.pix_mp.field != field)
+ || format.fmt.pix.field != field)
+ goto invalid_field;
+
+ gst_v4l2_object_get_colorspace (&format, &info.colorimetry);
+
+ s = gst_caps_get_structure (caps, 0);
+#ifndef USE_V4L2_TARGET_NV
+ if (gst_structure_has_field (s, "colorimetry")) {
+ GstVideoColorimetry ci;
+ if (!gst_video_colorimetry_from_string (&ci,
+ gst_structure_get_string (s, "colorimetry"))
+ || !gst_video_colorimetry_is_equal (&ci, &info.colorimetry))
+ goto invalid_colorimetry;
+ }
+#endif
+
+ /* In case we have skipped the try_fmt probes, we'll need to set the
+ * colorimetry and interlace-mode back into the caps. */
+ if (v4l2object->skip_try_fmt_probes) {
+ if (!gst_structure_has_field (s, "colorimetry")) {
+ gchar *str = gst_video_colorimetry_to_string (&info.colorimetry);
+ gst_structure_set (s, "colorimetry", G_TYPE_STRING, str, NULL);
+ g_free (str);
+ }
+
+ if (!gst_structure_has_field (s, "interlace-mode"))
+ gst_structure_set (s, "interlace-mode", G_TYPE_STRING,
+ gst_video_interlace_mode_to_string (info.interlace_mode), NULL);
+ }
+
+ if (try_only) /* good enough for trying only */
+ return TRUE;
+
+ if (GST_VIDEO_INFO_HAS_ALPHA (&info)) {
+ struct v4l2_control ctl = { 0, };
+ ctl.id = V4L2_CID_ALPHA_COMPONENT;
+ ctl.value = 0xff;
+
+ if (v4l2object->ioctl (fd, VIDIOC_S_CTRL, &ctl) < 0)
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Failed to set alpha component value");
+ }
+
+ /* Is there a reason we require the caller to always specify a framerate? */
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "Desired framerate: %u/%u", fps_n,
+ fps_d);
+
+ memset (&streamparm, 0x00, sizeof (struct v4l2_streamparm));
+ streamparm.type = v4l2object->type;
+
+ if (v4l2object->ioctl (fd, VIDIOC_G_PARM, &streamparm) < 0)
+ goto get_parm_failed;
+
+ if (v4l2object->type == V4L2_BUF_TYPE_VIDEO_CAPTURE
+ || v4l2object->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ GST_VIDEO_INFO_FPS_N (&info) =
+ streamparm.parm.capture.timeperframe.denominator;
+ GST_VIDEO_INFO_FPS_D (&info) =
+ streamparm.parm.capture.timeperframe.numerator;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "Got capture framerate: %u/%u",
+ streamparm.parm.capture.timeperframe.denominator,
+ streamparm.parm.capture.timeperframe.numerator);
+
+ /* We used to skip frame rate setup if the camera was already setup
+ * with the requested frame rate. This breaks some cameras though,
+ * causing them to not output data (several models of Thinkpad cameras
+ * have this problem at least).
+ * So, don't skip. */
+ GST_LOG_OBJECT (v4l2object->dbg_obj, "Setting capture framerate to %u/%u",
+ fps_n, fps_d);
+ /* We want to change the frame rate, so check whether we can. Some cheap USB
+ * cameras don't have the capability */
+ if ((streamparm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) == 0) {
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj,
+ "Not setting capture framerate (not supported)");
+ goto done;
+ }
+
+ /* Note: V4L2 wants the frame interval, we have the frame rate */
+ streamparm.parm.capture.timeperframe.numerator = fps_d;
+ streamparm.parm.capture.timeperframe.denominator = fps_n;
+
+ /* some cheap USB cam's won't accept any change */
+ if (v4l2object->ioctl (fd, VIDIOC_S_PARM, &streamparm) < 0)
+ goto set_parm_failed;
+
+ if (streamparm.parm.capture.timeperframe.numerator > 0 &&
+ streamparm.parm.capture.timeperframe.denominator > 0) {
+ /* get new values */
+ fps_d = streamparm.parm.capture.timeperframe.numerator;
+ fps_n = streamparm.parm.capture.timeperframe.denominator;
+
+ GST_INFO_OBJECT (v4l2object->dbg_obj, "Set capture framerate to %u/%u",
+ fps_n, fps_d);
+ } else {
+ /* fix v4l2 capture driver to provide framerate values */
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Reuse caps framerate %u/%u - fix v4l2 capture driver", fps_n, fps_d);
+ }
+
+ GST_VIDEO_INFO_FPS_N (&info) = fps_n;
+ GST_VIDEO_INFO_FPS_D (&info) = fps_d;
+ } else if (v4l2object->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
+ || v4l2object->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ GST_VIDEO_INFO_FPS_N (&info) =
+ streamparm.parm.output.timeperframe.denominator;
+ GST_VIDEO_INFO_FPS_D (&info) =
+ streamparm.parm.output.timeperframe.numerator;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "Got output framerate: %u/%u",
+ streamparm.parm.output.timeperframe.denominator,
+ streamparm.parm.output.timeperframe.numerator);
+
+ GST_LOG_OBJECT (v4l2object->dbg_obj, "Setting output framerate to %u/%u",
+ fps_n, fps_d);
+ if ((streamparm.parm.output.capability & V4L2_CAP_TIMEPERFRAME) == 0) {
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj,
+ "Not setting output framerate (not supported)");
+ goto done;
+ }
+
+ /* Note: V4L2 wants the frame interval, we have the frame rate */
+ streamparm.parm.output.timeperframe.numerator = fps_d;
+ streamparm.parm.output.timeperframe.denominator = fps_n;
+
+ if (v4l2object->ioctl (fd, VIDIOC_S_PARM, &streamparm) < 0)
+ goto set_parm_failed;
+
+ if (streamparm.parm.output.timeperframe.numerator > 0 &&
+ streamparm.parm.output.timeperframe.denominator > 0) {
+ /* get new values */
+ fps_d = streamparm.parm.output.timeperframe.numerator;
+ fps_n = streamparm.parm.output.timeperframe.denominator;
+
+ GST_INFO_OBJECT (v4l2object->dbg_obj, "Set output framerate to %u/%u",
+ fps_n, fps_d);
+ } else {
+ /* fix v4l2 output driver to provide framerate values */
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Reuse caps framerate %u/%u - fix v4l2 output driver", fps_n, fps_d);
+ }
+
+ GST_VIDEO_INFO_FPS_N (&info) = fps_n;
+ GST_VIDEO_INFO_FPS_D (&info) = fps_d;
+ }
+
+done:
+ /* add boolean return, so we can fail on drivers bugs */
+ gst_v4l2_object_save_format (v4l2object, fmtdesc, &format, &info, &align);
+
+ /* now configure the pool */
+ if (!gst_v4l2_object_setup_pool (v4l2object, caps))
+ goto pool_failed;
+
+ return TRUE;
+
+ /* ERRORS */
+#ifdef USE_V4L2_TARGET_NV
+invalid_ctrl:
+ {
+ if (errno == EINVAL) {
+ GST_V4L2_ERROR (error, RESOURCE, SETTINGS,
+ (_("Invalid control.")),
+ GST_ERROR_SYSTEM);
+ }
+ return FALSE;
+ }
+#endif
+invalid_caps:
+ {
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "can't parse caps %" GST_PTR_FORMAT,
+ caps);
+ return FALSE;
+ }
+try_fmt_failed:
+ {
+ if (errno == EINVAL) {
+ GST_V4L2_ERROR (error, RESOURCE, SETTINGS,
+ (_("Device '%s' has no supported format"), v4l2object->videodev),
+ ("Call to TRY_FMT failed for %" GST_FOURCC_FORMAT " @ %dx%d: %s",
+ GST_FOURCC_ARGS (pixelformat), width, height,
+ g_strerror (errno)));
+ } else {
+ GST_V4L2_ERROR (error, RESOURCE, FAILED,
+ (_("Device '%s' failed during initialization"),
+ v4l2object->videodev),
+ ("Call to TRY_FMT failed for %" GST_FOURCC_FORMAT " @ %dx%d: %s",
+ GST_FOURCC_ARGS (pixelformat), width, height,
+ g_strerror (errno)));
+ }
+ return FALSE;
+ }
+set_fmt_failed:
+ {
+ if (errno == EBUSY) {
+ GST_V4L2_ERROR (error, RESOURCE, BUSY,
+ (_("Device '%s' is busy"), v4l2object->videodev),
+ ("Call to S_FMT failed for %" GST_FOURCC_FORMAT " @ %dx%d: %s",
+ GST_FOURCC_ARGS (pixelformat), width, height,
+ g_strerror (errno)));
+ } else if (errno == EINVAL) {
+ GST_V4L2_ERROR (error, RESOURCE, SETTINGS,
+ (_("Device '%s' has no supported format"), v4l2object->videodev),
+ ("Call to S_FMT failed for %" GST_FOURCC_FORMAT " @ %dx%d: %s",
+ GST_FOURCC_ARGS (pixelformat), width, height,
+ g_strerror (errno)));
+ } else {
+ GST_V4L2_ERROR (error, RESOURCE, FAILED,
+ (_("Device '%s' failed during initialization"),
+ v4l2object->videodev),
+ ("Call to S_FMT failed for %" GST_FOURCC_FORMAT " @ %dx%d: %s",
+ GST_FOURCC_ARGS (pixelformat), width, height,
+ g_strerror (errno)));
+ }
+ return FALSE;
+ }
+invalid_dimensions:
+ {
+ GST_V4L2_ERROR (error, RESOURCE, SETTINGS,
+ (_("Device '%s' cannot capture at %dx%d"),
+ v4l2object->videodev, width, height),
+ ("Tried to capture at %dx%d, but device returned size %dx%d",
+ width, height, format.fmt.pix.width, format.fmt.pix.height));
+ return FALSE;
+ }
+invalid_pixelformat:
+ {
+ GST_V4L2_ERROR (error, RESOURCE, SETTINGS,
+ (_("Device '%s' cannot capture in the specified format"),
+ v4l2object->videodev),
+ ("Tried to capture in %" GST_FOURCC_FORMAT
+ ", but device returned format" " %" GST_FOURCC_FORMAT,
+ GST_FOURCC_ARGS (pixelformat),
+ GST_FOURCC_ARGS (format.fmt.pix.pixelformat)));
+ return FALSE;
+ }
+invalid_planes:
+ {
+ GST_V4L2_ERROR (error, RESOURCE, SETTINGS,
+ (_("Device '%s' does support non-contiguous planes"),
+ v4l2object->videodev),
+ ("Device wants %d planes", format.fmt.pix_mp.num_planes));
+ return FALSE;
+ }
+invalid_field:
+ {
+ enum v4l2_field wanted_field;
+
+ if (is_mplane)
+ wanted_field = format.fmt.pix_mp.field;
+ else
+ wanted_field = format.fmt.pix.field;
+
+ GST_V4L2_ERROR (error, RESOURCE, SETTINGS,
+ (_("Device '%s' does not support %s interlacing"),
+ v4l2object->videodev,
+ field == V4L2_FIELD_NONE ? "progressive" : "interleaved"),
+ ("Device wants %s interlacing",
+ wanted_field == V4L2_FIELD_NONE ? "progressive" : "interleaved"));
+ return FALSE;
+ }
+#ifndef USE_V4L2_TARGET_NV
+invalid_colorimetry:
+ {
+ gchar *wanted_colorimetry;
+
+ wanted_colorimetry = gst_video_colorimetry_to_string (&info.colorimetry);
+
+ GST_V4L2_ERROR (error, RESOURCE, SETTINGS,
+ (_("Device '%s' does not support %s colorimetry"),
+ v4l2object->videodev, gst_structure_get_string (s, "colorimetry")),
+ ("Device wants %s colorimetry", wanted_colorimetry));
+
+ g_free (wanted_colorimetry);
+ return FALSE;
+ }
+#endif
+get_parm_failed:
+ {
+ /* it's possible that this call is not supported */
+ if (errno != EINVAL && errno != ENOTTY) {
+ GST_V4L2_ERROR (error, RESOURCE, SETTINGS,
+ (_("Could not get parameters on device '%s'"),
+ v4l2object->videodev), GST_ERROR_SYSTEM);
+ }
+ goto done;
+ }
+set_parm_failed:
+ {
+ GST_V4L2_ERROR (error, RESOURCE, SETTINGS,
+ (_("Video device did not accept new frame rate setting.")),
+ GST_ERROR_SYSTEM);
+ goto done;
+ }
+pool_failed:
+ {
+ /* setup_pool already send the error */
+ return FALSE;
+ }
+}
+
+/* Apply @caps to the device for real (VIDIOC_S_FMT path).  Thin wrapper
+ * around gst_v4l2_object_set_format_full() with try_only disabled. */
+gboolean
+gst_v4l2_object_set_format (GstV4l2Object * v4l2object, GstCaps * caps,
+    GstV4l2Error * error)
+{
+  gboolean ret;
+
+  GST_DEBUG_OBJECT (v4l2object->dbg_obj, "Setting format to %" GST_PTR_FORMAT,
+      caps);
+
+  ret = gst_v4l2_object_set_format_full (v4l2object, caps, FALSE, error);
+
+  return ret;
+}
+
+/* Probe-only variant: runs the same negotiation path as
+ * gst_v4l2_object_set_format() but with try_only enabled, so the driver is
+ * queried with VIDIOC_TRY_FMT and the device state is left untouched. */
+gboolean
+gst_v4l2_object_try_format (GstV4l2Object * v4l2object, GstCaps * caps,
+    GstV4l2Error * error)
+{
+  gboolean ret;
+
+  GST_DEBUG_OBJECT (v4l2object->dbg_obj, "Trying format %" GST_PTR_FORMAT,
+      caps);
+
+  ret = gst_v4l2_object_set_format_full (v4l2object, caps, TRUE, error);
+
+  return ret;
+}
+
+/**
+ * gst_v4l2_object_acquire_format:
+ * @v4l2object: the object
+ * @info: a GstVideoInfo to be filled
+ *
+ * Acquire the driver chosen format. This is useful in decoder or encoder
+ * elements where the output format is chosen by the HW.
+ *
+ * Queries the currently configured format with VIDIOC_G_FMT, translates it
+ * to a #GstVideoFormat, derives padding from the compose/crop rectangle and
+ * saves the result on @v4l2object.
+ *
+ * Returns: %TRUE on success, %FALSE on failure.
+ */
+gboolean
+gst_v4l2_object_acquire_format (GstV4l2Object * v4l2object, GstVideoInfo * info)
+{
+  struct v4l2_fmtdesc *fmtdesc;
+  struct v4l2_format fmt;
+  struct v4l2_crop crop;
+  struct v4l2_selection sel;
+  struct v4l2_rect *r = NULL;
+  GstVideoFormat format;
+  guint width, height;
+  GstVideoAlignment align;
+
+  gst_video_info_init (info);
+  gst_video_alignment_reset (&align);
+
+  /* Ask the driver which format it has currently configured. */
+  memset (&fmt, 0x00, sizeof (struct v4l2_format));
+  fmt.type = v4l2object->type;
+  if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_G_FMT, &fmt) < 0)
+    goto get_fmt_failed;
+
+  /* The fourcc must be one we enumerated from this device earlier. */
+  fmtdesc = gst_v4l2_object_get_format_from_fourcc (v4l2object,
+      fmt.fmt.pix.pixelformat);
+  if (fmtdesc == NULL)
+    goto unsupported_format;
+
+  /* No need to care about mplane, the four first params are the same */
+  format = gst_v4l2_object_v4l2fourcc_to_video_format (fmt.fmt.pix.pixelformat);
+
+  /* fails if we do no translate the fmt.pix.pixelformat to GstVideoFormat */
+  if (format == GST_VIDEO_FORMAT_UNKNOWN)
+    goto unsupported_format;
+
+  if (fmt.fmt.pix.width == 0 || fmt.fmt.pix.height == 0)
+    goto invalid_dimensions;
+
+  width = fmt.fmt.pix.width;
+  height = fmt.fmt.pix.height;
+
+  /* Use the default compose rectangle */
+  memset (&sel, 0, sizeof (struct v4l2_selection));
+  sel.type = v4l2object->type;
+  sel.target = V4L2_SEL_TGT_COMPOSE_DEFAULT;
+  if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_G_SELECTION, &sel) >= 0) {
+    r = &sel.r;
+  } else {
+    /* For ancient kernels, fall back to G_CROP */
+    memset (&crop, 0, sizeof (struct v4l2_crop));
+    crop.type = v4l2object->type;
+    if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_G_CROP, &crop) >= 0)
+      r = &crop.c;
+  }
+  /* Turn the active rectangle into per-edge padding so only the visible
+   * region is exposed through @info, while the full buffer size is kept in
+   * the alignment. */
+  if (r) {
+    align.padding_left = r->left;
+    align.padding_top = r->top;
+    align.padding_right = width - r->width - r->left;
+    align.padding_bottom = height - r->height - r->top;
+    width = r->width;
+    height = r->height;
+  }
+
+  gst_video_info_set_format (info, format, width, height);
+
+#ifdef USE_V4L2_TARGET_NV
+  /* Currently gst plugins base doesn't have support for P012_12LE or NV12 12 bit format.
+     So we can only pass GST_VIDEO_FORMAT_I420_12LE to gst_video_format_get_info() method
+     which returns num planes as 3 and creates an assertion in gst_v4l2_object_extrapolate_info().
+     Once the support for P012_12LE or NV12 12 bit format are added correctly in gst plugins base,
+     We no longer need this check. */
+  /* NOTE(review): `video_info` is declared outside this function; aliasing
+   * info->finfo to it makes this path non-reentrant if two objects acquire a
+   * 12-bit format concurrently — TODO confirm. */
+  if (format == GST_VIDEO_FORMAT_I420_12LE) {
+    memcpy (&video_info, info->finfo, sizeof(video_info));
+    video_info.n_planes = 2;
+    info->finfo = &video_info;
+  }
+#endif
+
+  /* Map the V4L2 field layout onto GStreamer's interlace mode. */
+  switch (fmt.fmt.pix.field) {
+    case V4L2_FIELD_ANY:
+    case V4L2_FIELD_NONE:
+      info->interlace_mode = GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;
+      break;
+    case V4L2_FIELD_INTERLACED:
+    case V4L2_FIELD_INTERLACED_TB:
+    case V4L2_FIELD_INTERLACED_BT:
+      info->interlace_mode = GST_VIDEO_INTERLACE_MODE_INTERLEAVED;
+      break;
+    default:
+      goto unsupported_field;
+  }
+
+  gst_v4l2_object_get_colorspace (&fmt, &info->colorimetry);
+
+  /* Remember the negotiated format on the object for later buffer setup. */
+  gst_v4l2_object_save_format (v4l2object, fmtdesc, &fmt, info, &align);
+
+  /* Shall we setup the pool ? */
+
+  return TRUE;
+
+  /* ERRORS */
+get_fmt_failed:
+  {
+    GST_ELEMENT_WARNING (v4l2object->element, RESOURCE, SETTINGS,
+        (_("Video device did not provide output format.")), GST_ERROR_SYSTEM);
+    return FALSE;
+  }
+invalid_dimensions:
+  {
+    GST_ELEMENT_WARNING (v4l2object->element, RESOURCE, SETTINGS,
+        (_("Video device returned invalid dimensions.")),
+        ("Expected non 0 dimensions, got %dx%d", fmt.fmt.pix.width,
+            fmt.fmt.pix.height));
+    return FALSE;
+  }
+unsupported_field:
+  {
+    GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, SETTINGS,
+        (_("Video device uses an unsupported interlacing method.")),
+        ("V4L2 field type %d not supported", fmt.fmt.pix.field));
+    return FALSE;
+  }
+unsupported_format:
+  {
+    GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, SETTINGS,
+        (_("Video device uses an unsupported pixel format.")),
+        ("V4L2 format %" GST_FOURCC_FORMAT " not supported",
+            GST_FOURCC_ARGS (fmt.fmt.pix.pixelformat)));
+    return FALSE;
+  }
+}
+
+/* Program the device crop rectangle from the object's alignment padding.
+ * Returns TRUE when no cropping is required or the crop was applied. */
+gboolean
+gst_v4l2_object_set_crop (GstV4l2Object * obj)
+{
+  struct v4l2_crop crop = { 0 };
+  GstVideoAlignment *align = &obj->align;
+
+  /* All-zero padding means the buffers already match the visible size. */
+  if (align->padding_left + align->padding_top +
+      align->padding_right + align->padding_bottom == 0) {
+    GST_DEBUG_OBJECT (obj->dbg_obj, "no cropping needed");
+    return TRUE;
+  }
+
+  crop.type = obj->type;
+  crop.c.left = align->padding_left;
+  crop.c.top = align->padding_top;
+  crop.c.width = obj->info.width;
+  crop.c.height = obj->info.height;
+
+  GST_DEBUG_OBJECT (obj->dbg_obj,
+      "Desired cropping left %u, top %u, size %ux%u", crop.c.left, crop.c.top,
+      crop.c.width, crop.c.height);
+
+  if (obj->ioctl (obj->video_fd, VIDIOC_S_CROP, &crop) < 0) {
+    GST_WARNING_OBJECT (obj->dbg_obj, "VIDIOC_S_CROP failed");
+    return FALSE;
+  }
+
+  /* Read back what the driver actually accepted. */
+  if (obj->ioctl (obj->video_fd, VIDIOC_G_CROP, &crop) < 0) {
+    GST_WARNING_OBJECT (obj->dbg_obj, "VIDIOC_G_CROP failed");
+    return FALSE;
+  }
+
+  GST_DEBUG_OBJECT (obj->dbg_obj,
+      "Got cropping left %u, top %u, size %ux%u", crop.c.left, crop.c.top,
+      crop.c.width, crop.c.height);
+
+  return TRUE;
+}
+
+/* Check whether @caps equal the caps currently configured on the pool.
+ * Returns FALSE when no pool exists or no caps were configured yet. */
+gboolean
+gst_v4l2_object_caps_equal (GstV4l2Object * v4l2object, GstCaps * caps)
+{
+  GstStructure *config;
+  GstCaps *oldcaps;
+  gboolean equal = FALSE;
+
+  /* Without a pool there is no previously negotiated format to compare. */
+  if (!v4l2object->pool)
+    return FALSE;
+
+  config = gst_buffer_pool_get_config (v4l2object->pool);
+  gst_buffer_pool_config_get_params (config, &oldcaps, NULL, NULL, NULL);
+
+  if (oldcaps)
+    equal = gst_caps_is_equal (caps, oldcaps);
+
+  gst_structure_free (config);
+
+  return equal;
+}
+
+/* Put the buffer pool into flushing state so blocked threads wake up. */
+gboolean
+gst_v4l2_object_unlock (GstV4l2Object * v4l2object)
+{
+  GstBufferPool *pool = v4l2object->pool;
+
+  GST_LOG_OBJECT (v4l2object->dbg_obj, "start flushing");
+
+  /* Only an active pool can have waiters to flush. */
+  if (pool && gst_buffer_pool_is_active (pool))
+    gst_buffer_pool_set_flushing (pool, TRUE);
+
+  return TRUE;
+}
+
+/* Leave flushing state so the buffer pool resumes normal operation. */
+gboolean
+gst_v4l2_object_unlock_stop (GstV4l2Object * v4l2object)
+{
+  GstBufferPool *pool = v4l2object->pool;
+
+  GST_LOG_OBJECT (v4l2object->dbg_obj, "stop flushing");
+
+  /* Only an active pool has a flushing state to clear. */
+  if (pool && gst_buffer_pool_is_active (pool))
+    gst_buffer_pool_set_flushing (pool, FALSE);
+
+  return TRUE;
+}
+
+/* Tear down streaming: deactivate and release the pool, mark the object
+ * inactive.  Always returns TRUE. */
+gboolean
+gst_v4l2_object_stop (GstV4l2Object * v4l2object)
+{
+  GST_DEBUG_OBJECT (v4l2object->dbg_obj, "stopping");
+
+  /* Nothing to tear down unless the device is open and a format is set. */
+  if (GST_V4L2_IS_OPEN (v4l2object) && GST_V4L2_IS_ACTIVE (v4l2object)) {
+    if (v4l2object->pool) {
+      GST_DEBUG_OBJECT (v4l2object->dbg_obj, "deactivating pool");
+      gst_buffer_pool_set_active (v4l2object->pool, FALSE);
+      gst_object_unref (v4l2object->pool);
+      v4l2object->pool = NULL;
+    }
+
+    GST_V4L2_SET_INACTIVE (v4l2object);
+  }
+
+  return TRUE;
+}
+
+/* Build the caps supported by the device by probing every enumerated
+ * pixel format, optionally restricted by @filter.  Also probes the pixel
+ * aspect ratio once via VIDIOC_CROPCAP when keep-aspect is requested.
+ * Returns a new caps reference owned by the caller. */
+GstCaps *
+gst_v4l2_object_probe_caps (GstV4l2Object * v4l2object, GstCaps * filter)
+{
+  GstCaps *result;
+  GSList *iter;
+  GSList *formats;
+
+  formats = gst_v4l2_object_get_format_list (v4l2object);
+
+  result = gst_caps_new_empty ();
+
+  /* Lazily probe the pixel aspect ratio, only when it is still unknown. */
+  if (v4l2object->keep_aspect && !v4l2object->par) {
+    struct v4l2_cropcap cropcap;
+
+    memset (&cropcap, 0, sizeof (cropcap));
+
+    cropcap.type = v4l2object->type;
+    if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_CROPCAP, &cropcap) < 0) {
+      /* ENOTTY simply means the device has no cropping capability. */
+      if (errno != ENOTTY)
+        GST_WARNING_OBJECT (v4l2object->dbg_obj,
+            "Failed to probe pixel aspect ratio with VIDIOC_CROPCAP: %s",
+            g_strerror (errno));
+    } else {
+      v4l2object->par = g_new0 (GValue, 1);
+      g_value_init (v4l2object->par, GST_TYPE_FRACTION);
+      gst_value_set_fraction (v4l2object->par, cropcap.pixelaspect.numerator,
+          cropcap.pixelaspect.denominator);
+    }
+  }
+
+  for (iter = formats; iter; iter = iter->next) {
+    struct v4l2_fmtdesc *fmtdesc;
+    GstStructure *tmpl;
+    GstCaps *probed;
+
+    fmtdesc = (struct v4l2_fmtdesc *) iter->data;
+
+    tmpl = gst_v4l2_object_v4l2fourcc_to_bare_struct (fmtdesc->pixelformat);
+
+    if (!tmpl) {
+      GST_DEBUG_OBJECT (v4l2object->dbg_obj,
+          "unknown format %" GST_FOURCC_FORMAT,
+          GST_FOURCC_ARGS (fmtdesc->pixelformat));
+      continue;
+    }
+
+    /* With a filter, skip formats that cannot match before the expensive
+     * per-format probing. */
+    if (filter) {
+      GstCaps *format_caps;
+      gboolean intersects;
+
+      format_caps = gst_caps_new_empty ();
+      gst_caps_append_structure (format_caps, gst_structure_copy (tmpl));
+      intersects = gst_caps_can_intersect (format_caps, filter);
+      gst_caps_unref (format_caps);
+
+      if (!intersects) {
+        gst_structure_free (tmpl);
+        continue;
+      }
+    }
+
+    probed = gst_v4l2_object_probe_caps_for_format (v4l2object,
+        fmtdesc->pixelformat, tmpl);
+    if (probed)
+      gst_caps_append (result, probed);
+
+    gst_structure_free (tmpl);
+  }
+
+  if (filter) {
+    GstCaps *unfiltered;
+
+    unfiltered = result;
+    result = gst_caps_intersect_full (filter, unfiltered,
+        GST_CAPS_INTERSECT_FIRST);
+    gst_caps_unref (unfiltered);
+  }
+
+  GST_INFO_OBJECT (v4l2object->dbg_obj, "probed caps: %" GST_PTR_FORMAT,
+      result);
+
+  return result;
+}
+
+/* Return the device caps, probing and caching them on first use.
+ * With a @filter the cached caps are intersected; otherwise the cached
+ * caps are returned with an extra reference. */
+GstCaps *
+gst_v4l2_object_get_caps (GstV4l2Object * v4l2object, GstCaps * filter)
+{
+  GstCaps *result;
+
+  /* Probing is expensive, so keep the unfiltered result around. */
+  if (v4l2object->probed_caps == NULL)
+    v4l2object->probed_caps = gst_v4l2_object_probe_caps (v4l2object, NULL);
+
+  if (filter == NULL) {
+    result = gst_caps_ref (v4l2object->probed_caps);
+  } else {
+    result = gst_caps_intersect_full (filter, v4l2object->probed_caps,
+        GST_CAPS_INTERSECT_FIRST);
+  }
+
+  return result;
+}
+
+gboolean
+gst_v4l2_object_decide_allocation (GstV4l2Object * obj, GstQuery * query)
+{
+ GstCaps *caps;
+ GstBufferPool *pool = NULL, *other_pool = NULL;
+ GstStructure *config;
+ guint size, min, max, own_min = 0;
+ gboolean update;
+ gboolean has_video_meta;
+ gboolean can_share_own_pool, pushing_from_our_pool = FALSE;
+ GstAllocator *allocator = NULL;
+ GstAllocationParams params = { 0 };
+
+ GST_DEBUG_OBJECT (obj->dbg_obj, "decide allocation");
+
+ g_return_val_if_fail (obj->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ obj->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, FALSE);
+
+ gst_query_parse_allocation (query, &caps, NULL);
+
+ if (obj->pool == NULL) {
+ if (!gst_v4l2_object_setup_pool (obj, caps))
+ goto pool_failed;
+ }
+
+ if (gst_query_get_n_allocation_params (query) > 0)
+ gst_query_parse_nth_allocation_param (query, 0, &allocator, ¶ms);
+
+ if (gst_query_get_n_allocation_pools (query) > 0) {
+ gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
+ update = TRUE;
+ } else {
+ pool = NULL;
+ min = max = 0;
+ size = 0;
+ update = FALSE;
+ }
+
+ GST_DEBUG_OBJECT (obj->dbg_obj, "allocation: size:%u min:%u max:%u pool:%"
+ GST_PTR_FORMAT, size, min, max, pool);
+
+ has_video_meta =
+ gst_query_find_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL);
+
+ /* TODO : Why has_video_meta is not set when sink is not overlay */
+#ifndef USE_V4L2_TARGET_NV
+ can_share_own_pool = (has_video_meta || !obj->need_video_meta);
+#else
+ can_share_own_pool = 1;
+#endif
+
+ gst_v4l2_get_driver_min_buffers (obj);
+ /* We can't share our own pool, if it exceed V4L2 capacity */
+#ifdef USE_V4L2_TARGET_NV
+ if (min + obj->min_buffers + 1 > NV_VIDEO_MAX_FRAME)
+ can_share_own_pool = FALSE;
+#else
+ if (min + obj->min_buffers + 1 > VIDEO_MAX_FRAME)
+ can_share_own_pool = FALSE;
+#endif
+
+ /* select a pool */
+ switch (obj->mode) {
+ case GST_V4L2_IO_RW:
+ if (pool) {
+ /* in READ/WRITE mode, prefer a downstream pool because our own pool
+ * doesn't help much, we have to write to it as well */
+ GST_DEBUG_OBJECT (obj->dbg_obj,
+ "read/write mode: using downstream pool");
+ /* use the bigest size, when we use our own pool we can't really do any
+ * other size than what the hardware gives us but for downstream pools
+ * we can try */
+ size = MAX (size, obj->info.size);
+ } else if (can_share_own_pool) {
+ /* no downstream pool, use our own then */
+ GST_DEBUG_OBJECT (obj->dbg_obj,
+ "read/write mode: no downstream pool, using our own");
+ pool = gst_object_ref (obj->pool);
+ size = obj->info.size;
+ pushing_from_our_pool = TRUE;
+ }
+ break;
+
+ case GST_V4L2_IO_USERPTR:
+ case GST_V4L2_IO_DMABUF_IMPORT:
+ /* in importing mode, prefer our own pool, and pass the other pool to
+ * our own, so it can serve itself */
+ if (pool == NULL)
+ goto no_downstream_pool;
+ gst_v4l2_buffer_pool_set_other_pool (GST_V4L2_BUFFER_POOL (obj->pool),
+ pool);
+ other_pool = pool;
+ gst_object_unref (pool);
+ pool = gst_object_ref (obj->pool);
+ size = obj->info.size;
+ break;
+
+ case GST_V4L2_IO_MMAP:
+ case GST_V4L2_IO_DMABUF:
+ /* in streaming mode, prefer our own pool */
+ /* Check if we can use it ... */
+ if (can_share_own_pool) {
+ if (pool)
+ gst_object_unref (pool);
+ pool = gst_object_ref (obj->pool);
+ size = obj->info.size;
+ GST_DEBUG_OBJECT (obj->dbg_obj,
+ "streaming mode: using our own pool %" GST_PTR_FORMAT, pool);
+ pushing_from_our_pool = TRUE;
+ } else if (pool) {
+ GST_DEBUG_OBJECT (obj->dbg_obj,
+ "streaming mode: copying to downstream pool %" GST_PTR_FORMAT,
+ pool);
+ } else {
+ GST_DEBUG_OBJECT (obj->dbg_obj,
+ "streaming mode: no usable pool, copying to generic pool");
+ size = MAX (size, obj->info.size);
+ }
+ break;
+ case GST_V4L2_IO_AUTO:
+ default:
+ GST_WARNING_OBJECT (obj->dbg_obj, "unhandled mode");
+ break;
+ }
+
+ if (size == 0)
+ goto no_size;
+
+ /* If pushing from our own pool, configure it with queried minimum,
+ * otherwise use the minimum required */
+ if (pushing_from_our_pool) {
+ /* When pushing from our own pool, we need what downstream one, to be able
+ * to fill the pipeline, the minimum required to decoder according to the
+ * driver and 2 more, so we don't endup up with everything downstream or
+ * held by the decoder. We account 2 buffers for v4l2 so when one is being
+ * pushed downstream the other one can already be queued for the next
+ * frame. */
+#ifdef USE_V4L2_TARGET_NV
+ if (((!strcmp (obj->videodev, V4L2_DEVICE_PATH_NVDEC)) && (is_cuvid == FALSE)) ||
+ ((!strcmp (obj->videodev, V4L2_DEVICE_PATH_NVDEC_ALT)) && (is_cuvid == FALSE)) ||
+ ((!strcmp (obj->videodev, V4L2_DEVICE_PATH_NVDEC_MCCOY)) && (is_cuvid == TRUE))) {
+ GstV4l2VideoDec *videodec = NULL;
+ videodec = GST_V4L2_VIDEO_DEC (obj->element);
+ own_min = min + obj->min_buffers + videodec->num_extra_surfaces;
+ }
+ else
+ own_min = min + obj->min_buffers + 2;
+#else
+ own_min = min + obj->min_buffers + 2;
+#endif
+
+ /* If no allocation parameters where provided, allow for a little more
+ * buffers and enable copy threshold */
+ if (!update) {
+#ifndef USE_V4L2_TARGET_NV
+ own_min += 2;
+#endif
+ gst_v4l2_buffer_pool_copy_at_threshold (GST_V4L2_BUFFER_POOL (pool),
+ TRUE);
+ } else {
+#ifdef USE_V4L2_TARGET_NV
+ gst_v4l2_buffer_pool_copy_at_threshold (GST_V4L2_BUFFER_POOL (pool),
+ TRUE);
+#else
+ gst_v4l2_buffer_pool_copy_at_threshold (GST_V4L2_BUFFER_POOL (pool),
+ FALSE);
+#endif
+ }
+
+ } else {
+ /* In this case we'll have to configure two buffer pool. For our buffer
+ * pool, we'll need what the driver one, and one more, so we can dequeu */
+ own_min = obj->min_buffers + 1;
+ own_min = MAX (own_min, GST_V4L2_MIN_BUFFERS);
+
+ /* for the downstream pool, we keep what downstream wants, though ensure
+ * at least a minimum if downstream didn't suggest anything (we are
+ * expecting the base class to create a default one for the context) */
+ min = MAX (min, GST_V4L2_MIN_BUFFERS);
+
+ /* To import we need the other pool to hold at least own_min */
+ if (obj->pool == pool)
+ min += own_min;
+ }
+
+ /* Request a bigger max, if one was suggested but it's too small */
+ if (max != 0)
+ max = MAX (min, max);
+
+ /* First step, configure our own pool */
+ config = gst_buffer_pool_get_config (obj->pool);
+
+ if (obj->need_video_meta || has_video_meta) {
+ GST_DEBUG_OBJECT (obj->dbg_obj, "activate Video Meta");
+ gst_buffer_pool_config_add_option (config,
+ GST_BUFFER_POOL_OPTION_VIDEO_META);
+ }
+
+  gst_buffer_pool_config_set_allocator (config, allocator, &params);
+ gst_buffer_pool_config_set_params (config, caps, size, own_min, 0);
+
+ GST_DEBUG_OBJECT (obj->dbg_obj, "setting own pool config to %"
+ GST_PTR_FORMAT, config);
+
+ /* Our pool often need to adjust the value */
+ if (!gst_buffer_pool_set_config (obj->pool, config)) {
+ config = gst_buffer_pool_get_config (obj->pool);
+
+ GST_DEBUG_OBJECT (obj->dbg_obj, "own pool config changed to %"
+ GST_PTR_FORMAT, config);
+
+ /* our pool will adjust the maximum buffer, which we are fine with */
+ if (!gst_buffer_pool_set_config (obj->pool, config))
+ goto config_failed;
+ }
+
+ /* Now configure the other pool if different */
+ if (obj->pool != pool)
+ other_pool = pool;
+
+ if (other_pool) {
+ config = gst_buffer_pool_get_config (other_pool);
+    gst_buffer_pool_config_set_allocator (config, allocator, &params);
+ gst_buffer_pool_config_set_params (config, caps, size, min, max);
+
+ GST_DEBUG_OBJECT (obj->dbg_obj, "setting other pool config to %"
+ GST_PTR_FORMAT, config);
+
+ /* if downstream supports video metadata, add this to the pool config */
+ if (has_video_meta) {
+ GST_DEBUG_OBJECT (obj->dbg_obj, "activate Video Meta");
+ gst_buffer_pool_config_add_option (config,
+ GST_BUFFER_POOL_OPTION_VIDEO_META);
+ }
+
+ if (!gst_buffer_pool_set_config (other_pool, config)) {
+ config = gst_buffer_pool_get_config (other_pool);
+
+ if (!gst_buffer_pool_config_validate_params (config, caps, size, min,
+ max)) {
+ gst_structure_free (config);
+ goto config_failed;
+ }
+
+ if (!gst_buffer_pool_set_config (other_pool, config))
+ goto config_failed;
+ }
+ }
+
+ if (pool) {
+ /* For simplicity, simply read back the active configuration, so our base
+ * class get the right information */
+ config = gst_buffer_pool_get_config (pool);
+ gst_buffer_pool_config_get_params (config, NULL, &size, &min, &max);
+ gst_structure_free (config);
+ }
+
+ if (update)
+ gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
+ else
+ gst_query_add_allocation_pool (query, pool, size, min, max);
+
+ if (allocator)
+ gst_object_unref (allocator);
+
+ if (pool)
+ gst_object_unref (pool);
+
+ return TRUE;
+
+pool_failed:
+ {
+ /* setup_pool already send the error */
+ goto cleanup;
+ }
+config_failed:
+ {
+ GST_ELEMENT_ERROR (obj->element, RESOURCE, SETTINGS,
+ (_("Failed to configure internal buffer pool.")), (NULL));
+ goto cleanup;
+ }
+no_size:
+ {
+ GST_ELEMENT_ERROR (obj->element, RESOURCE, SETTINGS,
+ (_("Video device did not suggest any buffer size.")), (NULL));
+ goto cleanup;
+ }
+cleanup:
+ {
+ if (allocator)
+ gst_object_unref (allocator);
+
+ if (pool)
+ gst_object_unref (pool);
+ return FALSE;
+ }
+no_downstream_pool:
+ {
+ GST_ELEMENT_ERROR (obj->element, RESOURCE, SETTINGS,
+ (_("No downstream pool to import from.")),
+ ("When importing DMABUF or USERPTR, we need a pool to import from"));
+ return FALSE;
+ }
+}
+
+/**
+ * gst_v4l2_object_propose_allocation:
+ * @obj: the #GstV4l2Object answering the query
+ * @query: the ALLOCATION query to fill in
+ *
+ * Answers an upstream ALLOCATION query: offers our own buffer pool
+ * (only if its configured caps equal the query caps), a buffer-count
+ * range based on the driver minimum, and GstVideoMeta support.
+ *
+ * Returns: TRUE on success, FALSE when the query carries no caps or
+ * the caps differ from the pool's configured caps.
+ */
+gboolean
+gst_v4l2_object_propose_allocation (GstV4l2Object * obj, GstQuery * query)
+{
+  GstBufferPool *pool;
+  /* we need at least 2 buffers to operate */
+  guint size, min, max;
+  GstCaps *caps;
+  gboolean need_pool;
+
+  /* Set defaults allocation parameters */
+  size = obj->info.size;
+  min = GST_V4L2_MIN_BUFFERS;
+#ifdef USE_V4L2_TARGET_NV
+  /* NV targets use their own, larger frame-count ceiling */
+  max = NV_VIDEO_MAX_FRAME;
+#else
+  max = VIDEO_MAX_FRAME;
+#endif
+
+  /* NOTE(review): need_pool is parsed but never consulted below; the pool
+   * is offered even when the peer did not ask for one — confirm intended. */
+  gst_query_parse_allocation (query, &caps, &need_pool);
+
+  if (caps == NULL)
+    goto no_caps;
+
+  if ((pool = obj->pool))
+    gst_object_ref (pool);
+
+  if (pool != NULL) {
+    GstCaps *pcaps;
+    GstStructure *config;
+
+    /* we had a pool, check caps */
+    config = gst_buffer_pool_get_config (pool);
+    gst_buffer_pool_config_get_params (config, &pcaps, NULL, NULL, NULL);
+
+    GST_DEBUG_OBJECT (obj->dbg_obj,
+        "we had a pool with caps %" GST_PTR_FORMAT, pcaps);
+    if (!gst_caps_is_equal (caps, pcaps)) {
+      gst_structure_free (config);
+      gst_object_unref (pool);
+      goto different_caps;
+    }
+    gst_structure_free (config);
+  }
+  /* refresh obj->min_buffers from the driver before computing the range */
+  gst_v4l2_get_driver_min_buffers (obj);
+
+  min = MAX (obj->min_buffers, GST_V4L2_MIN_BUFFERS);
+
+  gst_query_add_allocation_pool (query, pool, size, min, max);
+
+  /* we also support various metadata */
+  gst_query_add_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL);
+
+  if (pool)
+    gst_object_unref (pool);
+
+  return TRUE;
+
+  /* ERRORS */
+no_caps:
+  {
+    GST_DEBUG_OBJECT (obj->dbg_obj, "no caps specified");
+    return FALSE;
+  }
+different_caps:
+  {
+    /* different caps, we can't use this pool */
+    GST_DEBUG_OBJECT (obj->dbg_obj, "pool has different caps");
+    return FALSE;
+  }
+}
+
+#ifdef USE_V4L2_TARGET_NV
+/**
+ * set_v4l2_video_mpeg_class:
+ * @v4l2object: an open #GstV4l2Object
+ * @label: the V4L2_CID_MPEG_* control id to set
+ * @params: the control value, or the payload for pointer-style controls
+ *
+ * Sets a single MPEG-class extended control through VIDIOC_S_EXT_CTRLS.
+ * Controls that carry a payload (virtual buffer size, slice intra-refresh,
+ * number of reference frames) are passed via control.string; every other
+ * control takes the plain integer value.
+ *
+ * Returns: TRUE on success, FALSE when the device is not open or the
+ * ioctl fails (EINVAL is reported as an invalid control).
+ */
+gboolean
+set_v4l2_video_mpeg_class (GstV4l2Object * v4l2object, guint label,
+    gint params)
+{
+  struct v4l2_ext_control control;
+  struct v4l2_ext_controls ctrls;
+  gint ret;
+  v4l2_enc_virtual_buffer_size buffer_size = { params };
+
+  if (!GST_V4L2_IS_OPEN (v4l2object)) {
+    g_print ("V4L2 device is not open\n");
+    return FALSE;
+  }
+
+  memset (&control, 0, sizeof (control));
+  memset (&ctrls, 0, sizeof (ctrls));
+
+  ctrls.count = 1;
+  ctrls.controls = &control;
+  ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+
+  control.id = label;
+
+  if (control.id == V4L2_CID_MPEG_VIDEOENC_VIRTUALBUFFER_SIZE) {
+    control.string = (gchar *) &buffer_size;
+  } else if ((control.id == V4L2_CID_MPEG_VIDEOENC_SLICE_INTRAREFRESH_PARAM) ||
+      (control.id == V4L2_CID_MPEG_VIDEOENC_NUM_REFERENCE_FRAMES)) {
+    /* payload controls take a pointer to the integer parameter
+     * (repaired: "&params" had been corrupted by entity-decoding) */
+    control.string = (gchar *) &params;
+  } else {
+    control.value = params;
+  }
+
+  ret = v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+  if (ret < 0) {
+    g_print ("Error while setting IOCTL\n");
+    if (errno == EINVAL)
+      g_print ("Invalid control\n");
+
+    return FALSE;
+  }
+
+  return TRUE;
+}
+#endif
diff --git a/gst-v4l2/gstv4l2object.h b/gst-v4l2/gstv4l2object.h
new file mode 100644
index 0000000..149ac82
--- /dev/null
+++ b/gst-v4l2/gstv4l2object.h
@@ -0,0 +1,387 @@
+/* GStreamer
+ *
+ * Copyright (C) 2001-2002 Ronald Bultje
+ * 2006 Edgard Lima
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * gstv4l2object.h: base class for V4L2 elements
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __GST_V4L2_OBJECT_H__
+#define __GST_V4L2_OBJECT_H__
+
+#include "linux/videodev2.h"
+#ifdef HAVE_LIBV4L2
+#  include <libv4l2.h>
+#endif
+
+#include "v4l2-utils.h"
+
+#ifdef USE_V4L2_TARGET_NV
+#include "nvbufsurface.h"
+#include "v4l2_nv_extensions.h"
+#endif
+
+#include <gst/gst.h>
+#include <gst/base/gstpushsrc.h>
+
+#include <gst/video/video.h>
+
+typedef struct _GstV4l2Object GstV4l2Object;
+typedef struct _GstV4l2ObjectClassHelper GstV4l2ObjectClassHelper;
+
+#include <gstv4l2bufferpool.h>
+
+/* size of v4l2 buffer pool in streaming case */
+#define GST_V4L2_MIN_BUFFERS 2
+
+#ifdef USE_V4L2_TARGET_NV
+#define V4L2_DEVICE_BASENAME_NVDEC "nvdec"
+#define V4L2_DEVICE_BASENAME_NVENC "msenc"
+#define V4L2_DEVICE_PATH_NVDEC "/dev/nvhost-nvdec"
+#define V4L2_DEVICE_PATH_NVDEC_ALT "/dev/v4l2-nvdec"
+#define V4L2_DEVICE_PATH_NVDEC_MCCOY "/dev/nvidia0"
+#define V4L2_DEVICE_PATH_NVENC "/dev/nvhost-msenc"
+#define V4L2_DEVICE_PATH_NVENC_ALT "/dev/v4l2-nvenc"
+#define V4L2_DEVICE_PATH_TEGRA_INFO "/sys/firmware/devicetree/base/compatible"
+#endif
+
+/* max frame width/height */
+#define GST_V4L2_MAX_SIZE (1<<15) /* 2^15 == 32768 */
+
+G_BEGIN_DECLS
+
+#define GST_TYPE_V4L2_IO_MODE (gst_v4l2_io_mode_get_type ())
+GType gst_v4l2_io_mode_get_type (void);
+
+#ifdef USE_V4L2_TARGET_NV
+#define GST_TYPE_V4L2_DEC_OUTPUT_IO_MODE (gst_v4l2_dec_output_io_mode_get_type ())
+GType gst_v4l2_dec_output_io_mode_get_type (void);
+#define GST_TYPE_V4L2_DEC_CAPTURE_IO_MODE (gst_v4l2_dec_capture_io_mode_get_type ())
+GType gst_v4l2_dec_capture_io_mode_get_type (void);
+#define GST_TYPE_V4L2_ENC_OUTPUT_IO_MODE (gst_v4l2_enc_output_io_mode_get_type ())
+GType gst_v4l2_enc_output_io_mode_get_type (void);
+#define GST_TYPE_V4L2_ENC_CAPTURE_IO_MODE (gst_v4l2_enc_capture_io_mode_get_type ())
+GType gst_v4l2_enc_capture_io_mode_get_type (void);
+#endif
+
+#define GST_V4L2_OBJECT(obj) (GstV4l2Object *)(obj)
+
+extern gboolean is_cuvid;
+
+typedef enum {
+ GST_V4L2_IO_AUTO = 0,
+ GST_V4L2_IO_RW = 1,
+ GST_V4L2_IO_MMAP = 2,
+ GST_V4L2_IO_USERPTR = 3,
+ GST_V4L2_IO_DMABUF = 4,
+ GST_V4L2_IO_DMABUF_IMPORT = 5
+} GstV4l2IOMode;
+
+typedef gboolean (*GstV4l2GetInOutFunction) (GstV4l2Object * v4l2object, gint * input);
+typedef gboolean (*GstV4l2SetInOutFunction) (GstV4l2Object * v4l2object, gint input);
+typedef gboolean (*GstV4l2UpdateFpsFunction) (GstV4l2Object * v4l2object);
+
+#define GST_V4L2_WIDTH(o) (GST_VIDEO_INFO_WIDTH (&(o)->info))
+#define GST_V4L2_HEIGHT(o) (GST_VIDEO_INFO_HEIGHT (&(o)->info))
+#define GST_V4L2_PIXELFORMAT(o) ((o)->fmtdesc->pixelformat)
+#define GST_V4L2_FPS_N(o) (GST_VIDEO_INFO_FPS_N (&(o)->info))
+#define GST_V4L2_FPS_D(o) (GST_VIDEO_INFO_FPS_D (&(o)->info))
+
+/* simple check whether the device is open */
+#define GST_V4L2_IS_OPEN(o) ((o)->video_fd > 0)
+
+/* check whether the device is 'active' */
+#define GST_V4L2_IS_ACTIVE(o) ((o)->active)
+#define GST_V4L2_SET_ACTIVE(o) ((o)->active = TRUE)
+#define GST_V4L2_SET_INACTIVE(o) ((o)->active = FALSE)
+
+/* checks whether the current v4lv4l2object has already been open()'ed or not */
+#define GST_V4L2_CHECK_OPEN(v4l2object) \
+ if (!GST_V4L2_IS_OPEN(v4l2object)) \
+ { \
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, SETTINGS, \
+ (_("Device is not open.")), (NULL)); \
+ return FALSE; \
+ }
+
+/* checks whether the current v4lv4l2object is close()'ed or whether it is still open */
+#define GST_V4L2_CHECK_NOT_OPEN(v4l2object) \
+ if (GST_V4L2_IS_OPEN(v4l2object)) \
+ { \
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, SETTINGS, \
+ (_("Device is open.")), (NULL)); \
+ return FALSE; \
+ }
+
+/* checks whether we're out of capture mode or not */
+#define GST_V4L2_CHECK_NOT_ACTIVE(v4l2object) \
+ if (GST_V4L2_IS_ACTIVE(v4l2object)) \
+ { \
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, SETTINGS, \
+ (NULL), ("Device is in streaming mode")); \
+ return FALSE; \
+ }
+
+#ifdef USE_V4L2_TARGET_NV
+/* Structure to hold the video info inorder to modify the contents, incase of
+ * GST_VIDEO_FORMAT_I420_12LE format */
+/* NOTE(review): this is a tentative definition at file scope in a HEADER —
+ * every translation unit that includes this file gets its own definition,
+ * which fails to link under -fno-common (GCC >= 10).  It should likely be
+ * declared "extern" here with a single definition in one .c file; confirm
+ * against the other gst-v4l2 sources before changing. */
+ GstVideoFormatInfo video_info;
+#endif
+
+/* Per-queue state of a V4L2 device: one instance is used for the OUTPUT
+ * (encoded/input) plane and one for the CAPTURE (decoded/output) plane
+ * of an element. */
+struct _GstV4l2Object {
+  GstElement * element;
+  GstObject * dbg_obj;
+
+  enum v4l2_buf_type type;   /* V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_BUF_TYPE_VIDEO_OUTPUT */
+
+  /* the video device */
+  char *videodev;
+
+#ifdef USE_V4L2_TARGET_NV
+  /* TRUE when this object belongs to an encoder element */
+  gboolean is_encode;
+#endif
+
+  /* the video-device's file descriptor */
+  gint video_fd;
+  GstV4l2IOMode mode;
+
+  gboolean active;
+  gboolean streaming;
+
+  /* the current format */
+  struct v4l2_fmtdesc *fmtdesc;
+  struct v4l2_format format;
+  GstVideoInfo info;
+  GstVideoAlignment align;
+
+  /* Features */
+  gboolean need_video_meta;
+  gboolean has_alpha_component;
+
+  /* only used if the device supports MPLANE
+   * nb planes is meaning of v4l2 planes
+   * the gstreamer equivalent is gst_buffer_n_memory
+   */
+  gint n_v4l2_planes;
+
+  /* We cache the frame duration if known */
+  GstClockTime duration;
+
+  /* if the MPLANE device support both contiguous and non contiguous
+   * it allows to select which one we want. But we prefered_non_contiguous
+   * non contiguous mode.
+   */
+  gboolean prefered_non_contiguous;
+
+  /* This will be set if supported in decide_allocation. It can be used to
+   * calculate the minimum latency. */
+  guint32 min_buffers;
+
+  /* wanted mode */
+  GstV4l2IOMode req_mode;
+
+  /* optional pool */
+  GstBufferPool *pool;
+
+  /* the video device's capabilities */
+  struct v4l2_capability vcap;
+  /* opened device specific capabilities */
+  guint32 device_caps;
+
+  /* lists... */
+  GSList *formats;            /* list of available capture formats */
+  GstCaps *probed_caps;
+
+  GList *colors;
+  GList *norms;
+  GList *channels;
+  GData *controls;
+
+  /* properties */
+  v4l2_std_id tv_norm;
+  gchar *channel;
+  gulong frequency;
+  GstStructure *extra_controls;
+  gboolean keep_aspect;
+  GValue *par;
+#ifdef USE_V4L2_TARGET_NV
+  /* Backing fields for NV-specific element properties */
+  gboolean enableMVBufferMeta;
+  gboolean Enable_frame_type_reporting;
+  gboolean Enable_error_check;
+  gboolean Enable_headers;
+  gint ProcessedFrames;
+  gboolean open_mjpeg_block;
+  /* capture-plane stop handshake: flag protected by the cond/lock pair */
+  gboolean capture_plane_stopped;
+  GCond cplane_stopped_cond;
+  GMutex cplane_stopped_lock;
+  /* SEI payload buffer and its size (see SEI parsing in the decoder) */
+  guint sei_payload_size;
+  void* sei_payload;
+#endif
+
+  /* funcs */
+  GstV4l2GetInOutFunction get_in_out_func;
+  GstV4l2SetInOutFunction set_in_out_func;
+  GstV4l2UpdateFpsFunction update_fps_func;
+
+  /* syscalls, indirected so the libv4l2 wrappers can be plugged in
+   * when HAVE_LIBV4L2 is defined */
+  gint (*fd_open) (gint fd, gint v4l2_flags);
+  gint (*close) (gint fd);
+  gint (*dup) (gint fd);
+  gint (*ioctl) (gint fd, gulong request, ...);
+  gssize (*read) (gint fd, gpointer buffer, gsize n);
+  gpointer (*mmap) (gpointer start, gsize length, gint prot, gint flags,
+      gint fd, off_t offset);
+  gint (*munmap) (gpointer _start, gsize length);
+
+  /* Quirks */
+  /* Skips interlacing probes */
+  gboolean never_interlaced;
+  /* Allow to skip reading initial format through G_FMT. Some devices
+   * just fails if you don't call S_FMT first. (ex: M2M decoders) */
+  gboolean no_initial_format;
+  /* Avoid any try_fmt probe. This is used by v4l2src to speedup start up time
+   * on slow USB firmwares. When this is set, gst_v4l2_set_format() will modify
+   * the caps to reflect what was negotiated during fixation */
+  gboolean skip_try_fmt_probes;
+};
+
+/* Per-class (shared, not per-instance) helper state. */
+struct _GstV4l2ObjectClassHelper {
+  /* probed devices */
+  GList *devices;
+};
+
+GType gst_v4l2_object_get_type (void);
+
+#define V4L2_STD_OBJECT_PROPS \
+ PROP_DEVICE, \
+ PROP_DEVICE_NAME, \
+ PROP_DEVICE_FD, \
+ PROP_FLAGS, \
+ PROP_BRIGHTNESS, \
+ PROP_CONTRAST, \
+ PROP_SATURATION, \
+ PROP_HUE, \
+ PROP_TV_NORM, \
+ PROP_IO_MODE, \
+ PROP_OUTPUT_IO_MODE, \
+ PROP_CAPTURE_IO_MODE, \
+ PROP_EXTRA_CONTROLS, \
+ PROP_PIXEL_ASPECT_RATIO, \
+ PROP_FORCE_ASPECT_RATIO
+
+/* create/destroy */
+GstV4l2Object* gst_v4l2_object_new (GstElement * element,
+ GstObject * dbg_obj,
+ enum v4l2_buf_type type,
+ const char * default_device,
+ GstV4l2GetInOutFunction get_in_out_func,
+ GstV4l2SetInOutFunction set_in_out_func,
+ GstV4l2UpdateFpsFunction update_fps_func);
+
+void gst_v4l2_object_destroy (GstV4l2Object * v4l2object);
+
+/* properties */
+
+void gst_v4l2_object_install_properties_helper (GObjectClass * gobject_class,
+ const char * default_device);
+
+void gst_v4l2_object_install_m2m_properties_helper (GObjectClass * gobject_class);
+#ifdef USE_V4L2_TARGET_NV
+void gst_v4l2_object_install_m2m_dec_iomode_properties_helper (GObjectClass * gobject_class);
+
+void gst_v4l2_object_install_m2m_enc_iomode_properties_helper (GObjectClass * gobject_class);
+#endif
+gboolean gst_v4l2_object_set_property_helper (GstV4l2Object * v4l2object,
+ guint prop_id,
+ const GValue * value,
+ GParamSpec * pspec);
+gboolean gst_v4l2_object_get_property_helper (GstV4l2Object *v4l2object,
+ guint prop_id, GValue * value,
+ GParamSpec * pspec);
+/* open/close */
+gboolean gst_v4l2_object_open (GstV4l2Object * v4l2object);
+gboolean gst_v4l2_object_open_shared (GstV4l2Object * v4l2object, GstV4l2Object * other);
+gboolean gst_v4l2_object_close (GstV4l2Object * v4l2object);
+
+/* probing */
+
+GstCaps* gst_v4l2_object_get_all_caps (void);
+
+GstCaps* gst_v4l2_object_get_raw_caps (void);
+
+GstCaps* gst_v4l2_object_get_codec_caps (void);
+
+gint gst_v4l2_object_extrapolate_stride (const GstVideoFormatInfo * finfo,
+ gint plane, gint stride);
+
+gboolean gst_v4l2_object_set_format (GstV4l2Object * v4l2object, GstCaps * caps, GstV4l2Error * error);
+gboolean gst_v4l2_object_try_format (GstV4l2Object * v4l2object, GstCaps * caps, GstV4l2Error * error);
+
+gboolean gst_v4l2_object_caps_equal (GstV4l2Object * v4l2object, GstCaps * caps);
+
+gboolean gst_v4l2_object_unlock (GstV4l2Object * v4l2object);
+gboolean gst_v4l2_object_unlock_stop (GstV4l2Object * v4l2object);
+
+gboolean gst_v4l2_object_stop (GstV4l2Object * v4l2object);
+
+GstCaps * gst_v4l2_object_probe_caps (GstV4l2Object * v4l2object, GstCaps * filter);
+GstCaps * gst_v4l2_object_get_caps (GstV4l2Object * v4l2object, GstCaps * filter);
+
+gboolean gst_v4l2_object_acquire_format (GstV4l2Object * v4l2object, GstVideoInfo * info);
+
+gboolean gst_v4l2_object_set_crop (GstV4l2Object * obj);
+
+gboolean gst_v4l2_object_decide_allocation (GstV4l2Object * v4l2object, GstQuery * query);
+
+gboolean gst_v4l2_object_propose_allocation (GstV4l2Object * obj, GstQuery * query);
+
+GstStructure * gst_v4l2_object_v4l2fourcc_to_structure (guint32 fourcc);
+
+/* TODO Move to proper namespace */
+/* open/close the device */
+gboolean gst_v4l2_open (GstV4l2Object * v4l2object);
+gboolean gst_v4l2_dup (GstV4l2Object * v4l2object, GstV4l2Object * other);
+gboolean gst_v4l2_close (GstV4l2Object * v4l2object);
+
+/* norm/input/output */
+gboolean gst_v4l2_get_norm (GstV4l2Object * v4l2object, v4l2_std_id * norm);
+gboolean gst_v4l2_set_norm (GstV4l2Object * v4l2object, v4l2_std_id norm);
+gboolean gst_v4l2_get_input (GstV4l2Object * v4l2object, gint * input);
+gboolean gst_v4l2_set_input (GstV4l2Object * v4l2object, gint input);
+gboolean gst_v4l2_get_output (GstV4l2Object * v4l2object, gint * output);
+gboolean gst_v4l2_set_output (GstV4l2Object * v4l2object, gint output);
+
+/* frequency control */
+gboolean gst_v4l2_get_frequency (GstV4l2Object * v4l2object, gint tunernum, gulong * frequency);
+gboolean gst_v4l2_set_frequency (GstV4l2Object * v4l2object, gint tunernum, gulong frequency);
+gboolean gst_v4l2_signal_strength (GstV4l2Object * v4l2object, gint tunernum, gulong * signal);
+
+/* attribute control */
+gboolean gst_v4l2_get_attribute (GstV4l2Object * v4l2object, int attribute, int * value);
+gboolean gst_v4l2_set_attribute (GstV4l2Object * v4l2object, int attribute, const int value);
+gboolean gst_v4l2_set_controls (GstV4l2Object * v4l2object, GstStructure * controls);
+
+#ifdef USE_V4L2_TARGET_NV
+gboolean set_v4l2_video_mpeg_class (GstV4l2Object * v4l2object, guint label,
+ gint params);
+#endif
+
+G_END_DECLS
+
+#endif /* __GST_V4L2_OBJECT_H__ */
diff --git a/gst-v4l2/gstv4l2videodec.c b/gst-v4l2/gstv4l2videodec.c
new file mode 100644
index 0000000..bd014be
--- /dev/null
+++ b/gst-v4l2/gstv4l2videodec.c
@@ -0,0 +1,2977 @@
+/*
+ * Copyright (C) 2014-2023 Collabora Ltd.
+ * Author: Nicolas Dufresne
+ * Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include "gstv4l2h26xparser.h"
+#include "gstv4l2object.h"
+#include "gstv4l2videodec.h"
+#include "gstnvdsseimeta.h"
+#include "gst-nvcustomevent.h"
+
+#include "stdlib.h"
+
+#include
+#include
+#include
+
+GST_DEBUG_CATEGORY_STATIC (gst_v4l2_video_dec_debug);
+#define GST_CAT_DEFAULT gst_v4l2_video_dec_debug
+#define ENABLE_DRAIN 1
+#ifdef USE_V4L2_TARGET_NV
+
+/* Modes for on-demand allocation of capture-plane buffers:
+ * disabled, enabled for forward playback only, for reverse playback only,
+ * or for both directions. */
+typedef enum {
+  CAP_BUF_DYNAMIC_ALLOC_DISABLED,
+  CAP_BUF_DYNAMIC_ALLOC_ENABLED_FOR_FW_PLAYBACK,
+  CAP_BUF_DYNAMIC_ALLOC_ENABLED_FOR_RW_PLAYBACK,
+  CAP_BUF_DYNAMIC_ALLOC_ENABLED_FOR_FW_RW_PLAYBACK
+} CaptureBufferDynamicAllocationModes;
+
+#define DEFAULT_SKIP_FRAME_TYPE V4L2_SKIP_FRAMES_TYPE_NONE
+#define DEFAULT_DISABLE_DPB FALSE
+#define DEFAULT_FULL_FRAME FALSE
+#define DEFAULT_FRAME_TYPR_REPORTING FALSE
+#define DEFAULT_ERROR_CHECK FALSE
+#define DEFAULT_MAX_PERFORMANCE FALSE
+#define DEFAULT_CAP_BUF_DYNAMIC_ALLOCATION CAP_BUF_DYNAMIC_ALLOC_DISABLED
+#define GST_TYPE_V4L2_VID_DEC_SKIP_FRAMES (gst_video_dec_skip_frames ())
+#define GST_TYPE_V4L2_DEC_CAP_BUF_DYNAMIC_ALLOC (gst_video_dec_capture_buffer_dynamic_allocation ())
+
+gboolean default_cudadec_low_latency;
+gboolean default_sei_extract_data;
+gint default_num_extra_surfaces;
+
+static gboolean enable_latency_measurement = FALSE;
+extern uint8_t *parse_sei_data (uint8_t *bs, guint size, uint32_t *payload_size, char *sei_uuid_string);
+
+#ifdef USE_V4L2_TARGET_NV
+GstVideoCodecFrame *
+gst_v4l2_video_dec_find_nearest_frame (GstBuffer * buf, GList * frames);
+
+static void
+gst_v4l2_video_dec_clean_older_frames (GstV4l2VideoDec * self,
+ GstBuffer * buf, GList * frames);
+
+#endif
+
+/* Return the current wall-clock time in milliseconds (gettimeofday based). */
+static gdouble get_current_system_timestamp(void)
+{
+  struct timeval now;
+
+  gettimeofday (&now, NULL);
+  return ((gdouble) now.tv_sec * 1000.0) + ((gdouble) now.tv_usec / 1000.0);
+}
+
+/* Lazily register and return the GType for the skip-frames property enum. */
+static GType
+gst_video_dec_skip_frames (void)
+{
+  static GType skip_frames_type = 0;
+
+  if (skip_frames_type == 0) {
+    static const GEnumValue skip_modes[] = {
+      {V4L2_SKIP_FRAMES_TYPE_NONE, "Decode all frames", "decode_all"},
+      {V4L2_SKIP_FRAMES_TYPE_NONREF, "Decode non-ref frames",
+          "decode_non_ref"},
+      {V4L2_SKIP_FRAMES_TYPE_DECODE_IDR_ONLY, "decode key frames",
+          "decode_key"},
+      {0, NULL, NULL}
+    };
+
+    skip_frames_type = g_enum_register_static ("SkipFrame", skip_modes);
+  }
+  return skip_frames_type;
+}
+
+/* Lazily register and return the GType for the capture-buffer dynamic
+ * allocation property enum. */
+static GType
+gst_video_dec_capture_buffer_dynamic_allocation (void)
+{
+  static GType dyn_alloc_type = 0;
+
+  if (dyn_alloc_type == 0) {
+    static const GEnumValue dyn_alloc_modes[] = {
+      {CAP_BUF_DYNAMIC_ALLOC_DISABLED,
+          "Capture buffer dynamic allocation disabled", "cap_buf_dyn_alloc_disabled"},
+      {CAP_BUF_DYNAMIC_ALLOC_ENABLED_FOR_FW_PLAYBACK,
+          "Capture buffer dynamic allocation enabled for forward playback", "fw_cap_buf_dyn_alloc_enabled"},
+      {CAP_BUF_DYNAMIC_ALLOC_ENABLED_FOR_RW_PLAYBACK,
+          "Capture buffer dynamic allocation enabled for reverse playback", "rw_cap_buf_dyn_alloc_enabled"},
+      {CAP_BUF_DYNAMIC_ALLOC_ENABLED_FOR_FW_RW_PLAYBACK,
+          "Capture buffer dynamic allocation enabled for forward and reverse playback", "fw_rw_cap_buf_dyn_alloc_enabled"},
+      {0, NULL, NULL}
+    };
+
+    dyn_alloc_type =
+        g_enum_register_static ("CaptureBufferDynamicAllocationModes",
+        dyn_alloc_modes);
+  }
+  return dyn_alloc_type;
+}
+
+/* Properties specifically applicable on GPU*/
+#define GST_TYPE_V4L2_VID_CUDADEC_MEM_TYPE (gst_video_cudadec_mem_type ())
+
+/* Default CUDA memory type for the cuvid decoder.  Both branches of the
+ * former "#ifndef __aarch64__" conditional selected the same value, so the
+ * dead conditional is dropped. */
+#define DEFAULT_CUDADEC_MEM_TYPE V4L2_CUDA_MEM_TYPE_DEVICE
+
+#define DEFAULT_CUDADEC_GPU_ID 0
+#define MAX_CUDADEC_NUM_SURFACES 20
+#define DEFAULT_CUDADEC_NUM_SURFACES 20
+
+/* Lazily register and return the GType for the cuvid memory-type enum. */
+static GType
+gst_video_cudadec_mem_type (void)
+{
+  static GType mem_type = 0;
+
+  if (mem_type == 0) {
+    static const GEnumValue mem_types[] = {
+      {V4L2_CUDA_MEM_TYPE_DEVICE, "Memory type Device", "memtype_device"},
+      {V4L2_CUDA_MEM_TYPE_PINNED, "Memory type Host Pinned",
+          "memtype_pinned"},
+      {V4L2_CUDA_MEM_TYPE_UNIFIED, "Memory type Unified",
+          "memtype_unified"},
+      {0, NULL, NULL}
+    };
+
+    mem_type = g_enum_register_static ("CudaDecMemType", mem_types);
+  }
+  return mem_type;
+}
+
+#define CAPTURE_CAPS \
+ "video/x-raw(memory:NVMM), " \
+ "width = (gint) [ 1, MAX ], " \
+ "height = (gint) [ 1, MAX ], " \
+ "framerate = (fraction) [ 0, MAX ];"
+
+//Caps on Tegra/dGPU
+static GstStaticPadTemplate gst_v4l2dec_sink_template =
+ GST_STATIC_PAD_TEMPLATE ("sink",
+ GST_PAD_SINK,
+ GST_PAD_ALWAYS,
+ GST_STATIC_CAPS ("image/jpeg"
+ ";"
+ "video/x-h264,"
+ "stream-format = (string) { byte-stream },"
+ "alignment = (string) { au }"
+ ";"
+ "video/x-h265,"
+ "stream-format = (string) { byte-stream },"
+ "alignment = (string) { au }"
+ ";"
+ "video/mpeg, "
+ "mpegversion= (int) 4, "
+ "systemstream=(boolean) false, "
+ "parsed=(boolean) true, "
+ "width=(gint) [1,MAX],"
+ "height=(gint) [1,MAX]"
+ ";"
+ "video/mpeg, "
+ "mpegversion= (int) [1, 2], "
+ "systemstream=(boolean) false, "
+ "parsed=(boolean) true, "
+ "width=(gint) [1,MAX],"
+ "height=(gint) [1,MAX]"
+ ";"
+ "video/x-divx, "
+ "divxversion=(int) [4, 5], "
+ "width=(int) [1,MAX], " "height=(int) [1,MAX]"
+ ";"
+ "video/x-av1"
+ ";"
+ "video/x-vp8"
+ ";" "video/x-vp9," "width=(gint) [1,MAX]," "height=(gint) [1,MAX]" ";")
+ );
+
+static GstStaticPadTemplate gst_v4l2dec_src_template =
+GST_STATIC_PAD_TEMPLATE ("src",
+ GST_PAD_SRC,
+ GST_PAD_ALWAYS,
+ GST_STATIC_CAPS (CAPTURE_CAPS));
+#endif
+
+typedef struct
+{
+ gchar *device;
+ GstCaps *sink_caps;
+ GstCaps *src_caps;
+ const gchar *longname;
+ const gchar *description;
+} GstV4l2VideoDecCData;
+
+enum
+{
+ PROP_0,
+ V4L2_STD_OBJECT_PROPS,
+#ifdef USE_V4L2_TARGET_NV
+/*Common properties*/
+ PROP_SKIP_FRAME,
+ PROP_DROP_FRAME_INTERVAL,
+ PROP_NUM_EXTRA_SURFACES,
+/*Properties exposed on Tegra only */
+ PROP_DISABLE_DPB,
+ PROP_USE_FULL_FRAME,
+ PROP_ENABLE_FRAME_TYPE_REPORTING,
+ PROP_ENABLE_ERROR_CHECK,
+ PROP_ENABLE_MAX_PERFORMANCE,
+ PROP_OPEN_MJPEG_BLOCK,
+/*Properties exposed on dGPU only*/
+ PROP_CUDADEC_MEM_TYPE,
+ PROP_CUDADEC_GPU_ID,
+ PROP_CUDADEC_LOW_LATENCY,
+ PROP_EXTRACT_SEI_TYPE5_DATA,
+ PROP_SEI_UUID_STRING,
+ PROP_CAP_BUF_DYNAMIC_ALLOCATION,
+#endif
+};
+
+#define gst_v4l2_video_dec_parent_class parent_class
+G_DEFINE_ABSTRACT_TYPE (GstV4l2VideoDec, gst_v4l2_video_dec,
+ GST_TYPE_VIDEO_DECODER);
+
+#ifdef USE_V4L2_TARGET_NV
+/* Lazily register and return the GType for the decoder output-plane
+ * io-mode enum. */
+GType
+gst_v4l2_dec_output_io_mode_get_type (void)
+{
+  static GType output_io_mode_type = 0;
+
+  if (output_io_mode_type == 0) {
+    static const GEnumValue output_modes[] = {
+      {GST_V4L2_IO_AUTO, "GST_V4L2_IO_AUTO", "auto"},
+      {GST_V4L2_IO_MMAP, "GST_V4L2_IO_MMAP", "mmap"},
+      {GST_V4L2_IO_USERPTR, "GST_V4L2_IO_USERPTR", "userptr"},
+      {0, NULL, NULL}
+    };
+
+    output_io_mode_type =
+        g_enum_register_static ("GstNvV4l2DecOutputIOMode", output_modes);
+  }
+  return output_io_mode_type;
+}
+
+/* Lazily register and return the GType for the decoder capture-plane
+ * io-mode enum (mmap only, besides auto). */
+GType
+gst_v4l2_dec_capture_io_mode_get_type (void)
+{
+  static GType capture_io_mode_type = 0;
+
+  if (capture_io_mode_type == 0) {
+    static const GEnumValue capture_modes[] = {
+      {GST_V4L2_IO_AUTO, "GST_V4L2_IO_AUTO", "auto"},
+      {GST_V4L2_IO_MMAP, "GST_V4L2_IO_MMAP", "mmap"},
+      {0, NULL, NULL}
+    };
+
+    capture_io_mode_type =
+        g_enum_register_static ("GstNvV4l2DecCaptureIOMode", capture_modes);
+  }
+  return capture_io_mode_type;
+}
+#endif
+
+/* prototypes */
+static GstFlowReturn gst_v4l2_video_dec_finish (GstVideoDecoder * decoder);
+
+#ifdef USE_V4L2_GST_HEADER_VER_1_8
+/**
+ * TODO: This function gst_pad_get_task_state is introduced in newer gstreamer
+ * version which we use on Tegra but on Desktop we are still at 1.8.3 (16.04)
+ * At the moment I am copying the code here but eventually when we move to 1.14,
+ * this shouldn't be needed.
+ * gst_pad_get_task_state:
+ * @pad: the #GstPad to get task state from
+ *
+ * Get @pad task state. If no task is currently
+ * set, GST_TASK_STOPPED is returned.
+ *
+ * Returns: The current state of @pad's task.
+ */
+GstTaskState
+gst_pad_get_task_state (GstPad * pad)
+{
+  /* Verbatim backport from GStreamer core (see comment above); keep
+   * byte-identical to upstream so it can be dropped once the minimum
+   * supported GStreamer version provides it. */
+  GstTask *task;
+  GstTaskState res;
+
+  g_return_val_if_fail (GST_IS_PAD (pad), GST_TASK_STOPPED);
+
+  GST_OBJECT_LOCK (pad);
+  task = GST_PAD_TASK (pad);
+  if (task == NULL)
+    goto no_task;
+  res = gst_task_get_state (task);
+  GST_OBJECT_UNLOCK (pad);
+
+  return res;
+
+no_task:
+  {
+    GST_DEBUG_OBJECT (pad, "pad has no task");
+    GST_OBJECT_UNLOCK (pad);
+    return GST_TASK_STOPPED;
+  }
+}
+#endif
+
+/* GObject set_property implementation used on Tegra.  The capture io-mode
+ * is routed to the capture-plane v4l2 object, the NV-specific decoder
+ * properties are cached on the element (some mirrored onto the planes),
+ * and everything else falls through to the output-plane v4l2 object. */
+static void
+gst_v4l2_video_dec_set_property_tegra (GObject * object,
+    guint prop_id, const GValue * value, GParamSpec * pspec)
+{
+  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (object);
+
+  switch (prop_id) {
+    case PROP_CAPTURE_IO_MODE:
+      if (!gst_v4l2_object_set_property_helper (self->v4l2capture,
+              prop_id, value, pspec)) {
+        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+      }
+      break;
+
+#ifdef USE_V4L2_TARGET_NV
+    case PROP_SKIP_FRAME:
+      self->skip_frames = g_value_get_enum (value);
+      break;
+
+    case PROP_DROP_FRAME_INTERVAL:
+      self->drop_frame_interval = g_value_get_uint (value);
+      break;
+
+    case PROP_NUM_EXTRA_SURFACES:
+      self->num_extra_surfaces = g_value_get_uint (value);
+      break;
+
+    case PROP_DISABLE_DPB:
+      self->disable_dpb = g_value_get_boolean (value);
+      break;
+
+    case PROP_USE_FULL_FRAME:
+      self->enable_full_frame = g_value_get_boolean (value);
+      break;
+
+    /* mirrored onto the capture plane object as well */
+    case PROP_ENABLE_FRAME_TYPE_REPORTING:
+      self->enable_frame_type_reporting = g_value_get_boolean (value);
+      self->v4l2capture->Enable_frame_type_reporting =
+          g_value_get_boolean (value);
+      break;
+
+    /* mirrored onto the capture plane object as well */
+    case PROP_ENABLE_ERROR_CHECK:
+      self->enable_error_check = g_value_get_boolean (value);
+      self->v4l2capture->Enable_error_check = g_value_get_boolean (value);
+      break;
+
+    case PROP_ENABLE_MAX_PERFORMANCE:
+      self->enable_max_performance = g_value_get_boolean (value);
+      break;
+
+    /* stored directly on the output plane object */
+    case PROP_OPEN_MJPEG_BLOCK:
+      self->v4l2output->open_mjpeg_block = g_value_get_boolean (value);
+      break;
+
+    case PROP_CAP_BUF_DYNAMIC_ALLOCATION:
+      self->cap_buf_dynamic_allocation = g_value_get_enum (value);
+      break;
+
+#endif
+    /* By default, only set on output */
+    default:
+      if (!gst_v4l2_object_set_property_helper (self->v4l2output,
+              prop_id, value, pspec)) {
+        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+      }
+      break;
+  }
+}
+
+/* GObject set_property implementation used on dGPU (cuvid).  The capture
+ * io-mode goes to the capture-plane v4l2 object, cuvid-specific properties
+ * are cached on the element, and everything else falls through to the
+ * output-plane v4l2 object. */
+static void
+gst_v4l2_video_dec_set_property_cuvid (GObject * object,
+    guint prop_id, const GValue * value, GParamSpec * pspec)
+{
+  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (object);
+
+  switch (prop_id) {
+    case PROP_CAPTURE_IO_MODE:
+      if (!gst_v4l2_object_set_property_helper (self->v4l2capture,
+              prop_id, value, pspec)) {
+        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+      }
+      break;
+
+#ifdef USE_V4L2_TARGET_NV
+    case PROP_SKIP_FRAME:
+      self->skip_frames = g_value_get_enum (value);
+      break;
+
+    case PROP_DROP_FRAME_INTERVAL:
+      self->drop_frame_interval = g_value_get_uint (value);
+      break;
+
+    case PROP_NUM_EXTRA_SURFACES:
+      self->num_extra_surfaces = g_value_get_uint (value);
+      break;
+
+    case PROP_CUDADEC_MEM_TYPE:
+      self->cudadec_mem_type = g_value_get_enum (value);
+      break;
+
+    case PROP_CUDADEC_GPU_ID:
+      self->cudadec_gpu_id = g_value_get_uint (value);
+      break;
+
+    case PROP_CUDADEC_LOW_LATENCY:
+      self->cudadec_low_latency = g_value_get_boolean (value);
+      break;
+
+    case PROP_EXTRACT_SEI_TYPE5_DATA:
+      self->extract_sei_type5_data = g_value_get_boolean (value);
+      break;
+
+    case PROP_SEI_UUID_STRING:
+      /* NOTE(review): a previously set string is not freed before the new
+       * copy is stored, so setting this property twice leaks the old copy;
+       * confirm and g_free() the old value first. */
+      self->sei_uuid_string = (gchar *)g_value_dup_string (value);
+      break;
+
+    case PROP_CAP_BUF_DYNAMIC_ALLOCATION:
+      self->cap_buf_dynamic_allocation = g_value_get_enum (value);
+      break;
+#endif
+    /* By default, only set on output */
+    default:
+      if (!gst_v4l2_object_set_property_helper (self->v4l2output,
+              prop_id, value, pspec)) {
+        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+      }
+      break;
+  }
+}
+
+/* GObject get_property implementation used on Tegra; mirrors
+ * gst_v4l2_video_dec_set_property_tegra: the capture io-mode is read from
+ * the capture-plane v4l2 object, NV properties from the element's cached
+ * fields, everything else from the output-plane v4l2 object. */
+static void
+gst_v4l2_video_dec_get_property_tegra (GObject * object,
+    guint prop_id, GValue * value, GParamSpec * pspec)
+{
+  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (object);
+
+  switch (prop_id) {
+    case PROP_CAPTURE_IO_MODE:
+      if (!gst_v4l2_object_get_property_helper (self->v4l2capture,
+              prop_id, value, pspec)) {
+        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+      }
+      break;
+
+#ifdef USE_V4L2_TARGET_NV
+    case PROP_SKIP_FRAME:
+      g_value_set_enum (value, self->skip_frames);
+      break;
+
+    case PROP_DROP_FRAME_INTERVAL:
+      g_value_set_uint (value, self->drop_frame_interval);
+      break;
+
+    case PROP_NUM_EXTRA_SURFACES:
+      g_value_set_uint (value, self->num_extra_surfaces);
+      break;
+
+    case PROP_DISABLE_DPB:
+      g_value_set_boolean (value, self->disable_dpb);
+      break;
+
+    case PROP_USE_FULL_FRAME:
+      g_value_set_boolean (value, self->enable_full_frame);
+      break;
+
+    case PROP_ENABLE_FRAME_TYPE_REPORTING:
+      g_value_set_boolean (value, self->enable_frame_type_reporting);
+      break;
+
+    case PROP_ENABLE_ERROR_CHECK:
+      g_value_set_boolean (value, self->enable_error_check);
+      break;
+
+    case PROP_ENABLE_MAX_PERFORMANCE:
+      g_value_set_boolean (value, self->enable_max_performance);
+      break;
+
+    case PROP_OPEN_MJPEG_BLOCK:
+      g_value_set_boolean (value, self->v4l2output->open_mjpeg_block);
+      break;
+
+    case PROP_CAP_BUF_DYNAMIC_ALLOCATION:
+      g_value_set_enum (value, self->cap_buf_dynamic_allocation);
+      break;
+#endif
+    /* By default read from output */
+    default:
+      if (!gst_v4l2_object_get_property_helper (self->v4l2output,
+              prop_id, value, pspec)) {
+        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+      }
+      break;
+  }
+}
+
+static void
+gst_v4l2_video_dec_get_property_cuvid (GObject * object,
+ guint prop_id, GValue * value, GParamSpec * pspec)
+{
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (object);
+
+ switch (prop_id) {
+ case PROP_CAPTURE_IO_MODE:
+ if (!gst_v4l2_object_get_property_helper (self->v4l2capture,
+ prop_id, value, pspec)) {
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ }
+ break;
+
+#ifdef USE_V4L2_TARGET_NV
+ case PROP_SKIP_FRAME:
+ g_value_set_enum (value, self->skip_frames);
+ break;
+
+ case PROP_DROP_FRAME_INTERVAL:
+ g_value_set_uint (value, self->drop_frame_interval);
+ break;
+
+ case PROP_NUM_EXTRA_SURFACES:
+ g_value_set_uint (value, self->num_extra_surfaces);
+ break;
+
+ case PROP_CUDADEC_MEM_TYPE:
+ g_value_set_enum(value, self->cudadec_mem_type);
+ break;
+
+ case PROP_CUDADEC_GPU_ID:
+ g_value_set_uint(value, self->cudadec_gpu_id);
+ break;
+
+ case PROP_CUDADEC_LOW_LATENCY:
+ g_value_set_boolean (value, self->cudadec_low_latency);
+ break;
+
+ case PROP_EXTRACT_SEI_TYPE5_DATA:
+ g_value_set_boolean (value, self->extract_sei_type5_data);
+ break;
+
+ case PROP_SEI_UUID_STRING:
+ g_value_set_string (value, self->sei_uuid_string);
+ break;
+
+ case PROP_CAP_BUF_DYNAMIC_ALLOCATION:
+ g_value_set_enum (value, self->cap_buf_dynamic_allocation);
+ break;
+#endif
+ /* By default read from output */
+ default:
+ if (!gst_v4l2_object_get_property_helper (self->v4l2output,
+ prop_id, value, pspec)) {
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ }
+ break;
+ }
+}
+
+static gboolean
+gst_v4l2_video_dec_open (GstVideoDecoder * decoder)
+{
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
+ GstCaps *codec_caps;
+
+ GST_DEBUG_OBJECT (self, "Opening");
+
+ if (!gst_v4l2_object_open (self->v4l2output))
+ goto failure;
+
+ if (!gst_v4l2_object_open_shared (self->v4l2capture, self->v4l2output))
+ goto failure;
+
+#ifdef USE_V4L2_TARGET_NV
+ // Downstream plugins may export this environment variable
+ if (is_cuvid == TRUE) {
+ g_setenv ("DS_NEW_BUFAPI", "1", TRUE);
+ }
+#endif
+
+ codec_caps = gst_pad_get_pad_template_caps (decoder->sinkpad);
+ self->probed_sinkcaps = gst_v4l2_object_probe_caps (self->v4l2output,
+ codec_caps);
+ gst_caps_unref (codec_caps);
+
+ if (gst_caps_is_empty (self->probed_sinkcaps))
+ goto no_encoded_format;
+
+ self->probed_srccaps = gst_v4l2_object_probe_caps (self->v4l2capture,
+ gst_v4l2_object_get_raw_caps ());
+
+ if (gst_caps_is_empty (self->probed_srccaps))
+ goto no_raw_format;
+
+ return TRUE;
+
+no_encoded_format:
+ GST_ELEMENT_ERROR (self, RESOURCE, SETTINGS,
+ (_("Encoder on device %s has no supported input format"),
+ self->v4l2output->videodev), (NULL));
+ goto failure;
+
+
+no_raw_format:
+ GST_ELEMENT_ERROR (self, RESOURCE, SETTINGS,
+ (_("Encoder on device %s has no supported output format"),
+ self->v4l2output->videodev), (NULL));
+ goto failure;
+
+failure:
+ if (GST_V4L2_IS_OPEN (self->v4l2output))
+ gst_v4l2_object_close (self->v4l2output);
+
+ if (GST_V4L2_IS_OPEN (self->v4l2capture))
+ gst_v4l2_object_close (self->v4l2capture);
+
+ gst_caps_replace (&self->probed_srccaps, NULL);
+ gst_caps_replace (&self->probed_sinkcaps, NULL);
+
+ return FALSE;
+}
+
/* GstVideoDecoder::close vmethod: close both sides of the V4L2 device and
 * drop the caps probed at open time. Always succeeds. */
static gboolean
gst_v4l2_video_dec_close (GstVideoDecoder * decoder)
{
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);

  GST_DEBUG_OBJECT (self, "Closing");

  gst_v4l2_object_close (self->v4l2output);
  gst_v4l2_object_close (self->v4l2capture);
  gst_caps_replace (&self->probed_srccaps, NULL);
  gst_caps_replace (&self->probed_sinkcaps, NULL);

  return TRUE;
}
+
+static gboolean
+gst_v4l2_video_dec_start (GstVideoDecoder * decoder)
+{
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
+
+ GST_DEBUG_OBJECT (self, "Starting");
+
+ gst_v4l2_object_unlock (self->v4l2output);
+ g_atomic_int_set (&self->active, TRUE);
+ self->output_flow = GST_FLOW_OK;
+#if USE_V4L2_TARGET_NV
+ self->decoded_picture_cnt = 0;
+#endif
+
+ self->hash_pts_systemtime = g_hash_table_new(NULL, NULL);
+ return TRUE;
+}
+
/* GstVideoDecoder::stop vmethod: tear down streaming state.
 * The order matters: unlock both queues so blocked ioctls return, interrupt
 * the capture poll, stop the srcpad task, and only then stop the queues. */
static gboolean
gst_v4l2_video_dec_stop (GstVideoDecoder * decoder)
{
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);

  GST_DEBUG_OBJECT (self, "Stopping");

  gst_v4l2_object_unlock (self->v4l2output);
  gst_v4l2_object_unlock (self->v4l2capture);

  /* Wait for capture thread to stop */
#ifdef USE_V4L2_TARGET_NV
  /* Presumably wakes a blocking capture-plane poll so the task can exit —
   * semantics of this NV control come from the low-level library. */
  set_v4l2_video_mpeg_class (self->v4l2capture,
      V4L2_CID_MPEG_SET_POLL_INTERRUPT, 0);
#endif
  gst_pad_stop_task (decoder->srcpad);

  /* Reset the flow state under the stream lock so the next start is clean. */
  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  self->output_flow = GST_FLOW_OK;
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  /* Should have been flushed already */
  g_assert (g_atomic_int_get (&self->active) == FALSE);

  gst_v4l2_object_stop (self->v4l2output);
  gst_v4l2_object_stop (self->v4l2capture);

  /* Frees the PTS -> system-time map created in start() */
  g_hash_table_destroy (self->hash_pts_systemtime);

  if (self->input_state) {
    gst_video_codec_state_unref (self->input_state);
    self->input_state = NULL;
  }

  GST_DEBUG_OBJECT (self, "Stopped");

  return TRUE;
}
+
+#ifdef USE_V4L2_TARGET_NV
/* Push the decoder's property values down to the V4L2 output queue as
 * extended controls. Called after the output format has been set.
 * Returns FALSE as soon as the driver rejects a control. */
gboolean set_v4l2_controls (GstV4l2VideoDec *self)
{
  GST_DEBUG_OBJECT(self, "set_v4l2_controls");
  {
    /* Unconditionally announce complete-frame input (value 0). */
    if (!set_v4l2_video_mpeg_class (self->v4l2output,
        V4L2_CID_MPEG_VIDEO_DISABLE_COMPLETE_FRAME_INPUT, 0)) {
      g_print ("S_EXT_CTRLS for DISABLE_COMPLETE_FRAME_INPUT failed\n");
      return FALSE;
    }
  }

  if (self->skip_frames != V4L2_SKIP_FRAMES_TYPE_NONE) {
    if (!set_v4l2_video_mpeg_class (self->v4l2output,
          V4L2_CID_MPEG_VIDEO_SKIP_FRAMES,
          self->skip_frames)) {
      g_print ("S_EXT_CTRLS for SKIP_FRAMES failed\n");
      return FALSE;
    }
  }

#if 0
  /* *
   * TODO: From low level library remove support of drop frame interval after
   * analyzing high CPU utilization in initial implementation.
   * */
  if (self->drop_frame_interval != 0) {
    if (!set_v4l2_video_mpeg_class (self->v4l2output,
          V4L2_CID_MPEG_VIDEODEC_DROP_FRAME_INTERVAL,
          self->drop_frame_interval)) {
      g_print ("S_EXT_CTRLS for DROP_FRAME_INTERVAL failed\n");
      return FALSE;
    }
  }
#endif
  if (self->disable_dpb != DEFAULT_DISABLE_DPB) {
    if (!set_v4l2_video_mpeg_class (self->v4l2output,
          V4L2_CID_MPEG_VIDEO_DISABLE_DPB,
          self->disable_dpb)) {
      g_print ("S_EXT_CTRLS for DISABLE_DPB failed\n");
      return FALSE;
    }
  }

  if (self->enable_full_frame != DEFAULT_FULL_FRAME) {
    if (!set_v4l2_video_mpeg_class (self->v4l2output,
          V4L2_CID_MPEG_VIDEO_DISABLE_COMPLETE_FRAME_INPUT,
          self->enable_full_frame)) {
      g_print ("S_EXT_CTRLS for DISABLE_COMPLETE_FRAME_INPUT failed\n");
      return FALSE;
    }
  }

  /* NOTE(review): both enable_frame_type_reporting and enable_error_check
   * below write the same V4L2_CID_MPEG_VIDEO_ERROR_REPORTING control —
   * confirm the duplication is intended. */
  if (self->enable_frame_type_reporting) {
    if (!set_v4l2_video_mpeg_class (self->v4l2output,
          V4L2_CID_MPEG_VIDEO_ERROR_REPORTING,
          self->enable_frame_type_reporting)) {
      g_print ("S_EXT_CTRLS for ERROR_REPORTING failed\n");
      return FALSE;
    }
  }

  if (self->enable_error_check) {
    if (!set_v4l2_video_mpeg_class (self->v4l2output,
          V4L2_CID_MPEG_VIDEO_ERROR_REPORTING,
          self->enable_error_check)) {
      g_print ("S_EXT_CTRLS for ERROR_REPORTING failed\n");
      return FALSE;
    }
  }

  if (self->enable_max_performance != DEFAULT_MAX_PERFORMANCE) {
    if (!set_v4l2_video_mpeg_class (self->v4l2output,
          V4L2_CID_MPEG_VIDEO_MAX_PERFORMANCE,
          self->enable_max_performance)) {
      g_print ("S_EXT_CTRLS for MAX_PERFORMANCE failed\n");
      return FALSE;
    }
  }

  /* CUDA memory type / GPU id only apply to the dGPU (cuvid) path. */
  if (is_cuvid == TRUE) {
    if (!set_v4l2_video_mpeg_class (self->v4l2output,
          V4L2_CID_MPEG_VIDEO_CUDA_MEM_TYPE,
          self->cudadec_mem_type)) {
      g_print ("S_EXT_CTRLS for CUDA_MEM_TYPE failed\n");
      return FALSE;
    }

    if (!set_v4l2_video_mpeg_class (self->v4l2output,
          V4L2_CID_MPEG_VIDEO_CUDA_GPU_ID,
          self->cudadec_gpu_id)) {
      g_print ("S_EXT_CTRLS for CUDA_GPU_ID failed\n");
      return FALSE;
    }
  }

  return TRUE;
}
+#endif
+
+static gboolean
+gst_v4l2_video_dec_set_format (GstVideoDecoder * decoder,
+ GstVideoCodecState * state)
+{
+ GstV4l2Error error = GST_V4L2_ERROR_INIT;
+ gboolean ret = TRUE;
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
+
+ GST_DEBUG_OBJECT (self, "Setting format: %" GST_PTR_FORMAT, state->caps);
+
+ if (self->input_state) {
+#ifndef USE_V4L2_TARGET_NV
+ if (gst_v4l2_object_caps_equal (self->v4l2output, state->caps)) {
+ GST_DEBUG_OBJECT (self, "Compatible caps");
+ goto done;
+ }
+#else
+ if (is_cuvid == TRUE) {
+ GstV4l2BufferPool *v4l2pool = GST_V4L2_BUFFER_POOL(self->v4l2output->pool);
+ GstV4l2Object *obj = v4l2pool->obj;
+ if ((GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H264) ||
+ (GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H265))
+ {
+ GST_INFO_OBJECT(self, "pix format is h264 or h265. skipping");
+ self->output_flow = GST_FLOW_OK;
+ self->set_format = true;
+ return TRUE;
+ }
+ } else if (is_cuvid == FALSE) {
+ {
+ GstStructure *config;
+ GstCaps *oldcaps;
+ GstStructure *structure;
+ gint width = 0;
+
+ config = gst_buffer_pool_get_config(self->v4l2output->pool);
+ gst_buffer_pool_config_get_params(config, &oldcaps, NULL, NULL, NULL);
+ structure = gst_caps_get_structure(oldcaps, 0);
+ if (gst_structure_get_int(structure, "width", &width))
+ self->is_drc = TRUE;
+ else
+ self->is_drc = FALSE;
+ gst_structure_free(config);
+ }
+ if (self->is_drc == FALSE)
+ return TRUE;
+ else
+ self->idr_received = FALSE;
+ }
+#endif
+ gst_video_codec_state_unref (self->input_state);
+ self->input_state = NULL;
+
+ gst_v4l2_video_dec_finish (decoder);
+ gst_v4l2_object_stop (self->v4l2output);
+
+ /* The renegotiation flow don't blend with the base class flow. To
+ * properly stop the capture pool we need to reclaim our buffers, which
+ * will happend through the allocation query. The allocation query is
+ * triggered by gst_video_decoder_negotiate() which requires the output
+ * caps to be set, but we can't know this information as we rely on the
+ * decoder, which requires the capture queue to be stopped.
+ *
+ * To workaround this issue, we simply run an allocation query with the
+ * old negotiated caps in order to drain/reclaim our buffers. That breaks
+ * the complexity and should not have much impact in performance since the
+ * following allocation query will happen on a drained pipeline and won't
+ * block. */
+ {
+ GstCaps *caps = gst_pad_get_current_caps (decoder->srcpad);
+ if (caps) {
+ GstQuery *query = gst_query_new_allocation (caps, FALSE);
+ gst_pad_peer_query (decoder->srcpad, query);
+ gst_query_unref (query);
+ gst_caps_unref (caps);
+ }
+ }
+
+ gst_v4l2_object_stop (self->v4l2capture);
+ self->output_flow = GST_FLOW_OK;
+#ifdef USE_V4L2_TARGET_NV
+ if (is_cuvid == FALSE) {
+ if (self->is_drc == TRUE)
+ {
+ g_mutex_lock(&self->v4l2capture->cplane_stopped_lock);
+ while (self->v4l2capture->capture_plane_stopped != TRUE)
+ {
+ g_cond_wait(&self->v4l2capture->cplane_stopped_cond,
+ &self->v4l2capture->cplane_stopped_lock);
+ }
+ self->v4l2capture->capture_plane_stopped = FALSE;
+ g_mutex_unlock(&self->v4l2capture->cplane_stopped_lock);
+ gst_v4l2_object_close(self->v4l2output);
+ gst_v4l2_object_close(self->v4l2capture);
+ gst_v4l2_object_open(self->v4l2output);
+ if (!gst_v4l2_object_open_shared(self->v4l2capture, self->v4l2output))
+ {
+ g_print("gstv4l2object open shared failed\n");
+ if (GST_V4L2_IS_OPEN(self->v4l2output))
+ gst_v4l2_object_close(self->v4l2output);
+ return FALSE;
+ }
+ }
+ }
+#endif
+ }
+
+ ret = gst_v4l2_object_set_format (self->v4l2output, state->caps, &error);
+
+ if (ret)
+ self->input_state = gst_video_codec_state_ref (state);
+ else
+ gst_v4l2_error (self, &error);
+
+#ifdef USE_V4L2_TARGET_NV
+ ret = set_v4l2_controls(self);
+#endif
+
+#ifndef USE_V4L2_TARGET_NV
+done:
+#endif
+ return ret;
+}
+
/* GstVideoDecoder::flush vmethod: stop the output task if needed, clear the
 * unlock state of both queues and flush their buffer pools.
 * Note: the stream lock is dropped while stopping the task so the capture
 * loop (which takes the same lock) can exit. */
static gboolean
gst_v4l2_video_dec_flush (GstVideoDecoder * decoder)
{
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);

  GST_DEBUG_OBJECT (self, "Flushed");

  /* Ensure the processing thread has stopped for the reverse playback
   * discount case */
  if (gst_pad_get_task_state (decoder->srcpad) == GST_TASK_STARTED) {
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

    gst_v4l2_object_unlock (self->v4l2output);
    gst_v4l2_object_unlock (self->v4l2capture);
#ifdef USE_V4L2_TARGET_NV
    /* Presumably interrupts a blocking capture poll — NV-specific control. */
    set_v4l2_video_mpeg_class (self->v4l2capture,
        V4L2_CID_MPEG_SET_POLL_INTERRUPT, 0);
#endif
    gst_pad_stop_task (decoder->srcpad);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  }

  self->output_flow = GST_FLOW_OK;

  /* Re-arm both queues after the unlock above. */
  gst_v4l2_object_unlock_stop (self->v4l2output);
  gst_v4l2_object_unlock_stop (self->v4l2capture);

  if (self->v4l2output->pool)
    gst_v4l2_buffer_pool_flush (self->v4l2output->pool);

  if (self->v4l2capture->pool)
    gst_v4l2_buffer_pool_flush (self->v4l2capture->pool);

  return TRUE;
}
+
+static gboolean
+gst_v4l2_video_dec_negotiate (GstVideoDecoder * decoder)
+{
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
+
+ /* We don't allow renegotiation without carefull disabling the pool */
+ if (self->v4l2capture->pool &&
+ gst_buffer_pool_is_active (GST_BUFFER_POOL (self->v4l2capture->pool)))
+ return TRUE;
+
+ return GST_VIDEO_DECODER_CLASS (parent_class)->negotiate (decoder);
+}
+
+static gboolean
+gst_v4l2_decoder_cmd (GstV4l2Object * v4l2object, guint cmd, guint flags)
+{
+ struct v4l2_decoder_cmd dcmd = { 0, };
+
+ GST_DEBUG_OBJECT (v4l2object->element,
+ "sending v4l2 decoder command %u with flags %u", cmd, flags);
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ return FALSE;
+
+ dcmd.cmd = cmd;
+ dcmd.flags = flags;
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_DECODER_CMD, &dcmd) < 0)
+ goto dcmd_failed;
+
+ return TRUE;
+
+dcmd_failed:
+ if (errno == ENOTTY) {
+ GST_INFO_OBJECT (v4l2object->element,
+ "Failed to send decoder command %u with flags %u for '%s'. (%s)",
+ cmd, flags, v4l2object->videodev, g_strerror (errno));
+ } else {
+ GST_ERROR_OBJECT (v4l2object->element,
+ "Failed to send decoder command %u with flags %u for '%s'. (%s)",
+ cmd, flags, v4l2object->videodev, g_strerror (errno));
+ }
+ return FALSE;
+}
+
/* GstVideoDecoder::finish vmethod: drain the decoder at EOS.
 * Issues V4L2_DEC_CMD_STOP and waits for the capture task to drain; falls
 * back to queuing empty buffers when the command is unsupported. Returns
 * the flow state the output loop ended with. */
static GstFlowReturn
gst_v4l2_video_dec_finish (GstVideoDecoder * decoder)
{
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *buffer;

  /* Nothing to drain if the capture loop never started */
  if (gst_pad_get_task_state (decoder->srcpad) != GST_TASK_STARTED)
    goto done;

  GST_DEBUG_OBJECT (self, "Finishing decoding");

  /* Drop the stream lock so the capture task can make progress */
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

#ifndef USE_V4L2_TARGET_NV
  if (gst_v4l2_decoder_cmd (self->v4l2output, V4L2_DEC_CMD_STOP, 0)) {
#else
  /* NV decoders take an extra stop flag */
  if (gst_v4l2_decoder_cmd (self->v4l2output, V4L2_DEC_CMD_STOP,
      V4L2_DEC_CMD_STOP_TO_BLACK)) {
#endif
    GstTask *task = decoder->srcpad->task;
    if (!task) {
      goto stop_task;
    }

    /* If the decoder stop command succeeded, just wait until processing is
     * finished */
    GST_OBJECT_LOCK (task);
    while (GST_TASK_STATE (task) == GST_TASK_STARTED)
      GST_TASK_WAIT (task);
    GST_OBJECT_UNLOCK (task);
    ret = GST_FLOW_FLUSHING;
  } else {
    /* otherwise keep queuing empty buffers until the processing thread has
     * stopped, _pool_process() will return FLUSHING when that happened */
    while (ret == GST_FLOW_OK) {
      buffer = gst_buffer_new ();
      ret =
          gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->
              v4l2output->pool), &buffer);
      gst_buffer_unref (buffer);
    }
  }

  /* and ensure the processing thread has stopped in case another error
   * occured. */
stop_task:
  gst_v4l2_object_unlock (self->v4l2capture);
#ifdef USE_V4L2_TARGET_NV
  /* Presumably interrupts a blocking capture poll — NV-specific control. */
  set_v4l2_video_mpeg_class (self->v4l2capture,
      V4L2_CID_MPEG_SET_POLL_INTERRUPT, 0);
#endif
  gst_pad_stop_task (decoder->srcpad);
  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  /* FLUSHING from the drain itself is expected; report the loop's state */
  if (ret == GST_FLOW_FLUSHING)
    ret = self->output_flow;

  GST_DEBUG_OBJECT (decoder, "Done draining buffers");

  /* TODO Shall we cleanup any reffed frame to workaround broken decoders ? */

done:
  return ret;
}
+
+static GstFlowReturn
+gst_v4l2_video_dec_drain (GstVideoDecoder * decoder)
+{
+#if ENABLE_DRAIN
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
+
+ GST_DEBUG_OBJECT (self, "Draining...");
+ gst_v4l2_video_dec_finish (decoder);
+ gst_v4l2_video_dec_flush (decoder);
+
+ return GST_FLOW_OK;
+#else
+ return GST_FLOW_OK;
+#endif
+}
+
+#ifdef USE_V4L2_TARGET_NV
+GstVideoCodecFrame *
+gst_v4l2_video_dec_find_nearest_frame (GstBuffer * buf, GList * frames)
+{
+ GstVideoCodecFrame *best = NULL;
+ GstClockTimeDiff best_diff = G_MAXINT64;
+ GstClockTime timestamp;
+ GList *l;
+
+ timestamp = buf->pts;
+
+ for (l = frames; l; l = l->next) {
+ GstVideoCodecFrame *tmp = l->data;
+ GstClockTimeDiff diff = ABS (GST_CLOCK_DIFF (timestamp, tmp->pts));
+
+ if (diff < best_diff) {
+ best = tmp;
+ best_diff = diff;
+
+ if (diff == 0)
+ break;
+ }
+ }
+
+ if (best)
+ gst_video_codec_frame_ref (best);
+
+ g_list_foreach (frames, (GFunc) gst_video_codec_frame_unref, NULL);
+ g_list_free (frames);
+
+ return best;
+}
+
+
+static void
+gst_v4l2_video_dec_clean_older_frames (GstV4l2VideoDec * self,
+ GstBuffer * buf, GList * frames)
+{
+ GList *l;
+ GstClockTime timestamp;
+
+ timestamp = buf->pts;
+
+ if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
+ /* We could release all frames stored with pts < timestamp since the
+ * decoder will likely output frames in display order */
+ for (l = frames; l; l = l->next) {
+ GstVideoCodecFrame *tmp = l->data;
+
+ if (tmp->pts < timestamp) {
+ GST_LOG_OBJECT (self,
+ "discarding ghost frame %p (#%d) PTS:%" GST_TIME_FORMAT " DTS:%"
+ GST_TIME_FORMAT, tmp, tmp->system_frame_number,
+ GST_TIME_ARGS (tmp->pts), GST_TIME_ARGS (tmp->dts));
+ gst_video_decoder_release_frame (GST_VIDEO_DECODER (self), tmp);
+ } else {
+ gst_video_codec_frame_unref (tmp);
+ }
+ }
+ } else {
+ /* We will release all frames with invalid timestamp because we don't even
+ * know if they will be output some day. */
+ for (l = frames; l; l = l->next) {
+ GstVideoCodecFrame *tmp = l->data;
+
+ if (!GST_CLOCK_TIME_IS_VALID (tmp->pts)) {
+ GST_LOG_OBJECT (self,
+ "discarding frame %p (#%d) with invalid PTS:%" GST_TIME_FORMAT
+ " DTS:%" GST_TIME_FORMAT, tmp, tmp->system_frame_number,
+ GST_TIME_ARGS (tmp->pts), GST_TIME_ARGS (tmp->dts));
+ gst_video_decoder_release_frame (GST_VIDEO_DECODER (self), tmp);
+ } else {
+ gst_video_codec_frame_unref (tmp);
+ }
+ }
+ }
+
+ g_list_free (frames);
+}
+
+#endif
+
+#ifndef USE_V4L2_TARGET_NV
+static GstVideoCodecFrame *
+gst_v4l2_video_dec_get_oldest_frame (GstVideoDecoder * decoder)
+{
+ GstVideoCodecFrame *frame = NULL;
+ GList *frames, *l;
+ gint count = 0;
+
+ frames = gst_video_decoder_get_frames (decoder);
+
+ for (l = frames; l != NULL; l = l->next) {
+ GstVideoCodecFrame *f = l->data;
+
+ if (!frame || frame->pts > f->pts)
+ frame = f;
+
+ count++;
+ }
+
+ if (frame) {
+ GST_LOG_OBJECT (decoder,
+ "Oldest frame is %d %" GST_TIME_FORMAT " and %d frames left",
+ frame->system_frame_number, GST_TIME_ARGS (frame->pts), count - 1);
+ gst_video_codec_frame_ref (frame);
+ }
+
+ g_list_free_full (frames, (GDestroyNotify) gst_video_codec_frame_unref);
+
+ return frame;
+}
+#endif
+
+static void
+gst_v4l2_video_dec_loop (GstVideoDecoder * decoder)
+{
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
+ GstV4l2BufferPool *v4l2_pool = GST_V4L2_BUFFER_POOL (self->v4l2capture->pool);
+ GstBufferPool *pool;
+ GstVideoCodecFrame *frame;
+ GstBuffer *buffer = NULL;
+ GstFlowReturn ret;
+
+ GST_LOG_OBJECT (decoder, "Allocate output buffer");
+
+ self->output_flow = GST_FLOW_OK;
+ do {
+ /* We cannot use the base class allotate helper since it taking the internal
+ * stream lock. we know that the acquire may need to poll until more frames
+ * comes in and holding this lock would prevent that.
+ */
+ pool = gst_video_decoder_get_buffer_pool (decoder);
+
+ /* Pool may be NULL if we started going to READY state */
+ if (pool == NULL) {
+ ret = GST_FLOW_FLUSHING;
+ goto beach;
+ }
+
+ ret = gst_buffer_pool_acquire_buffer (pool, &buffer, NULL);
+ g_object_unref (pool);
+
+ if (ret != GST_FLOW_OK)
+ goto beach;
+
+ GST_LOG_OBJECT (decoder, "Process output buffer");
+ ret = gst_v4l2_buffer_pool_process (v4l2_pool, &buffer);
+
+ } while (ret == GST_V4L2_FLOW_CORRUPTED_BUFFER);
+
+ if (ret != GST_FLOW_OK)
+ goto beach;
+
+#ifdef USE_V4L2_TARGET_NV
+ frame = gst_v4l2_video_dec_find_nearest_frame (buffer,
+ gst_video_decoder_get_frames (GST_VIDEO_DECODER (self)));
+ /* So we have a timestamped buffer and get, or not, corresponding frame.
+ * Assuming decoder output frames in display order, frames preceding this
+ * frame could be discarded as they seems useless due to e.g interlaced
+ * stream, corrupted input data...
+ * In any cases, not likely to be seen again. so drop it before they pile up
+ * and use all the memory. */
+ gst_v4l2_video_dec_clean_older_frames (self, buffer,
+ gst_video_decoder_get_frames (GST_VIDEO_DECODER (self)));
+#else
+ frame = gst_v4l2_video_dec_get_oldest_frame (decoder);
+#endif
+
+#ifdef USE_V4L2_TARGET_NV
+ if (is_cuvid == FALSE) {
+ if (frame && self->enable_frame_type_reporting) {
+ g_print ("Frame %d\n", frame->system_frame_number);
+ }
+ }
+#endif
+ if (frame) {
+ frame->output_buffer = buffer;
+ buffer = NULL;
+
+ if(enable_latency_measurement) /* TODO with better option */
+ {
+ gpointer in_time = g_hash_table_lookup (self->hash_pts_systemtime,
+ &frame->pts);
+ gdouble input_time = *((gdouble*)in_time);
+ gdouble output_time = get_current_system_timestamp ();
+ if (output_time < input_time)
+ {
+ gdouble time = G_MAXDOUBLE - input_time;
+ GST_DEBUG_OBJECT (self, "True Decode Latency = %f \n",
+ output_time + time);
+ }
+ else
+ {
+ GST_DEBUG_OBJECT (self, "True Decode Latency = %f \n",
+ (output_time - input_time));
+ }
+ GstCaps *reference = gst_caps_new_simple ("video/x-raw",
+ "component_name", G_TYPE_STRING, GST_ELEMENT_NAME(self),
+ "frame_num", G_TYPE_INT, self->frame_num++,
+ "in_timestamp", G_TYPE_DOUBLE, input_time,
+ "out_timestamp", G_TYPE_DOUBLE, output_time,
+ NULL);
+ GstReferenceTimestampMeta * dec_meta =
+ gst_buffer_add_reference_timestamp_meta (frame->output_buffer, reference,
+ 0, 0);
+ if(dec_meta == NULL)
+ {
+ GST_DEBUG_OBJECT (decoder, "dec_meta: %p", dec_meta);
+ }
+ gst_caps_unref(reference);
+ }
+
+#if USE_V4L2_TARGET_NV
+
+ if (!gst_buffer_copy_into (frame->output_buffer, frame->input_buffer,
+ (GstBufferCopyFlags)GST_BUFFER_COPY_METADATA, 0, -1)) {
+ GST_DEBUG_OBJECT (decoder, "Buffer metadata copy failed \n");
+ }
+ if ((self->drop_frame_interval == 0) ||
+ (self->decoded_picture_cnt % self->drop_frame_interval == 0))
+ ret = gst_video_decoder_finish_frame (decoder, frame);
+ else
+ ret = gst_video_decoder_drop_frame (GST_VIDEO_DECODER (self), frame);
+
+ if (ret != GST_FLOW_OK)
+ goto beach;
+
+ self->decoded_picture_cnt += 1;
+#else
+ ret = gst_video_decoder_finish_frame (decoder, frame);
+#endif
+
+ } else {
+ GST_WARNING_OBJECT (decoder, "Decoder is producing too many buffers");
+ gst_buffer_unref (buffer);
+ }
+
+ return;
+
+beach:
+ GST_DEBUG_OBJECT (decoder, "Leaving output thread: %s",
+ gst_flow_get_name (ret));
+
+ gst_buffer_replace (&buffer, NULL);
+ self->output_flow = ret;
+ gst_v4l2_object_unlock (self->v4l2output);
+ gst_pad_pause_task (decoder->srcpad);
+}
+
+static gboolean
+gst_v4l2_video_remove_padding (GstCapsFeatures * features,
+ GstStructure * structure, gpointer user_data)
+{
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (user_data);
+ GstVideoAlignment *align = &self->v4l2capture->align;
+ GstVideoInfo *info = &self->v4l2capture->info;
+#ifndef USE_V4L2_TARGET_NV
+ int width, height;
+
+ if (!gst_structure_get_int (structure, "width", &width))
+ return TRUE;
+
+ if (!gst_structure_get_int (structure, "height", &height))
+ return TRUE;
+#else
+ guint width, height;
+
+ if (!gst_structure_get_int (structure, "width", (gint *) & width))
+ return TRUE;
+
+ if (!gst_structure_get_int (structure, "height", (gint *) & height))
+ return TRUE;
+#endif
+ if (align->padding_left != 0 || align->padding_top != 0 ||
+ height != info->height + align->padding_bottom)
+ return TRUE;
+
+ if (height == info->height + align->padding_bottom) {
+ /* Some drivers may round up width to the padded with */
+ if (width == info->width + align->padding_right)
+ gst_structure_set (structure,
+ "width", G_TYPE_INT, width - align->padding_right,
+ "height", G_TYPE_INT, height - align->padding_bottom, NULL);
+ /* Some drivers may keep visible width and only round up bytesperline */
+#ifndef USE_V4L2_TARGET_NV
+ else if (width == info->width)
+#else
+ else if (width == (guint) info->width)
+#endif
+ gst_structure_set (structure,
+ "height", G_TYPE_INT, height - align->padding_bottom, NULL);
+ }
+
+ return TRUE;
+}
+
+
/* Human-readable names for the NvBufSurfaceMemType enum, indexed by the
 * enum value itself via designated initializers. */
static const char * const NvBufSurfaceMemType_names[] =
{
  [NVBUF_MEM_DEFAULT] = "nvbuf-mem-default",
  [NVBUF_MEM_CUDA_PINNED] = "nvbuf-mem-cuda-pinned",
  [NVBUF_MEM_CUDA_DEVICE] = "nvbuf-mem-cuda-device",
  [NVBUF_MEM_CUDA_UNIFIED] = "nvbuf-mem-cuda-unified",
  [NVBUF_MEM_SURFACE_ARRAY] = "nvbuf-mem-surface-array",
  [NVBUF_MEM_HANDLE] = "nvbuf-mem-handle",
  [NVBUF_MEM_SYSTEM] = "nvbuf-mem-system",
};
+
+static gboolean
+gst_h265_parse_process_nal (GstV4l2VideoDec *self, H265NalUnit * nalu)
+{
+ H265SPS sps = { 0, };
+ guint nal_type;
+ H265Parser nalparser;
+ H265ParserResult pres = H265_PARSER_ERROR;
+
+ /* nothing to do for broken input */
+ if (G_UNLIKELY (nalu->size < 2)) {
+ return TRUE;
+ }
+
+ /* we have a peek as well */
+ nal_type = nalu->type;
+
+ switch (nal_type) {
+ case H265_NAL_SPS:
+ /* reset state, everything else is obsolete */
+
+ pres = h265_parser_parse_sps (&nalparser, nalu, &sps, TRUE);
+
+ /* arranged for a fallback sps.id, so use that one and only warn */
+ if (pres != H265_PARSER_OK) {
+ /* try to not parse VUI */
+ pres = h265_parser_parse_sps (&nalparser, nalu, &sps, FALSE);
+ if (pres != H265_PARSER_OK) {
+ return FALSE;
+ }
+ }
+ self->current_width = sps.width;
+ self->current_height = sps.height;
+ break;
+ }
+
+ return TRUE;
+}
+
+static gboolean
+gst_h264_parse_process_nal (GstV4l2VideoDec *self, H264NalUnit * nalu)
+{
+ guint nal_type;
+ H264SPS sps = { 0, };
+ H264NalParser nalparser;
+ H264ParserResult pres;
+
+ /* nothing to do for broken input */
+ if (G_UNLIKELY (nalu->size < 2)) {
+ GST_DEBUG_OBJECT (self, "not processing nal size %u", nalu->size);
+ return TRUE;
+ }
+
+ /* we have a peek as well */
+ nal_type = nalu->type;
+
+ GST_DEBUG_OBJECT (self, "processing nal of type %u , size %u",
+ nal_type, nalu->size);
+
+ if (nal_type == H264_NAL_SPS)
+ {
+ GST_DEBUG_OBJECT (self, "GOT SPS frame\n");
+ }
+
+ switch (nal_type) {
+ case H264_NAL_SPS:
+ /* reset state, everything else is obsolete */
+ pres = h264_parser_parse_sps (&nalparser, nalu, &sps, TRUE);
+
+ /* arranged for a fallback sps.id, so use that one and only warn */
+ if (pres != H264_PARSER_OK) {
+ GST_WARNING_OBJECT (self, "failed to parse SPS:");
+ return FALSE;
+ }
+
+ self->current_width = (sps.pic_width_in_mbs_minus1 + 1) << 4;
+ self->current_height = (sps.pic_height_in_map_units_minus1 + 1) << 4;
+
+ break;
+ }
+
+ return TRUE;
+}
+
/* Scan an H.265 access unit for NAL units and feed each one to
 * gst_h265_parse_process_nal() so SPS resolution changes are detected.
 * Returns GST_FLOW_ERROR if any NAL failed to be identified (but keeps
 * scanning the remaining data either way). */
static GstFlowReturn
gst_v4l2_h265_stream_parser (GstV4l2VideoDec *self, gpointer data, guint32 size)
{
  H265NalUnit nalu;
  H265ParserResult pres;
  gint current_off = 0;
  guint nalu_size = 0;
  GstFlowReturn ret = GST_FLOW_OK;

  while (TRUE)
  {
    pres = h265_parser_identify_nalu (NULL, data, current_off, size, &nalu);

    /* Accounts a 4-byte start code per NAL — NOTE(review): a 3-byte start
     * code would make this over-count; confirm the stream always uses
     * 4-byte codes here. Also note current_off below advances by nalu.size
     * only, not nalu.offset + nalu.size — TODO confirm intended. */
    nalu_size += nalu.size + 4;
    if (nalu_size >= size)
      break;

    switch (pres) {
      case H265_PARSER_OK:
        GST_DEBUG_OBJECT (self, "complete nal (offset, size): (%u, %u) ",
            nalu.offset, nalu.size);
        break;
      case H265_PARSER_NO_NAL_END:
        GST_WARNING_OBJECT (self, "Start of the nal found, but not the end");
        break;
      case H265_PARSER_ERROR:
        /* should not really occur either */
        GST_ELEMENT_ERROR (self, STREAM, FORMAT,
            ("Error parsing H.265 stream"), ("Invalid H.265 stream"));
        ret = GST_FLOW_ERROR;
        break;
      case H265_PARSER_NO_NAL:
        GST_ELEMENT_ERROR (self, STREAM, FORMAT,
            ("Error parsing H.265 stream"), ("No H.265 NAL unit found"));
        ret = GST_FLOW_ERROR;
        break;
      default:
        ret = GST_FLOW_ERROR;
        //g_assert_not_reached ();
        break;
    }

    GST_DEBUG_OBJECT (self, "%p complete nal found. Off: %u, Size: %u",
        data, nalu.offset, nalu.size);

    if (!gst_h265_parse_process_nal (self, &nalu)) {
      GST_WARNING_OBJECT (self,
          "broken/invalid nal Type: %d, Size: %u will be dropped",
          nalu.type, nalu.size);
    }
    current_off += nalu.size;
  }
  GST_DEBUG_OBJECT (self,
      "sps width = %d height = %d \n", self->current_width, self->current_height);

  return ret;
}
+
/* Scan an H.264 access unit for NAL units and feed each one to
 * gst_h264_parse_process_nal() so SPS resolution changes are detected.
 * Returns GST_FLOW_ERROR if any NAL failed to be identified (but keeps
 * scanning the remaining data either way). */
static GstFlowReturn
gst_v4l2_h264_stream_parser (GstV4l2VideoDec *self, gpointer data, guint32 size)
{
  H264NalUnit nalu;
  H264ParserResult pres;
  gint current_off = 0;
  guint nalu_size = 0;
  GstFlowReturn ret = GST_FLOW_OK;

  while (TRUE)
  {
    pres = h264_parser_identify_nalu (NULL, data, current_off, size, &nalu);

    /* Accounts a 4-byte start code per NAL — NOTE(review): a 3-byte start
     * code would make this over-count; confirm the stream always uses
     * 4-byte codes here. Also note current_off below advances by nalu.size
     * only, not nalu.offset + nalu.size — TODO confirm intended. */
    nalu_size += nalu.size + 4;
    if (nalu_size >= size)
      break;

    switch (pres)
    {
      case H264_PARSER_OK:
        GST_DEBUG_OBJECT(self, "complete nal (offset, size): (%u, %u) ",
            nalu.offset, nalu.size);
        break;
      case H264_PARSER_NO_NAL_END:
        GST_WARNING_OBJECT(self, "parser will assume that the end of the data is the end of the NAL unit");
        break;
      case H264_PARSER_ERROR:
        /* should not really occur either */
        GST_ELEMENT_ERROR(self, STREAM, FORMAT,
            ("Error parsing H.264 stream"), ("Invalid H.264 stream"));
        ret = GST_FLOW_ERROR;
        break;
      case H264_PARSER_NO_NAL:
        GST_ELEMENT_ERROR(self, STREAM, FORMAT,
            ("Error parsing H.264 stream"), ("No H.264 NAL unit found"));
        ret = GST_FLOW_ERROR;
        break;
      default:
        ret = GST_FLOW_ERROR;
        // g_assert_not_reached ();
        break;
    }

    GST_DEBUG_OBJECT (self, "%p complete nal found. Off: %u, Size: %u",
        data, nalu.offset, nalu.size);

    if (!gst_h264_parse_process_nal (self, &nalu)) {
      GST_WARNING_OBJECT (self,
          "broken/invalid nal Type: %d, Size: %u will be dropped",
          nalu.type, nalu.size);
    }
    current_off += nalu.size;
  }
  GST_DEBUG_OBJECT (self,
      "sps width = %d height = %d", self->current_width, self->current_height);

  return ret;
}
+
+static gboolean
+findvpxStartCode(GstV4l2Object *obj, const uint8_t* data, int size)
+{
+ for (int i = 0; i < size - 2; i++)
+ {
+ if (GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_VP8)
+ {
+ if (data[i] == VP8_START_BYTE_0 &&
+ data[i + 1] == VP8_START_BYTE_1)
+ {
+ return TRUE;
+ }
+ }
+ else if (GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_VP9)
+ {
+ if (data[i] == VP9_START_BYTE_0 &&
+ data[i + 1] == VP9_START_BYTE_1 &&
+ data[i + 2] == VP9_START_BYTE_2)
+ {
+ return TRUE;
+ }
+ }
+ }
+ return FALSE; // Start code not found
+}
+
/* gst_v4l2_video_dec_handle_frame:
 * GstVideoDecoder vmethod — queue one encoded frame on the V4L2 output
 * (bitstream) plane.  On the first decodable frame this also performs the
 * one-time capture-plane bring-up: send the header, wait for the driver's
 * format event, negotiate downstream caps and start the decoding task.
 *
 * NV additions (USE_V4L2_TARGET_NV): VP8/VP9 start-code gating, in-band
 * H.264/H.265 SPS parsing to detect a resolution change (DRC) and rebuild
 * both planes, delta-unit dropping until an IDR arrives, latency probing,
 * and SEI type-5 payload extraction attached as buffer meta.
 *
 * Returns a GstFlowReturn; every error path drops @frame.
 */
static GstFlowReturn
gst_v4l2_video_dec_handle_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstV4l2Error error = GST_V4L2_ERROR_INIT;
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
  GstFlowReturn ret = GST_FLOW_OK;
  gboolean vpx_ret = FALSE;
  /* TRUE once frame->input_buffer has been queued to the driver */
  gboolean processed = FALSE;
  GstBuffer *tmp;
  GstTaskState task_state;
#ifdef USE_V4L2_TARGET_NV
  gboolean trigger_drc = false;
  GstV4l2BufferPool *v4l2pool = GST_V4L2_BUFFER_POOL (self->v4l2output->pool);
  GstV4l2Object *obj = v4l2pool->obj;
#endif

  GST_DEBUG_OBJECT (self, "Handling frame %d", frame->system_frame_number);
#ifdef USE_V4L2_TARGET_NV
  if (is_cuvid == TRUE) {
    /* VP8/VP9: drop buffers until the first one carrying a start code,
     * then latch valid_vpx so the scan is done only once per stream. */
    if (((GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_VP8) ||
        (GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_VP9)) && self->valid_vpx == FALSE)
    {
      GstMapInfo map;
      if (!gst_buffer_map(frame->input_buffer, &map, GST_MAP_READ))
      {
        /* NOTE(review): on map failure map.data/map.size are still used
         * below — confirm whether this should drop the frame instead. */
        GST_ERROR_OBJECT(self, "couldnt map frame input_buffer\n");
      }

      vpx_ret = findvpxStartCode(obj, map.data, map.size);
      if (vpx_ret == FALSE)
      {
        gst_buffer_unmap (frame->input_buffer, &map);
        goto drop;
      }
      else if (vpx_ret == TRUE)
      {
        self->valid_vpx = TRUE;
      }

      gst_buffer_unmap (frame->input_buffer, &map);
    }
    /* H.264/H.265: parse SPS of non-delta units to track the coded
     * resolution and detect an in-band resolution change (DRC). */
    if (((GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H264) ||
        (GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H265)) &&
        !(GST_BUFFER_FLAG_IS_SET(GST_BUFFER_CAST(frame->input_buffer),
        GST_BUFFER_FLAG_DELTA_UNIT)))
    {
      GstMapInfo map;
      if (!gst_buffer_map(frame->input_buffer, &map, GST_MAP_READ))
      {
        /* NOTE(review): as above, failure is logged but not propagated. */
        GST_ERROR_OBJECT(self, "couldnt map frame input_buffer\n");
      }

      if (GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H264)
      {
        if (gst_v4l2_h264_stream_parser(self, map.data, map.size) != GST_FLOW_OK)
          GST_ERROR_OBJECT(self, "h264 stream parsing failed");
      }
      else if ((GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H265))
      {
        if (gst_v4l2_h265_stream_parser(self, map.data, map.size) != GST_FLOW_OK)
          GST_ERROR_OBJECT(self, "h265 stream parsing failed");
      }
      gst_buffer_unmap (frame->input_buffer, &map);

      /* Compare against the previously-seen SPS resolution (0 on the very
       * first frame, which never triggers DRC). */
      if (self->old_width || self->old_height)
      {
        if ((self->old_width != self->current_width) ||
            (self->old_height != self->current_height))
        {
          GST_INFO_OBJECT(self, "Decoder found new resolution. triggering DRC seq.");
          GST_INFO_OBJECT(self, "Old Resolution: [%d x %d], New Resolution: [%d x %d]",
              self->old_width, self->old_height, self->current_width, self->current_height);
          trigger_drc = true;
        }
      }

      self->old_width = self->current_width;
      self->old_height = self->current_height;

      /* DRC sequence: drain, stop both planes, reopen the device and
       * re-apply the sink caps with the new resolution. */
      if (trigger_drc == true)
      {
        GstStructure *structure = NULL;
        GstCaps *dec_sink_caps = gst_pad_get_current_caps(decoder->sinkpad);
        if (dec_sink_caps != NULL)
        {
          dec_sink_caps = gst_caps_make_writable(dec_sink_caps);
          structure = gst_caps_get_structure(dec_sink_caps, 0);
          gst_structure_set(structure, "width", G_TYPE_INT, self->current_width,
              "height", G_TYPE_INT, self->current_height,
              NULL);
          /* Replace coded size with visible size, we want to negotiate visible size
           * with downstream, not coded size. */
          gst_caps_map_in_place(dec_sink_caps, gst_v4l2_video_remove_padding, self);
          /* NOTE(review): gst_caps_to_string() result is never freed here
           * (leaks on every DRC with debug enabled) — consider GST_PTR_FORMAT. */
          GST_DEBUG_OBJECT(self, "dec_sink_caps: %s", gst_caps_to_string(dec_sink_caps));
        }
        self->idr_received = FALSE;

        gst_v4l2_video_dec_finish(decoder);
        gst_v4l2_object_stop(self->v4l2output);
        {
          /* Poke downstream with an allocation query so it can release
           * buffers belonging to the pool we are about to tear down. */
          GstCaps *caps = gst_pad_get_current_caps(decoder->srcpad);
          if (caps)
          {
            GstQuery *query = gst_query_new_allocation(caps, FALSE);
            gst_pad_peer_query(decoder->srcpad, query);
            gst_query_unref(query);
            gst_caps_unref(caps);
          }
        }
        gst_v4l2_object_stop(self->v4l2capture);
        self->output_flow = GST_FLOW_OK;

        /* Wait until the capture-plane streaming loop has fully stopped. */
        g_mutex_lock(&self->v4l2capture->cplane_stopped_lock);
        while (self->v4l2capture->capture_plane_stopped != TRUE)
        {
          g_cond_wait(&self->v4l2capture->cplane_stopped_cond,
              &self->v4l2capture->cplane_stopped_lock);
        }
        self->v4l2capture->capture_plane_stopped = FALSE;
        g_mutex_unlock(&self->v4l2capture->cplane_stopped_lock);

        /* Full close/reopen of the decoder device; capture shares the fd. */
        gst_v4l2_object_close(self->v4l2output);
        gst_v4l2_object_close(self->v4l2capture);
        if (!gst_v4l2_object_open(self->v4l2output))
          GST_ERROR_OBJECT(self, "gst_v4l2_object_open (self->v4l2output) failed\n");
        if (!gst_v4l2_object_open_shared(self->v4l2capture, self->v4l2output))
        {
          GST_ERROR_OBJECT(self, "gstv4l2object open shared failed\n");
          if (GST_V4L2_IS_OPEN(self->v4l2output))
            gst_v4l2_object_close(self->v4l2output);
          return GST_FLOW_ERROR;
        }
        if (dec_sink_caps != NULL)
        {
          /* NOTE(review): gst_v4l2_object_set_format() returns a gboolean,
           * which is stored into the GstFlowReturn `ret` and tested below —
           * works by accident (TRUE != GST_FLOW_OK) but is type-confused;
           * confirm intended semantics. */
          ret = gst_v4l2_object_set_format(self->v4l2output, dec_sink_caps, &error);
          gst_caps_unref(dec_sink_caps);
        } else
        {
          GST_ERROR_OBJECT(self, "Decoder sink caps == NULL");
        }

        if (ret)
          /* NOTE(review): takes an extra ref on input_state, presumably to
           * balance an unref elsewhere in the reset path — verify. */
          self->input_state = gst_video_codec_state_ref(self->input_state);
        else
          gst_v4l2_error(self, &error);

        set_v4l2_controls(self);
        GST_INFO_OBJECT(self, "Reset Done");
      }
      else if (self->set_format == true)
      {
        /* No DRC, but caps were renegotiated: re-apply controls. */
        set_v4l2_controls(self);
      }
    }
  }

  /* CUVID and TEGRA decoders return format when SPS/PPS is received along with
   * a frame. In case of RTSP inputs we drop the DELTA units which are not
   * decodable independently until we receive I / IDR frame.
   */
  if ((GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H264) ||
      (GST_V4L2_PIXELFORMAT(obj) == V4L2_PIX_FMT_H265))
  {
    if ((GST_BUFFER_FLAG_IS_SET (GST_BUFFER_CAST(frame->input_buffer),
        GST_BUFFER_FLAG_DELTA_UNIT)) &&
        (self->idr_received == FALSE))
    {
      GST_DEBUG_OBJECT (decoder, "Delta Unit Received, Dropping...");
      gst_video_decoder_drop_frame (decoder, frame);
      return GST_FLOW_OK;
    }
    self->idr_received = TRUE;
  }
#endif

  if (is_cuvid == TRUE) {
    if (self->skip_frames == V4L2_SKIP_FRAMES_TYPE_DECODE_IDR_ONLY) {
      // Decode only I Frames and drop others.
      if (GST_BUFFER_FLAG_IS_SET (GST_BUFFER_CAST(frame->input_buffer),
          GST_BUFFER_FLAG_DELTA_UNIT)) {
        gst_video_decoder_drop_frame (decoder, frame);
        return GST_FLOW_OK;
      }
    }
  }

  /* Record the wall-clock arrival time keyed by PTS for latency probing. */
  if (enable_latency_measurement)
  {
    self->buffer_in_time = get_current_system_timestamp ();
    g_hash_table_insert (self->hash_pts_systemtime, &frame->pts, &self->buffer_in_time);
  }

  if (G_UNLIKELY (!g_atomic_int_get (&self->active)))
    goto flushing;

  /* Lazily apply the input format on the output (bitstream) plane. */
  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2output))) {
    if (!self->input_state)
      goto not_negotiated;
    if (!gst_v4l2_object_set_format (self->v4l2output, self->input_state->caps,
            &error))
      goto not_negotiated;
  }

  /* One-time capture-plane setup, done when the first frame arrives. */
  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2capture))) {
    GstBufferPool *pool = GST_BUFFER_POOL (self->v4l2output->pool);
    GstVideoInfo info;
    GstVideoCodecState *output_state;
    GstBuffer *codec_data;
    GstCaps *acquired_caps, *available_caps, *caps, *filter;
    GstStructure *st;

    GST_DEBUG_OBJECT (self, "Sending header");

    codec_data = self->input_state->codec_data;

    /* We are running in byte-stream mode, so we don't know the headers, but
     * we need to send something, otherwise the decoder will refuse to
     * intialize.
     */
    if (codec_data) {
      gst_buffer_ref (codec_data);
    } else {
      codec_data = gst_buffer_ref (frame->input_buffer);
      processed = TRUE;
    }

    /* Ensure input internal pool is active */
    if (!gst_buffer_pool_is_active (pool)) {
      GstStructure *config = gst_buffer_pool_get_config (pool);
#ifndef USE_V4L2_TARGET_NV
      gst_buffer_pool_config_set_params (config, self->input_state->caps,
          self->v4l2output->info.size, 2, 2);
#else
      if (obj->mode != GST_V4L2_IO_USERPTR)
        obj->min_buffers = 2;

      if (V4L2_TYPE_IS_OUTPUT (obj->type)) {
        gst_buffer_pool_config_set_params (config, self->input_state->caps,
            self->v4l2output->info.size, obj->min_buffers, obj->min_buffers);
      }
#endif

      /* There is no reason to refuse this config */
      if (!gst_buffer_pool_set_config (pool, config))
        goto activate_failed;

      if (!gst_buffer_pool_set_active (pool, TRUE))
        goto activate_failed;
    }

    /* Queue the header; drop the stream lock so the driver can make
     * progress without deadlocking against the output task. */
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->
            v4l2output->pool), &codec_data);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    gst_buffer_unref (codec_data);

    /* TODO: nvparser should return proper format of a stream with first
     * few bytes of stream header*/
#ifdef USE_V4L2_TARGET_NV
    /* If the header was separate codec_data, also queue the first frame so
     * the driver has enough bitstream to emit the format event. */
    if (!processed) {
      if (is_cuvid == TRUE)
        processed = TRUE;
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      ret =
          gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->v4l2output->
              pool), &frame->input_buffer);
      GST_VIDEO_DECODER_STREAM_LOCK (decoder);

      if (ret == GST_FLOW_FLUSHING) {
        if (gst_pad_get_task_state (GST_VIDEO_DECODER_SRC_PAD (self)) !=
            GST_TASK_STARTED)
          ret = self->output_flow;
        goto drop;
      } else if (ret != GST_FLOW_OK) {
        goto process_failed;
      }
    }

    /* Poll VIDIOC_DQEVENT until the driver signals the stream format. */
    if (V4L2_TYPE_IS_OUTPUT (obj->type)) {
      gint retval;
      gint dqevent_loop_count = 0;
      struct v4l2_event ev;
#ifndef USE_V4L2_TARGET_NV_X86
      /* This is WAR for Bug 3544450 */
      if(!processed)
      {
        processed = TRUE;
        /* Wait for DQEVENT for 0.6 sec */
        while (dqevent_loop_count < LOOP_COUNT_TO_WAIT_FOR_DQEVENT) {
          memset (&ev, 0, sizeof (ev));
          retval = obj->ioctl (obj->video_fd, VIDIOC_DQEVENT, &ev);
          dqevent_loop_count ++;
          if (retval != 0)
          {
            if (errno == EINVAL)
              goto process_failed;
            usleep (WAIT_TIME_PER_LOOP_FOR_DQEVENT); //TODO is this needed ?
            continue;
          }
          else
            break;
        }
        /* Bounded wait exhausted without an event: no usable format. */
        if (dqevent_loop_count == LOOP_COUNT_TO_WAIT_FOR_DQEVENT) {
          g_print ("Stream format not found, dropping the frame\n");
          goto drop;
        }
      }
      else
#endif
      {
        /* Unbounded wait: retry DQEVENT every 100 ms until it succeeds. */
        while (1) {
          memset (&ev, 0, sizeof (ev));
          retval = obj->ioctl (obj->video_fd, VIDIOC_DQEVENT, &ev);
          if (retval != 0)
          {
            if (errno == EINVAL)
              goto process_failed;
            if (is_cuvid != TRUE) {
              if (ev.sequence == 0) {
                g_print ("Stream format not found, dropping the frame\n");
                goto drop;
              }
            }
            usleep(100*1000); //TODO is this needed ?
            continue;
          }
          else
            break;
        }
      }
    }
#endif

    /* For decoders G_FMT returns coded size, G_SELECTION returns visible size
     * in the compose rectangle. gst_v4l2_object_acquire_format() checks both
     * and returns the visible size as with/height and the coded size as
     * padding. */
    if (!gst_v4l2_object_acquire_format (self->v4l2capture, &info))
      goto not_negotiated;

    /* Create caps from the acquired format, remove the format field */
    acquired_caps = gst_video_info_to_caps (&info);
    GST_DEBUG_OBJECT (self, "Acquired caps: %" GST_PTR_FORMAT, acquired_caps);
    st = gst_caps_get_structure (acquired_caps, 0);
    gst_structure_remove_field (st, "format");

    /* Probe currently available pixel formats */
    available_caps = gst_v4l2_object_probe_caps (self->v4l2capture, NULL);
    available_caps = gst_caps_make_writable (available_caps);
    GST_DEBUG_OBJECT (self, "Available caps: %" GST_PTR_FORMAT, available_caps);

    /* Replace coded size with visible size, we want to negotiate visible size
     * with downstream, not coded size. */
    gst_caps_map_in_place (available_caps, gst_v4l2_video_remove_padding, self);

    filter = gst_caps_intersect_full (available_caps, acquired_caps,
        GST_CAPS_INTERSECT_FIRST);
    GST_DEBUG_OBJECT (self, "Filtered caps: %" GST_PTR_FORMAT, filter);
    gst_caps_unref (acquired_caps);
    gst_caps_unref (available_caps);
#ifndef USE_V4L2_TARGET_NV
    caps = gst_pad_peer_query_caps (decoder->srcpad, filter);
    gst_caps_unref (filter);
#else
    /* NV path queries the peer against the template caps instead of the
     * intersected filter; fall back to the template if the peer returns
     * nothing usable. */
    caps = gst_pad_peer_query_caps (decoder->srcpad,
        gst_pad_get_pad_template_caps (GST_VIDEO_DECODER_SRC_PAD (decoder)));
    gst_caps_unref (filter);

    if (gst_caps_is_empty (caps)) {
      GstPad *pad = GST_VIDEO_DECODER_SRC_PAD (decoder);
      caps = gst_pad_get_pad_template_caps (pad);
    }
#endif

    GST_DEBUG_OBJECT (self, "Possible decoded caps: %" GST_PTR_FORMAT, caps);
    if (gst_caps_is_empty (caps)) {
      gst_caps_unref (caps);
      goto not_negotiated;
    }

    /* Fixate pixel format */
    caps = gst_caps_fixate (caps);

    GST_DEBUG_OBJECT (self, "Chosen decoded caps: %" GST_PTR_FORMAT, caps);

    /* Try to set negotiated format, on success replace acquired format */
#ifndef USE_V4L2_TARGET_NV
    if (gst_v4l2_object_set_format (self->v4l2capture, caps, &error))
      gst_video_info_from_caps (&info, caps);
    else
      gst_v4l2_clear_error (&error);
#endif
    gst_caps_unref (caps);

    output_state = gst_video_decoder_set_output_state (decoder,
        info.finfo->format, info.width, info.height, self->input_state);

#ifdef USE_V4L2_TARGET_NV
    /* Advertise NVMM memory plus the NV buffer memory type and GPU id on
     * the source caps so downstream NV elements can match them. */
    if (output_state->caps)
      gst_caps_unref (output_state->caps);
    output_state->caps = gst_video_info_to_caps (&output_state->info);
    GstCapsFeatures *features = gst_caps_features_new ("memory:NVMM", NULL);
    gst_caps_set_features (output_state->caps, 0, features);
    {
      // convert "cudadec_mem_type" to NvBufSurfaceMemType
      int buf_surface_mem_type = 0;
      switch(self->cudadec_mem_type)
      {
        case 0:
        {
          if(is_cuvid == TRUE)
            buf_surface_mem_type = NVBUF_MEM_CUDA_DEVICE;
          else
            buf_surface_mem_type = NVBUF_MEM_SURFACE_ARRAY;
          break;
        }
        case 1:
          buf_surface_mem_type = NVBUF_MEM_CUDA_PINNED;
          break;
        case 2:
          buf_surface_mem_type = NVBUF_MEM_CUDA_UNIFIED;
          break;
      }
      gst_caps_set_simple(output_state->caps, "nvbuf-memory-type", G_TYPE_STRING , NvBufSurfaceMemType_names[buf_surface_mem_type], NULL);
      gst_caps_set_simple(output_state->caps, "gpu-id", G_TYPE_INT , self->cudadec_gpu_id, NULL);
    }
#endif
    /* Copy the rest of the information, there might be more in the future */
    output_state->info.interlace_mode = info.interlace_mode;
    gst_video_codec_state_unref (output_state);

    if (!gst_video_decoder_negotiate (decoder)) {
      if (GST_PAD_IS_FLUSHING (decoder->srcpad))
        goto flushing;
      else
        goto not_negotiated;
    }

    /* Ensure our internal pool is activated */
    if (!gst_buffer_pool_set_active (GST_BUFFER_POOL (self->v4l2capture->pool),
            TRUE))
      goto activate_failed;

#ifdef USE_V4L2_TARGET_NV
    /* Enable dynamic capture-buffer allocation depending on the configured
     * policy and the current playback direction (self->rate). */
    if (self->v4l2capture->pool) {
      if (self->cap_buf_dynamic_allocation == CAP_BUF_DYNAMIC_ALLOC_ENABLED_FOR_FW_RW_PLAYBACK) {
        gst_v4l2_buffer_pool_enable_dynamic_allocation (GST_V4L2_BUFFER_POOL (self->v4l2capture->pool),
            TRUE);
      } else if (self->cap_buf_dynamic_allocation == CAP_BUF_DYNAMIC_ALLOC_ENABLED_FOR_RW_PLAYBACK && self->rate < 0) {
        gst_v4l2_buffer_pool_enable_dynamic_allocation (GST_V4L2_BUFFER_POOL (self->v4l2capture->pool),
            TRUE);
      } else if (self->cap_buf_dynamic_allocation == CAP_BUF_DYNAMIC_ALLOC_ENABLED_FOR_FW_PLAYBACK && self->rate > 0) {
        gst_v4l2_buffer_pool_enable_dynamic_allocation (GST_V4L2_BUFFER_POOL (self->v4l2capture->pool),
            TRUE);
      } else {
        gst_v4l2_buffer_pool_enable_dynamic_allocation (GST_V4L2_BUFFER_POOL (self->v4l2capture->pool),
            FALSE);
      }
    }
#endif
  }
#ifdef USE_V4L2_TARGET_NV
  /* CUVID: after a plain renegotiation (no DRC), propagate the sink-pad
   * framerate onto the already-set source caps. */
  if (is_cuvid == TRUE)
  {
    if ((trigger_drc == false) && (self->set_format == true))
    {
      GstStructure *sink_pad_st = NULL, *src_pad_st = NULL;
      const GValue *framerate = NULL;
      GstCaps *dec_sink_caps = gst_pad_get_current_caps(decoder->sinkpad);
      GstCaps *dec_src_caps = gst_pad_get_current_caps(decoder->srcpad);
      if (G_UNLIKELY (dec_sink_caps != NULL) && G_UNLIKELY (dec_src_caps != NULL))
      {
        /* NOTE(review): the gst_caps_to_string() results in these two debug
         * lines are never freed. */
        GST_DEBUG_OBJECT(self, "dec_sink_caps: %s", gst_caps_to_string(dec_sink_caps));
        dec_src_caps = gst_caps_make_writable(dec_src_caps);
        sink_pad_st = gst_caps_get_structure(dec_sink_caps, 0);
        src_pad_st = gst_caps_get_structure(dec_src_caps, 0);
        framerate = gst_structure_get_value(sink_pad_st, "framerate");
        if (framerate)
          gst_structure_set_value(src_pad_st, "framerate", framerate);

        GST_DEBUG_OBJECT(self, "dec_src_caps: %s", gst_caps_to_string(dec_src_caps));

        gst_pad_set_caps(decoder->srcpad, dec_src_caps);

        gst_caps_unref(dec_sink_caps);
        gst_caps_unref(dec_src_caps);
      }
    }
    trigger_drc = false;
    self->set_format = false;
  }
#endif

  /* (Re)start the capture-plane streaming task if it is not running. */
  task_state = gst_pad_get_task_state (GST_VIDEO_DECODER_SRC_PAD (self));
  if (task_state == GST_TASK_STOPPED || task_state == GST_TASK_PAUSED) {
    /* It's possible that the processing thread stopped due to an error */
    if (self->output_flow != GST_FLOW_OK &&
        self->output_flow != GST_FLOW_FLUSHING) {
      GST_DEBUG_OBJECT (self, "Processing loop stopped with error, leaving");
      ret = self->output_flow;
      goto drop;
    }

#ifdef USE_V4L2_TARGET_NV
    set_v4l2_video_mpeg_class (self->v4l2capture,
        V4L2_CID_MPEG_SET_POLL_INTERRUPT, 1);
#endif

    GST_DEBUG_OBJECT (self, "Starting decoding thread");

    /* Start the processing task, when it quits, the task will disable input
     * processing to unlock input if draining, or prevent potential block */
    self->output_flow = GST_FLOW_FLUSHING;
    if (!gst_pad_start_task (decoder->srcpad,
            (GstTaskFunction) gst_v4l2_video_dec_loop, self, NULL))
      goto start_task_failed;
  }

  /* Queue this frame's bitstream if it was not already consumed above. */
  if (!processed) {
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->v4l2output->
            pool), &frame->input_buffer);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    if (ret == GST_FLOW_FLUSHING) {
      if (gst_pad_get_task_state (GST_VIDEO_DECODER_SRC_PAD (self)) !=
          GST_TASK_STARTED)
        ret = self->output_flow;
      goto drop;
    } else if (ret != GST_FLOW_OK) {
      /* A stopped sink task means we are shutting down; drop quietly. */
      if (gst_pad_get_task_state (GST_VIDEO_DECODER_SINK_PAD(self)) ==
          GST_TASK_STOPPED)
      {
        ret = GST_FLOW_OK;
        goto drop;
      }
      goto process_failed;
    }
  }


  /* No need to keep input arround */
  tmp = frame->input_buffer;
  frame->input_buffer = gst_buffer_new ();
  gst_buffer_copy_into (frame->input_buffer, tmp,
      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS |
      GST_BUFFER_COPY_META, 0, 0);

  /* Parse SEI data from the bitsream */
  if ((is_cuvid == TRUE) && (self->extract_sei_type5_data == TRUE))
  {
    uint8_t *sei_type5_payload = NULL;
    GstMapInfo map;
    /* NOTE(review): READWRITE mapping although the data is only parsed —
     * confirm whether GST_MAP_READ would suffice. */
    if (!gst_buffer_map (tmp, &map, GST_MAP_READWRITE))
    {
      GST_DEBUG_OBJECT (self, "couldnt map\n");
      goto process_failed;
    }
    else
    {
      uint32_t payload_size = 0;
      uint8_t *stream_data = (uint8_t *)map.data;
      sei_type5_payload = parse_sei_data (stream_data, map.size,
          &payload_size, self->sei_uuid_string);
      if (sei_type5_payload != NULL)
      {
        GST_DEBUG_OBJECT (self, "sei_type5_payload found\n");
        /* Attach the extracted payload as meta on the (now empty) input
         * buffer copy; the meta keeps the raw pointer — ownership is
         * presumably transferred to the meta consumer, verify. */
        GstVideoSEIMeta *video_sei_meta =
            (GstVideoSEIMeta *)gst_buffer_add_meta(
            frame->input_buffer, GST_VIDEO_SEI_META_INFO, NULL);
        video_sei_meta->sei_metadata_type =
            GST_USER_SEI_META;
        video_sei_meta->sei_metadata_size = payload_size;
        video_sei_meta->sei_metadata_ptr = sei_type5_payload;
      }
      gst_buffer_unmap (tmp, &map);
    }
  }

  gst_buffer_unref (tmp);

  gst_video_codec_frame_unref (frame);
  return ret;

  /* ERRORS */
not_negotiated:
  {
    GST_ERROR_OBJECT (self, "not negotiated");
    ret = GST_FLOW_NOT_NEGOTIATED;
    gst_v4l2_error (self, &error);
    goto drop;
  }
activate_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, SETTINGS,
        (_("Failed to allocate required memory.")),
        ("Buffer pool activation failed"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
flushing:
  {
    ret = GST_FLOW_FLUSHING;
    goto drop;
  }

start_task_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to start decoding thread.")), (NULL));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
process_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to process frame.")),
        ("Maybe be due to not enough memory or failing driver"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
drop:
  {
    gst_video_decoder_drop_frame (decoder, frame);
    return ret;
  }
}
+
+static gboolean
+gst_v4l2_video_dec_decide_allocation (GstVideoDecoder * decoder,
+ GstQuery * query)
+{
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
+ GstClockTime latency;
+ gboolean ret = FALSE;
+
+ if (gst_v4l2_object_decide_allocation (self->v4l2capture, query))
+ ret = GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder,
+ query);
+
+ if (GST_CLOCK_TIME_IS_VALID (self->v4l2capture->duration)) {
+ latency = self->v4l2capture->min_buffers * self->v4l2capture->duration;
+ GST_DEBUG_OBJECT (self, "Setting latency: %" GST_TIME_FORMAT " (%"
+ G_GUINT32_FORMAT " * %" G_GUINT64_FORMAT, GST_TIME_ARGS (latency),
+ self->v4l2capture->min_buffers, self->v4l2capture->duration);
+ gst_video_decoder_set_latency (decoder, latency, latency);
+ } else {
+ GST_WARNING_OBJECT (self, "Duration invalid, not setting latency");
+ }
+
+ return ret;
+}
+
+static gboolean
+gst_v4l2_video_dec_src_query (GstVideoDecoder * decoder, GstQuery * query)
+{
+ gboolean ret = TRUE;
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
+
+ switch (GST_QUERY_TYPE (query)) {
+ case GST_QUERY_CAPS:{
+ GstCaps *filter, *result = NULL;
+ GstPad *pad = GST_VIDEO_DECODER_SRC_PAD (decoder);
+
+ gst_query_parse_caps (query, &filter);
+
+ if (self->probed_srccaps)
+ result = gst_caps_ref (self->probed_srccaps);
+ else
+ result = gst_pad_get_pad_template_caps (pad);
+
+ if (filter) {
+ GstCaps *tmp = result;
+ result =
+ gst_caps_intersect_full (filter, tmp, GST_CAPS_INTERSECT_FIRST);
+ gst_caps_unref (tmp);
+ }
+
+ GST_DEBUG_OBJECT (self, "Returning src caps %" GST_PTR_FORMAT, result);
+
+ gst_query_set_caps_result (query, result);
+ gst_caps_unref (result);
+ break;
+ }
+
+ default:
+ ret = GST_VIDEO_DECODER_CLASS (parent_class)->src_query (decoder, query);
+ break;
+ }
+
+ return ret;
+}
+
+static GstCaps *
+gst_v4l2_video_dec_sink_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
+{
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
+ GstCaps *result;
+
+ result = gst_video_decoder_proxy_getcaps (decoder, self->probed_sinkcaps,
+ filter);
+
+ GST_DEBUG_OBJECT (self, "Returning sink caps %" GST_PTR_FORMAT, result);
+
+ return result;
+}
+
/* sink_event vmethod.
 * Pre-handling: GAP events are swallowed while the capture plane is still
 * inactive (nothing has been pushed downstream yet).  After chaining to the
 * base class, NVIDIA custom events update drop/skip/low-latency settings.
 */
static gboolean
gst_v4l2_video_dec_sink_event (GstVideoDecoder * decoder, GstEvent * event)
{
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
  gboolean ret;

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_FLUSH_START:
      break;
#ifdef USE_V4L2_TARGET_NV
    case GST_EVENT_GAP:
      GST_DEBUG_OBJECT (self, "GAP event received");
      if (GST_V4L2_IS_ACTIVE (self->v4l2capture))
      {
        /* Capture plane running: let the base class forward the gap. */
        GST_DEBUG_OBJECT (self, "CAPTURE PLANE buffer pool is active"
            " so passing on the event to downstream");
        break;
      }
      else
      {
        /* Nothing negotiated downstream yet: consume the gap here. */
        GST_DEBUG_OBJECT (self, "CAPTURE PLANE buffer pool is inactive"
            " so ignore the gap event");
        gst_event_unref(event);
        return TRUE;
      }
#endif
    default:
      break;
  }

  /* NOTE(review): the base-class handler takes ownership of @event, yet
   * @event is dereferenced again below (GST_EVENT_TYPE and the
   * gst_nvevent_parse_* calls).  Presumably the custom events are not
   * consumed/unreffed by the base class so this works in practice —
   * confirm, or parse the custom events before chaining up. */
  ret = GST_VIDEO_DECODER_CLASS (parent_class)->sink_event (decoder, event);

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_FLUSH_START:
      break;
    default:
      break;
  }

#ifdef USE_V4L2_TARGET_NV
  /* NVIDIA custom in-band events; stream_id ownership of the parse calls
   * is not documented here — NOTE(review): verify it is not leaked. */
  if ((GstNvCustomEventType)GST_EVENT_TYPE (event) == GST_NVEVENT_DEC_DROP_FRAME_INTERVAL_UPDATE) {
    gchar* stream_id = NULL;
    gst_nvevent_parse_dec_drop_frame_interval_update (event, &stream_id, &self->drop_frame_interval);
  }

  if ((GstNvCustomEventType)GST_EVENT_TYPE (event) == GST_NVEVENT_DEC_SKIP_FRAME_UPDATE) {
    gchar* stream_id = NULL;
    gst_nvevent_parse_dec_skip_frame_update (event, &stream_id, &self->skip_frames);
    /* Handle skip-frame event */
    if (self->skip_frames != V4L2_SKIP_FRAMES_TYPE_NONE) {
      if (!set_v4l2_video_mpeg_class (self->v4l2output,
          V4L2_CID_MPEG_VIDEO_SKIP_FRAMES,
          self->skip_frames)) {
        g_print ("S_EXT_CTRLS for SKIP_FRAMES failed\n");
        return FALSE;
      }
    }
  }

  if ((GstNvCustomEventType)GST_EVENT_TYPE (event) == GST_NVEVENT_DEC_ENABLE_LOW_LATENCY_MODE) {
    gchar* stream_id = NULL;
    gst_nvevent_parse_dec_enable_low_latency_mode (event, &stream_id, &self->cudadec_low_latency);
    /* TODO: Handle enable of low_latency mode */
  }
#endif

  return ret;
}
+
+#ifdef USE_V4L2_TARGET_NV
+static gboolean
+gst_v4l2_video_dec_src_event (GstVideoDecoder * decoder, GstEvent * event)
+{
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
+ gboolean ret;
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_SEEK:
+ {
+ GstFormat format;
+ gdouble rate;
+ GstSeekFlags flags;
+ GstSeekType start_type, stop_type;
+ gint64 start, stop;
+
+ gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
+ &stop_type, &stop);
+
+ self->rate = rate;
+ GST_DEBUG_OBJECT (self, "Seek event received with rate %f", rate);
+
+ if (self->v4l2capture->pool) {
+ if (self->cap_buf_dynamic_allocation == CAP_BUF_DYNAMIC_ALLOC_ENABLED_FOR_FW_RW_PLAYBACK) {
+ gst_v4l2_buffer_pool_enable_dynamic_allocation (GST_V4L2_BUFFER_POOL (self->v4l2capture->pool),
+ TRUE);
+ } else if (self->cap_buf_dynamic_allocation == CAP_BUF_DYNAMIC_ALLOC_ENABLED_FOR_RW_PLAYBACK && self->rate < 0) {
+ gst_v4l2_buffer_pool_enable_dynamic_allocation (GST_V4L2_BUFFER_POOL (self->v4l2capture->pool),
+ TRUE);
+ } else if (self->cap_buf_dynamic_allocation == CAP_BUF_DYNAMIC_ALLOC_ENABLED_FOR_FW_PLAYBACK && self->rate > 0) {
+ gst_v4l2_buffer_pool_enable_dynamic_allocation (GST_V4L2_BUFFER_POOL (self->v4l2capture->pool),
+ TRUE);
+ } else {
+ gst_v4l2_buffer_pool_enable_dynamic_allocation (GST_V4L2_BUFFER_POOL (self->v4l2capture->pool),
+ FALSE);
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ ret = GST_VIDEO_DECODER_CLASS (parent_class)->src_event (decoder, event);
+
+ return ret;
+}
+#endif
+
+static GstStateChangeReturn
+gst_v4l2_video_dec_change_state (GstElement * element,
+ GstStateChange transition)
+{
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (element);
+ GstVideoDecoder *decoder = GST_VIDEO_DECODER (element);
+
+ if (transition == GST_STATE_CHANGE_PAUSED_TO_READY) {
+ g_atomic_int_set (&self->active, FALSE);
+ gst_v4l2_object_unlock (self->v4l2output);
+ gst_v4l2_object_unlock (self->v4l2capture);
+#ifdef USE_V4L2_TARGET_NV
+ set_v4l2_video_mpeg_class (self->v4l2capture,
+ V4L2_CID_MPEG_SET_POLL_INTERRUPT, 0);
+#endif
+ gst_pad_stop_task (decoder->srcpad);
+ }
+
+ return GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
+}
+
+static void
+gst_v4l2_video_dec_dispose (GObject * object)
+{
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (object);
+
+ gst_caps_replace (&self->probed_sinkcaps, NULL);
+ gst_caps_replace (&self->probed_srccaps, NULL);
+
+ G_OBJECT_CLASS (parent_class)->dispose (object);
+}
+
+static void
+gst_v4l2_video_dec_finalize (GObject * object)
+{
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (object);
+
+#ifdef USE_V4L2_TARGET_NV
+ g_cond_clear (&self->v4l2capture->cplane_stopped_cond);
+ g_mutex_clear (&self->v4l2capture->cplane_stopped_lock);
+#endif
+
+ gst_v4l2_object_destroy (self->v4l2capture);
+ gst_v4l2_object_destroy (self->v4l2output);
+
+ G_OBJECT_CLASS (parent_class)->finalize (object);
+}
+
+static void
+gst_v4l2_video_dec_init (GstV4l2VideoDec * self)
+{
+ /* V4L2 object are created in subinstance_init */
+#ifdef USE_V4L2_TARGET_NV
+ if (is_cuvid == TRUE) {
+ default_num_extra_surfaces = 0; //default for dGPU
+ default_cudadec_low_latency = FALSE;
+ default_sei_extract_data = FALSE;
+ } else if (is_cuvid == FALSE) {
+ default_num_extra_surfaces = 1; //default for Tegra
+ }
+ self->skip_frames = DEFAULT_SKIP_FRAME_TYPE;
+ self->drop_frame_interval = 0;
+ self->decoded_picture_cnt = 0;
+ self->num_extra_surfaces = default_num_extra_surfaces;
+ self->valid_vpx = FALSE;
+
+ self->disable_dpb = DEFAULT_DISABLE_DPB;
+ self->enable_full_frame = DEFAULT_FULL_FRAME;
+ self->enable_frame_type_reporting = DEFAULT_FRAME_TYPR_REPORTING;
+ self->enable_error_check = DEFAULT_ERROR_CHECK;
+ self->enable_max_performance = DEFAULT_MAX_PERFORMANCE;
+ self->cudadec_mem_type = DEFAULT_CUDADEC_MEM_TYPE;
+ self->cudadec_gpu_id = DEFAULT_CUDADEC_GPU_ID;
+ self->cudadec_low_latency = default_cudadec_low_latency;
+ self->idr_received = FALSE;
+ self->rate = 1;
+ self->cap_buf_dynamic_allocation = DEFAULT_CAP_BUF_DYNAMIC_ALLOCATION;
+#endif
+
+ const gchar * latency = g_getenv("NVDS_ENABLE_LATENCY_MEASUREMENT");
+ if(latency)
+ {
+ enable_latency_measurement = TRUE;
+ }
+}
+
+static void
+gst_v4l2_video_dec_subinstance_init (GTypeInstance * instance, gpointer g_class)
+{
+ GstV4l2VideoDecClass *klass = GST_V4L2_VIDEO_DEC_CLASS (g_class);
+ GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (instance);
+ GstVideoDecoder *decoder = GST_VIDEO_DECODER (instance);
+
+ gst_video_decoder_set_packetized (decoder, TRUE);
+
+ self->v4l2output = gst_v4l2_object_new (GST_ELEMENT (self),
+ GST_OBJECT (GST_VIDEO_DECODER_SINK_PAD (self)),
+ V4L2_BUF_TYPE_VIDEO_OUTPUT, klass->default_device,
+ gst_v4l2_get_output, gst_v4l2_set_output, NULL);
+ self->v4l2output->no_initial_format = TRUE;
+ self->v4l2output->keep_aspect = FALSE;
+#ifdef USE_V4L2_TARGET_NV
+ self->v4l2output->open_mjpeg_block = TRUE;
+#endif
+
+ self->v4l2capture = gst_v4l2_object_new (GST_ELEMENT (self),
+ GST_OBJECT (GST_VIDEO_DECODER_SRC_PAD (self)),
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, klass->default_device,
+ gst_v4l2_get_input, gst_v4l2_set_input, NULL);
+ self->v4l2capture->no_initial_format = TRUE;
+ self->v4l2output->keep_aspect = FALSE;
+#ifdef USE_V4L2_TARGET_NV
+ g_mutex_init (&self->v4l2capture->cplane_stopped_lock);
+ g_cond_init (&self->v4l2capture->cplane_stopped_cond);
+#endif
+}
+
+static void
+gst_v4l2_video_dec_class_init (GstV4l2VideoDecClass * klass)
+{
+ GstElementClass *element_class;
+ GObjectClass *gobject_class;
+ GstVideoDecoderClass *video_decoder_class;
+
+ parent_class = g_type_class_peek_parent (klass);
+
+ element_class = (GstElementClass *) klass;
+ gobject_class = (GObjectClass *) klass;
+ video_decoder_class = (GstVideoDecoderClass *) klass;
+
+#ifndef USE_V4L2_TARGET_NV
+ GST_DEBUG_CATEGORY_INIT (gst_v4l2_video_dec_debug, "v4l2videodec", 0,
+ "V4L2 Video Decoder");
+#else
+ GST_DEBUG_CATEGORY_INIT (gst_v4l2_video_dec_debug, "v4l2videodec", 0,
+ "NVIDIA V4L2 Video Decoder");
+#endif
+
+ gobject_class->dispose = GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_dispose);
+ gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_finalize);
+
+#ifdef USE_V4L2_TARGET_NV
+ if (is_cuvid == FALSE) {
+ gobject_class->set_property =
+ GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_set_property_tegra);
+ gobject_class->get_property =
+ GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_get_property_tegra);
+ } else if (is_cuvid == TRUE) {
+ gobject_class->set_property =
+ GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_set_property_cuvid);
+ gobject_class->get_property =
+ GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_get_property_cuvid);
+ }
+
+ g_object_class_install_property (gobject_class, PROP_SKIP_FRAME,
+ g_param_spec_enum ("skip-frames",
+ "Skip frames",
+ "Type of frames to skip during decoding",
+ GST_TYPE_V4L2_VID_DEC_SKIP_FRAMES,
+ DEFAULT_SKIP_FRAME_TYPE,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ GST_PARAM_MUTABLE_PLAYING));
+
+ g_object_class_install_property (gobject_class, PROP_DROP_FRAME_INTERVAL,
+ g_param_spec_uint ("drop-frame-interval",
+ "Drop frames interval",
+ "Interval to drop the frames,ex: value of 5 means every 5th frame will be given by decoder, rest all dropped",
+ 0,
+ 30, 30,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY));
+
+ g_object_class_install_property (gobject_class, PROP_NUM_EXTRA_SURFACES,
+ g_param_spec_uint ("num-extra-surfaces",
+ "Number of extra surfaces",
+ "Additional number of surfaces in addition to min decode surfaces given by the v4l2 driver",
+ 0,
+ 55, 55,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY));
+
+ if (is_cuvid == FALSE) {
+ g_object_class_install_property (gobject_class, PROP_DISABLE_DPB,
+ g_param_spec_boolean ("disable-dpb",
+ "Disable DPB buffer",
+ "Set to disable DPB buffer for low latency",
+ DEFAULT_DISABLE_DPB, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_USE_FULL_FRAME,
+ g_param_spec_boolean ("enable-full-frame",
+ "Full Frame",
+ "Whether or not the data is full framed",
+ DEFAULT_FULL_FRAME, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class,
+ PROP_ENABLE_FRAME_TYPE_REPORTING,
+ g_param_spec_boolean ("enable-frame-type-reporting",
+ "enable-frame-type-reporting", "Set to enable frame type reporting",
+ DEFAULT_FRAME_TYPR_REPORTING,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_OPEN_MJPEG_BLOCK,
+ g_param_spec_boolean ("mjpeg",
+ "Open MJPEG Block",
+ "Set to open MJPEG block",
+ FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_ENABLE_ERROR_CHECK,
+ g_param_spec_boolean ("enable-error-check",
+ "enable-error-check",
+ "Set to enable error check",
+ DEFAULT_ERROR_CHECK, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_ENABLE_MAX_PERFORMANCE,
+ g_param_spec_boolean ("enable-max-performance",
+ "Enable max performance", "Set to enable max performance",
+ DEFAULT_MAX_PERFORMANCE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_CAP_BUF_DYNAMIC_ALLOCATION,
+ g_param_spec_enum ("capture-buffer-dynamic-allocation",
+ "Enable capture buffer dynamic allocation",
+ "Set to enable capture buffer dynamic allocation",
+ GST_TYPE_V4L2_DEC_CAP_BUF_DYNAMIC_ALLOC,
+ DEFAULT_CAP_BUF_DYNAMIC_ALLOCATION,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ } else if (is_cuvid == TRUE) {
+ g_object_class_install_property (gobject_class, PROP_CUDADEC_MEM_TYPE,
+ g_param_spec_enum ("cudadec-memtype",
+ "Memory type for cuda decoder buffers",
+ "Set to specify memory type for cuda decoder buffers",
+ GST_TYPE_V4L2_VID_CUDADEC_MEM_TYPE, DEFAULT_CUDADEC_MEM_TYPE,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY));
+
+ g_object_class_install_property (gobject_class, PROP_CUDADEC_GPU_ID,
+ g_param_spec_uint ("gpu-id",
+ "GPU Device ID",
+ "Set to GPU Device ID for decoder ",
+ 0,
+ G_MAXUINT, DEFAULT_CUDADEC_GPU_ID,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY));
+
+ g_object_class_install_property (gobject_class, PROP_CUDADEC_LOW_LATENCY,
+ g_param_spec_boolean ("low-latency-mode",
+ "CUVID Decode Low Latency Mode",
+ "Set low latency mode for bitstreams having I and IPPP frames",
+ default_cudadec_low_latency,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_EXTRACT_SEI_TYPE5_DATA,
+ g_param_spec_boolean ("extract-sei-type5-data",
+ "extract-sei-type5-data",
+ "Set to extract and attach SEI type5 unregistered data on output buffer",
+ default_sei_extract_data,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_SEI_UUID_STRING,
+ g_param_spec_string ("sei-uuid", "SEI UUID String",
+ "Set 16 bytes UUID string for SEI Parsing, extract-sei-type5-data should be TRUE",
+ NULL,
+ (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
+
+ }
+#endif
+
+ video_decoder_class->open = GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_open);
+ video_decoder_class->close = GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_close);
+ video_decoder_class->start = GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_start);
+ video_decoder_class->stop = GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_stop);
+ video_decoder_class->finish = GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_finish);
+ video_decoder_class->flush = GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_flush);
+ video_decoder_class->drain = GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_drain);
+ video_decoder_class->set_format =
+ GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_set_format);
+ video_decoder_class->negotiate =
+ GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_negotiate);
+ video_decoder_class->decide_allocation =
+ GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_decide_allocation);
+ /* FIXME propose_allocation or not ? */
+ video_decoder_class->handle_frame =
+ GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_handle_frame);
+ video_decoder_class->getcaps =
+ GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_sink_getcaps);
+ video_decoder_class->src_query =
+ GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_src_query);
+ video_decoder_class->sink_event =
+ GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_sink_event);
+#ifdef USE_V4L2_TARGET_NV
+ video_decoder_class->src_event =
+ GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_src_event);
+#endif
+
+ element_class->change_state =
+ GST_DEBUG_FUNCPTR (gst_v4l2_video_dec_change_state);
+
+ gst_v4l2_object_install_m2m_properties_helper (gobject_class);
+#ifdef USE_V4L2_TARGET_NV
+ if (is_cuvid == FALSE)
+ gst_v4l2_object_install_m2m_dec_iomode_properties_helper (gobject_class);
+#endif
+}
+
+static void
+gst_v4l2_video_dec_subclass_init (gpointer g_class, gpointer data)
+{
+ GstV4l2VideoDecClass *klass = GST_V4L2_VIDEO_DEC_CLASS (g_class);
+ GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
+ GstV4l2VideoDecCData *cdata = data;
+
+ klass->default_device = cdata->device;
+
+#ifndef USE_V4L2_TARGET_NV
+ /* Note: gst_pad_template_new() take the floating ref from the caps */
+ gst_element_class_add_pad_template (element_class,
+ gst_pad_template_new ("sink", GST_PAD_SINK, GST_PAD_ALWAYS,
+ cdata->sink_caps));
+ gst_element_class_add_pad_template (element_class,
+ gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS,
+ cdata->src_caps));
+
+ gst_element_class_set_static_metadata (element_class, cdata->longname,
+ "Codec/Decoder/Video", cdata->description,
+ "Nicolas Dufresne ");
+
+ gst_caps_unref (cdata->sink_caps);
+ gst_caps_unref (cdata->src_caps);
+#else
+ gst_element_class_add_pad_template (element_class,
+ gst_static_pad_template_get (&gst_v4l2dec_sink_template));
+ gst_element_class_add_pad_template (element_class,
+ gst_static_pad_template_get (&gst_v4l2dec_src_template));
+
+ gst_element_class_set_static_metadata (element_class,
+ "NVIDIA v4l2 video decoder" /*cdata->longname */ ,
+ "Codec/Decoder/Video",
+ "Decode video streams via V4L2 API" /*cdata->description */ ,
+ "Nicolas Dufresne , Viranjan Pagar ");
+#endif
+
+ g_free (cdata);
+}
+
+/* Probing functions */
+gboolean
+gst_v4l2_is_video_dec (GstCaps * sink_caps, GstCaps * src_caps)
+{
+ gboolean ret = FALSE;
+
+ if (gst_caps_is_subset (sink_caps, gst_v4l2_object_get_codec_caps ())
+ && gst_caps_is_subset (src_caps, gst_v4l2_object_get_raw_caps ()))
+ ret = TRUE;
+
+ return ret;
+}
+
+#ifndef USE_V4L2_TARGET_NV
+static gchar *
+gst_v4l2_video_dec_set_metadata (GstStructure * s, GstV4l2VideoDecCData * cdata,
+ const gchar * basename)
+{
+ gchar *codec_name = NULL;
+ gchar *type_name = NULL;
+
+#define SET_META(codec) \
+G_STMT_START { \
+ cdata->longname = "V4L2 " codec " Decoder"; \
+ cdata->description = "Decodes " codec " streams via V4L2 API"; \
+ codec_name = g_ascii_strdown (codec, -1); \
+} G_STMT_END
+
+ if (gst_structure_has_name (s, "image/jpeg")) {
+ SET_META ("JPEG");
+ } else if (gst_structure_has_name (s, "video/mpeg")) {
+ gint mpegversion = 0;
+ gst_structure_get_int (s, "mpegversion", &mpegversion);
+
+ if (mpegversion == 2) {
+ SET_META ("MPEG2");
+ } else {
+ SET_META ("MPEG4");
+ }
+ } else if (gst_structure_has_name (s, "video/x-h263")) {
+ SET_META ("H263");
+ } else if (gst_structure_has_name (s, "video/x-h264")) {
+ SET_META ("H264");
+ } else if (gst_structure_has_name (s, "video/x-wmv")) {
+ SET_META ("VC1");
+ } else if (gst_structure_has_name (s, "video/x-vp8")) {
+ SET_META ("VP8");
+ } else if (gst_structure_has_name (s, "video/x-vp9")) {
+ SET_META ("VP9");
+ } else if (gst_structure_has_name (s, "video/x-bayer")) {
+ SET_META ("BAYER");
+ } else if (gst_structure_has_name (s, "video/x-sonix")) {
+ SET_META ("SONIX");
+ } else if (gst_structure_has_name (s, "video/x-pwc1")) {
+ SET_META ("PWC1");
+ } else if (gst_structure_has_name (s, "video/x-pwc2")) {
+ SET_META ("PWC2");
+ } else {
+ /* This code should be kept on sync with the exposed CODEC type of format
+ * from gstv4l2object.c. This warning will only occure in case we forget
+ * to also add a format here. */
+ gchar *s_str = gst_structure_to_string (s);
+ g_warning ("Missing fixed name mapping for caps '%s', this is a GStreamer "
+ "bug, please report at https://bugs.gnome.org", s_str);
+ g_free (s_str);
+ }
+
+ if (codec_name) {
+ type_name = g_strdup_printf ("v4l2%sdec", codec_name);
+ if (g_type_from_name (type_name) != 0) {
+ g_free (type_name);
+ type_name = g_strdup_printf ("v4l2%s%sdec", basename, codec_name);
+ }
+
+ g_free (codec_name);
+ }
+
+ return type_name;
+#undef SET_META
+}
+
+void
+gst_v4l2_video_dec_register (GstPlugin * plugin, const gchar * basename,
+ const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps)
+{
+ gint i;
+
+ for (i = 0; i < gst_caps_get_size (sink_caps); i++) {
+ GstV4l2VideoDecCData *cdata;
+ GstStructure *s;
+ GTypeQuery type_query;
+ GTypeInfo type_info = { 0, };
+ GType type, subtype;
+ gchar *type_name;
+
+ s = gst_caps_get_structure (sink_caps, i);
+
+ cdata = g_new0 (GstV4l2VideoDecCData, 1);
+ cdata->device = g_strdup (device_path);
+ cdata->sink_caps = gst_caps_new_empty ();
+ gst_caps_append_structure (cdata->sink_caps, gst_structure_copy (s));
+ cdata->src_caps = gst_caps_ref (src_caps);
+ type_name = gst_v4l2_video_dec_set_metadata (s, cdata, basename);
+
+ /* Skip over if we hit an unmapped type */
+ if (!type_name) {
+ g_free (cdata);
+ continue;
+ }
+
+ type = gst_v4l2_video_dec_get_type ();
+ g_type_query (type, &type_query);
+ memset (&type_info, 0, sizeof (type_info));
+ type_info.class_size = type_query.class_size;
+ type_info.instance_size = type_query.instance_size;
+ type_info.class_init = gst_v4l2_video_dec_subclass_init;
+ type_info.class_data = cdata;
+ type_info.instance_init = gst_v4l2_video_dec_subinstance_init;
+
+ subtype = g_type_register_static (type, type_name, &type_info, 0);
+ if (!gst_element_register (plugin, type_name, GST_RANK_PRIMARY + 1,
+ subtype))
+ GST_WARNING ("Failed to register plugin '%s'", type_name);
+
+ g_free (type_name);
+ }
+}
+
+#else
+
+void
+gst_v4l2_video_dec_register (GstPlugin * plugin, const gchar * basename,
+ const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps)
+{
+ GTypeQuery type_query;
+ GTypeInfo type_info = { 0, };
+ GType type, subtype;
+ GstV4l2VideoDecCData *cdata;
+
+ cdata = g_new0 (GstV4l2VideoDecCData, 1);
+ cdata->device = g_strdup (device_path);
+
+ type = gst_v4l2_video_dec_get_type ();
+ g_type_query (type, &type_query);
+ memset (&type_info, 0, sizeof (type_info));
+ type_info.class_size = type_query.class_size;
+ type_info.instance_size = type_query.instance_size;
+ type_info.class_init = gst_v4l2_video_dec_subclass_init;
+ type_info.class_data = cdata;
+ type_info.instance_init = gst_v4l2_video_dec_subinstance_init;
+
+ subtype = g_type_register_static (type, "nvv4l2decoder", &type_info, 0);
+ gst_element_register (plugin, "nvv4l2decoder", GST_RANK_PRIMARY + 11, subtype);
+}
+#endif
diff --git a/gst-v4l2/gstv4l2videodec.h b/gst-v4l2/gstv4l2videodec.h
new file mode 100644
index 0000000..cd08809
--- /dev/null
+++ b/gst-v4l2/gstv4l2videodec.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2014 Collabora Ltd.
+ * Author: Nicolas Dufresne
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __GST_V4L2_VIDEO_DEC_H__
+#define __GST_V4L2_VIDEO_DEC_H__
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+G_BEGIN_DECLS
#define GST_TYPE_V4L2_VIDEO_DEC \
  (gst_v4l2_video_dec_get_type())
#define GST_V4L2_VIDEO_DEC(obj) \
  (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_V4L2_VIDEO_DEC,GstV4l2VideoDec))
#define GST_V4L2_VIDEO_DEC_CLASS(klass) \
  (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_V4L2_VIDEO_DEC,GstV4l2VideoDecClass))
#define GST_IS_V4L2_VIDEO_DEC(obj) \
  (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_V4L2_VIDEO_DEC))
/* Bugfix: the parameter was named 'obj' while the expansion references
 * 'klass', so any use of this macro expanded to an undefined identifier. */
#define GST_IS_V4L2_VIDEO_DEC_CLASS(klass) \
  (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_V4L2_VIDEO_DEC))
+
+/* The structures are renamed as the name conflicts with the
+ * OSS v4l2 library structures. */
+#ifdef USE_V4L2_TARGET_NV
+#define GstV4l2VideoDec GstNvV4l2VideoDec
+#define GstV4l2VideoDecClass GstNvV4l2VideoDecClass
+#define LOOP_COUNT_TO_WAIT_FOR_DQEVENT 6
+#define WAIT_TIME_PER_LOOP_FOR_DQEVENT 100*1000
+
+#define VP8_START_BYTE_0 0x9D
+#define VP8_START_BYTE_1 0x01
+
+#define VP9_START_BYTE_0 0x49
+#define VP9_START_BYTE_1 0x83
+#define VP9_START_BYTE_2 0x42
+#endif
+
typedef struct _GstV4l2VideoDec GstV4l2VideoDec;
typedef struct _GstV4l2VideoDecClass GstV4l2VideoDecClass;

/* Instance structure of the (Nv)V4l2 video decoder element. */
struct _GstV4l2VideoDec
{
  GstVideoDecoder parent;

  /* < private > */
  GstV4l2Object *v4l2output;    /* V4L2 object for the sink/bitstream queue */
  GstV4l2Object *v4l2capture;   /* V4L2 object for the decoded-frame queue */

  /* pads */
  GstCaps *probed_srccaps;      /* caps probed from the driver for the src side */
  GstCaps *probed_sinkcaps;     /* caps probed from the driver for the sink side */

  /* State */
  GstVideoCodecState *input_state;  /* negotiated input stream state */

  gboolean active;
  GstFlowReturn output_flow;    /* last flow result produced on the src side */
  guint64 frame_num;
#ifdef USE_V4L2_TARGET_NV
  /* PTS -> system-time map; presumably for latency measurement — confirm. */
  GHashTable* hash_pts_systemtime;
  gdouble buffer_in_time;
  guint64 decoded_picture_cnt;
  guint32 skip_frames;          /* "skip-frames" property: frame types to skip */
  gboolean idr_received;
  guint32 drop_frame_interval;  /* "drop-frame-interval": emit every Nth frame only */
  guint32 num_extra_surfaces;   /* "num-extra-surfaces": extra beyond driver minimum */
  gboolean is_drc;              /* NOTE(review): likely dynamic-resolution-change flag — confirm */
  gboolean disable_dpb;         /* "disable-dpb" (Tegra only): low-latency mode */
  gboolean enable_full_frame;   /* "enable-full-frame" (Tegra only) */
  gboolean enable_frame_type_reporting; /* "enable-frame-type-reporting" (Tegra only) */
  gboolean enable_error_check;  /* "enable-error-check" (Tegra only) */
  gboolean enable_max_performance;      /* "enable-max-performance" (Tegra only) */
  gboolean set_format;
  guint32 cudadec_mem_type;     /* "cudadec-memtype" (dGPU only) */
  guint32 cudadec_gpu_id;       /* "gpu-id" (dGPU only) */
  guint32 cudadec_num_surfaces;
  gboolean cudadec_low_latency; /* "low-latency-mode" (dGPU only) */
  gboolean extract_sei_type5_data;      /* "extract-sei-type5-data" (dGPU only) */
  gchar *sei_uuid_string;       /* "sei-uuid": 16-byte UUID string for SEI parsing */
  gdouble rate;
  guint32 cap_buf_dynamic_allocation;   /* "capture-buffer-dynamic-allocation" (Tegra only) */
  guint32 current_width;        /* current/old dimensions; presumably used to
                                 * detect resolution changes — confirm */
  guint32 current_height;
  guint32 old_width;
  guint32 old_height;
  gboolean valid_vpx;
#endif
};

/* Class structure; default_device is filled per registered subclass from the
 * GstV4l2VideoDecCData in gst_v4l2_video_dec_subclass_init(). */
struct _GstV4l2VideoDecClass
{
  GstVideoDecoderClass parent_class;

  gchar *default_device;        /* device node this subclass opens by default */
};
+
+GType gst_v4l2_video_dec_get_type (void);
+
+gboolean gst_v4l2_is_video_dec (GstCaps * sink_caps, GstCaps * src_caps);
+#ifdef USE_V4L2_TARGET_NV
+gboolean set_v4l2_controls (GstV4l2VideoDec *self);
+#endif
+void gst_v4l2_video_dec_register (GstPlugin * plugin,
+ const gchar * basename,
+ const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps);
+
+G_END_DECLS
+#endif /* __GST_V4L2_VIDEO_DEC_H__ */
diff --git a/gst-v4l2/gstv4l2videoenc.c b/gst-v4l2/gstv4l2videoenc.c
new file mode 100644
index 0000000..232521d
--- /dev/null
+++ b/gst-v4l2/gstv4l2videoenc.c
@@ -0,0 +1,2746 @@
+/*
+ * Copyright (C) 2014-2017 SUMOMO Computer Association
+ * Authors Ayaka
+ * Copyright (C) 2017 Collabora Ltd.
+ * Author: Nicolas Dufresne
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#ifdef USE_V4L2_TARGET_NV
+#include
+#endif
+
+#include "gstv4l2object.h"
+#include "gstv4l2videoenc.h"
+#include "gstnvdsseimeta.h"
+#include "gst-nvcustomevent.h"
+
+#include
+#include
+
+GST_DEBUG_CATEGORY_STATIC (gst_v4l2_video_enc_debug);
+#define GST_CAT_DEFAULT gst_v4l2_video_enc_debug
+static gboolean enable_latency_measurement = FALSE;
+
+#ifdef USE_V4L2_TARGET_NV
+#define OUTPUT_CAPS \
+ "video/x-raw(memory:NVMM), " \
+ "width = (gint) [ 1, MAX ], " \
+ "height = (gint) [ 1, MAX ], " \
+ "format = (string) { I420, NV12, P010_10LE, Y444, Y444_10LE, NV24}, " \
+ "framerate = (fraction) [ 0, MAX ];"
+
+static GstStaticCaps sink_template_caps =
+ GST_STATIC_CAPS (OUTPUT_CAPS);
+static GstStaticPadTemplate gst_v4l2enc_sink_template =
+GST_STATIC_PAD_TEMPLATE ("sink",
+ GST_PAD_SINK,
+ GST_PAD_ALWAYS,
+ GST_STATIC_CAPS (OUTPUT_CAPS));
+
+#endif
+
/* Per-subclass construction data handed from the encoder register code to
 * class_init/instance_init. */
typedef struct
{
  gchar *device;                /* device node path (g_strdup'd) */
  GstCaps *sink_caps;           /* caps for the raw input side */
  GstCaps *src_caps;            /* caps for the encoded output side */
} GstV4l2VideoEncCData;
+
+#ifdef USE_V4L2_TARGET_NV
+GstVideoCodecFrame *
+gst_v4l2_video_enc_find_nearest_frame (GstV4l2VideoEnc *self,
+ GstBuffer * buf, GList * frames);
+gboolean set_v4l2_video_encoder_properties (GstVideoEncoder * encoder);
+gboolean setQpRange (GstV4l2Object * v4l2object, guint label, guint MinQpI,
+ guint MaxQpI, guint MinQpP, guint MaxQpP, guint MinQpB, guint MaxQpB);
+gboolean setHWPresetType (GstV4l2Object * v4l2object, guint label,
+ enum v4l2_enc_hw_preset_type type);
+gint gst_v4l2_trace_file_open (FILE ** file);
+void gst_v4l2_trace_file_close (FILE * file);
+void gst_v4l2_trace_printf (FILE * file, const gchar *fmt, ...);
+
+static gboolean
+gst_v4l2_video_enc_parse_constqp (GstV4l2VideoEnc * self, const gchar * arr);
+static gboolean
+gst_v4l2_video_enc_parse_initqp (GstV4l2VideoEnc * self, const gchar * arr);
+static gboolean
+gst_v4l2_video_enc_parse_quantization_range (GstV4l2VideoEnc * self,
+ const gchar * arr);
+static GType gst_v4l2_videnc_hw_preset_level_get_type (void);
+static GType gst_v4l2_videnc_tuning_info_get_type (void);
+static void gst_v4l2_video_encoder_forceIDR (GstV4l2VideoEnc * self);
+
+static GType gst_v4l2_videnc_ratecontrol_get_type (void);
/* Action-signal ids exposed by the encoder (force-IDR). */
enum
{
  /* actions */
  SIGNAL_FORCE_IDR,
  LAST_SIGNAL
};

/* Signal id table; entries stay 0 until the signals are registered. */
static guint gst_v4l2_signals[LAST_SIGNAL] = { 0 };
+
+#endif
+
+enum
+{
+ PROP_0,
+ V4L2_STD_OBJECT_PROPS,
+#ifdef USE_V4L2_TARGET_NV
+ /* Common properties */
+ PROP_BITRATE,
+ PROP_RATE_CONTROL,
+ PROP_INTRA_FRAME_INTERVAL,
+ /* Properties exposed on dGPU only */
+ PROP_CUDAENC_GPU_ID,
+ PROP_CUDAENC_PRESET_ID,
+ PROP_CUDAENC_CONSTQP,
+ PROP_CUDAENC_INITQP,
+ PROP_CUDAENC_TUNING_INFO_ID,
+ /* Properties exposed on Tegra only */
+ PROP_PEAK_BITRATE,
+ PROP_QUANT_I_FRAMES,
+ PROP_QUANT_P_FRAMES,
+ PROP_QUANT_B_FRAMES,
+ PROP_HW_PRESET_LEVEL,
+ PROP_QUANT_RANGE,
+ PROP_VIRTUAL_BUFFER_SIZE,
+ PROP_MEASURE_LATENCY,
+ PROP_RC_ENABLE,
+ PROP_MAX_PERF,
+ PROP_IDR_FRAME_INTERVAL,
+ PROP_FORCE_INTRA,
+ PROP_COPY_METADATA,
+ PROP_FORCE_IDR
+#endif
+};
+
+#ifdef USE_V4L2_TARGET_NV
+/* Defaults */
+#define GST_V4L2_VIDEO_ENC_BITRATE_DEFAULT (4000000)
+#define GST_V4L2_VIDEO_ENC_PEAK_BITRATE_DEFAULT (0)
+#define DEFAULT_RATE_CONTROL V4L2_MPEG_VIDEO_BITRATE_MODE_CBR
+#define DEFAULT_INTRA_FRAME_INTERVAL 30
+#define DEFAULT_CUDAENC_GPU_ID 0
+#define DEFAULT_CUDAENC_PRESET_ID 1
+#define DEFAULT_CUDAENC_TUNING_INFO_ID 3
+#define DEFAULT_IDR_FRAME_INTERVAL 256
+#define GST_V4L2_VIDEO_ENC_QUANT_I_FRAMES_DEFAULT (0xffffffff)
+#define GST_V4L2_VIDEO_ENC_QUANT_P_FRAMES_DEFAULT (0xffffffff)
+#define GST_V4L2_VIDEO_ENC_QUANT_B_FRAMES_DEFAULT (0xffffffff)
+#define DEFAULT_HW_PRESET_LEVEL V4L2_ENC_HW_PRESET_ULTRAFAST
+#define DEFAULT_TUNING_INFO_PRESET V4L2_ENC_TUNING_INFO_LOW_LATENCY
+
+#define GST_TYPE_V4L2_VID_ENC_HW_PRESET_LEVEL (gst_v4l2_videnc_hw_preset_level_get_type ())
+#define GST_TYPE_V4L2_VID_ENC_TUNING_INFO_PRESET (gst_v4l2_videnc_tuning_info_get_type ())
+#define GST_TYPE_V4L2_VID_ENC_RATECONTROL (gst_v4l2_videnc_ratecontrol_get_type())
+#define DEFAULT_VBV_SIZE 4000000
+#endif
+
+#define gst_v4l2_video_enc_parent_class parent_class
+G_DEFINE_ABSTRACT_TYPE (GstV4l2VideoEnc, gst_v4l2_video_enc,
+ GST_TYPE_VIDEO_ENCODER);
+
+static gdouble get_current_system_timestamp(void)
+{
+ struct timeval t1;
+ double elapsedTime = 0;
+ gettimeofday(&t1, NULL);
+ elapsedTime = (t1.tv_sec) * 1000.0;
+ elapsedTime += (t1.tv_usec) / 1000.0;
+ return elapsedTime;
+}
+
+#ifdef USE_V4L2_TARGET_NV
+GType
+gst_v4l2_enc_output_io_mode_get_type (void)
+{
+ static GType v4l2_enc_output_io_mode = 0;
+
+ if (!v4l2_enc_output_io_mode) {
+ static const GEnumValue enc_output_io_modes[] = {
+ {GST_V4L2_IO_AUTO, "GST_V4L2_IO_AUTO", "auto"},
+ {GST_V4L2_IO_MMAP, "GST_V4L2_IO_MMAP", "mmap"},
+ {GST_V4L2_IO_DMABUF_IMPORT, "GST_V4L2_IO_DMABUF_IMPORT", "dmabuf-import"},
+ {0, NULL, NULL}
+ };
+
+ v4l2_enc_output_io_mode = g_enum_register_static ("GstNvV4l2EncOutputIOMode",
+ enc_output_io_modes);
+ }
+ return v4l2_enc_output_io_mode;
+}
+
+GType
+gst_v4l2_enc_capture_io_mode_get_type (void)
+{
+ static GType v4l2_enc_capture_io_mode = 0;
+
+ if (!v4l2_enc_capture_io_mode) {
+ static const GEnumValue enc_capture_io_modes[] = {
+ {GST_V4L2_IO_AUTO, "GST_V4L2_IO_AUTO", "auto"},
+ {GST_V4L2_IO_MMAP, "GST_V4L2_IO_MMAP", "mmap"},
+ {0, NULL, NULL}
+ };
+
+ v4l2_enc_capture_io_mode = g_enum_register_static ("GstNvV4l2EncCaptureIOMode",
+ enc_capture_io_modes);
+ }
+ return v4l2_enc_capture_io_mode;
+}
+#endif
+
/* GObject::set_property used on Tegra.  Most values are only cached on the
 * element here and pushed to the driver later; "bitrate" is additionally
 * forwarded immediately when the output device is already open. */
static void
gst_v4l2_video_enc_set_property_tegra (GObject * object,
    guint prop_id, const GValue * value, GParamSpec * pspec)
{
  GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (object);

  switch (prop_id) {
    case PROP_CAPTURE_IO_MODE:
      /* The only property routed to the capture-side V4L2 object. */
      if (!gst_v4l2_object_set_property_helper (self->v4l2capture,
              prop_id, value, pspec)) {
        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      }
      break;

#ifdef USE_V4L2_TARGET_NV
    case PROP_RATE_CONTROL:
      self->ratecontrol = g_value_get_enum (value);
      break;

    case PROP_BITRATE:
      self->bitrate = g_value_get_uint (value);
      /* Mutable at runtime: apply via S_EXT_CTRLS right away if open. */
      if (GST_V4L2_IS_OPEN (self->v4l2output)) {
        if (!set_v4l2_video_mpeg_class (self->v4l2output,
                V4L2_CID_MPEG_VIDEO_BITRATE, self->bitrate)) {
          g_print ("S_EXT_CTRLS for BITRATE failed\n");
        }
      }
      break;

    case PROP_INTRA_FRAME_INTERVAL:
      self->iframeinterval = g_value_get_uint (value);
      break;

    case PROP_PEAK_BITRATE:
      self->peak_bitrate = g_value_get_uint (value);
      break;

    case PROP_QUANT_RANGE:
      /* Parses the quantization-range string into the per-frame-type QP
       * fields (exact format defined by the parse helper). */
      gst_v4l2_video_enc_parse_quantization_range (self,
          g_value_get_string (value));
      self->set_qpRange = TRUE;
      break;

    case PROP_QUANT_I_FRAMES:
      self->quant_i_frames = g_value_get_uint (value);
      break;

    case PROP_QUANT_P_FRAMES:
      self->quant_p_frames = g_value_get_uint (value);
      break;

    case PROP_QUANT_B_FRAMES:
      self->quant_b_frames = g_value_get_uint (value);
      break;

    case PROP_HW_PRESET_LEVEL:
      self->hw_preset_level = g_value_get_enum (value);
      break;

    case PROP_VIRTUAL_BUFFER_SIZE:
      self->virtual_buffer_size = g_value_get_uint (value);
      break;

    case PROP_MEASURE_LATENCY:
      self->measure_latency = g_value_get_boolean (value);
      break;

    case PROP_RC_ENABLE:
      self->ratecontrol_enable = g_value_get_boolean (value);
      break;

    case PROP_MAX_PERF:
      self->maxperf_enable = g_value_get_boolean (value);
      break;

    case PROP_IDR_FRAME_INTERVAL:
      self->idrinterval = g_value_get_uint (value);
      break;
#endif

    /* By default, only set on output */
    default:
      if (!gst_v4l2_object_set_property_helper (self->v4l2output,
              prop_id, value, pspec)) {
        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      }
      break;
  }
}
+
/* GObject::set_property used on dGPU (cuvid).  Mirrors the Tegra setter for
 * the common properties and adds the cuvid-only ones (gpu-id, preset,
 * const/init QP, tuning info, force-IDR/intra, metadata copy). */
static void
gst_v4l2_video_enc_set_property_cuvid (GObject * object,
    guint prop_id, const GValue * value, GParamSpec * pspec)
{
  GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (object);

  switch (prop_id) {
    case PROP_CAPTURE_IO_MODE:
      /* The only property routed to the capture-side V4L2 object. */
      if (!gst_v4l2_object_set_property_helper (self->v4l2capture,
              prop_id, value, pspec)) {
        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      }
      break;

#ifdef USE_V4L2_TARGET_NV
    case PROP_RATE_CONTROL:
      self->ratecontrol = g_value_get_enum (value);
      break;

    case PROP_BITRATE:
      self->bitrate = g_value_get_uint (value);
      /* Mutable at runtime: apply via S_EXT_CTRLS right away if open. */
      if (GST_V4L2_IS_OPEN (self->v4l2output)) {
        if (!set_v4l2_video_mpeg_class (self->v4l2output,
                V4L2_CID_MPEG_VIDEO_BITRATE, self->bitrate)) {
          g_print ("S_EXT_CTRLS for BITRATE failed\n");
        }
      }
      break;

    case PROP_INTRA_FRAME_INTERVAL:
      self->iframeinterval = g_value_get_uint (value);
      break;

    case PROP_QUANT_RANGE:
      gst_v4l2_video_enc_parse_quantization_range (self,
          g_value_get_string (value));
      self->set_qpRange = TRUE;
      break;

    case PROP_CUDAENC_GPU_ID:
      self->cudaenc_gpu_id = g_value_get_uint (value);
      break;

    case PROP_CUDAENC_PRESET_ID:
      self->cudaenc_preset_id = g_value_get_uint (value);
      break;

    case PROP_CUDAENC_CONSTQP:
      /* Parsed into the per-frame-type constant-QP fields. */
      gst_v4l2_video_enc_parse_constqp (self,
          g_value_get_string (value));
      break;

    case PROP_CUDAENC_INITQP:
      /* Parsed into the per-frame-type initial-QP fields. */
      gst_v4l2_video_enc_parse_initqp (self,
          g_value_get_string (value));
      break;

    case PROP_CUDAENC_TUNING_INFO_ID:
      self->cudaenc_tuning_info_id = g_value_get_enum (value);
      break;

    case PROP_IDR_FRAME_INTERVAL:
      self->idrinterval = g_value_get_uint (value);
      break;

    case PROP_FORCE_IDR:
      self->force_idr = g_value_get_boolean (value);
      break;

    case PROP_FORCE_INTRA:
      self->force_intra = g_value_get_boolean (value);
      break;

    case PROP_COPY_METADATA:
      self->copy_meta = g_value_get_boolean (value);
      break;
#endif

    /* By default, only set on output */
    default:
      if (!gst_v4l2_object_set_property_helper (self->v4l2output,
              prop_id, value, pspec)) {
        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      }
      break;
  }
}
+
/* GObject::get_property used on Tegra: reads back the cached values set by
 * the matching setter. */
static void
gst_v4l2_video_enc_get_property_tegra (GObject * object,
    guint prop_id, GValue * value, GParamSpec * pspec)
{
  GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (object);

  switch (prop_id) {
    case PROP_CAPTURE_IO_MODE:
      if (!gst_v4l2_object_get_property_helper (self->v4l2capture,
              prop_id, value, pspec)) {
        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      }
      break;

#ifdef USE_V4L2_TARGET_NV
    case PROP_RATE_CONTROL:
      g_value_set_enum (value, self->ratecontrol);
      break;

    case PROP_BITRATE:
      g_value_set_uint (value, self->bitrate);
      break;

    case PROP_INTRA_FRAME_INTERVAL:
      g_value_set_uint (value, self->iframeinterval);
      break;

    case PROP_PEAK_BITRATE:
      g_value_set_uint (value, self->peak_bitrate);
      break;

    case PROP_QUANT_RANGE:
      /* Intentional stub: the range is write-only today; the read-back
       * helper is kept here disabled. */
      // gst_v4l2_video_enc_get_quantization_range (self, value);
      break;

    case PROP_QUANT_I_FRAMES:
      g_value_set_uint (value, self->quant_i_frames);
      break;

    case PROP_QUANT_P_FRAMES:
      g_value_set_uint (value, self->quant_p_frames);
      break;

    case PROP_QUANT_B_FRAMES:
      g_value_set_uint (value, self->quant_b_frames);
      break;

    case PROP_HW_PRESET_LEVEL:
      g_value_set_enum (value, self->hw_preset_level);
      break;

    case PROP_VIRTUAL_BUFFER_SIZE:
      g_value_set_uint (value, self->virtual_buffer_size);
      break;

    case PROP_MEASURE_LATENCY:
      g_value_set_boolean (value, self->measure_latency);
      break;

    case PROP_RC_ENABLE:
      g_value_set_boolean (value, self->ratecontrol_enable);
      break;

    case PROP_MAX_PERF:
      g_value_set_boolean (value, self->maxperf_enable);
      break;

    case PROP_IDR_FRAME_INTERVAL:
      g_value_set_uint (value, self->idrinterval);
      break;
#endif

    /* By default read from output */
    default:
      if (!gst_v4l2_object_get_property_helper (self->v4l2output,
              prop_id, value, pspec)) {
        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      }
      break;
  }
}
+
/* GObject::get_property used on dGPU (cuvid): reads back the cached values
 * set by the matching setter.  constqp/initqp are write-only (parsed from a
 * string, no stored string form to return). */
static void
gst_v4l2_video_enc_get_property_cuvid (GObject * object,
    guint prop_id, GValue * value, GParamSpec * pspec)
{
  GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (object);

  switch (prop_id) {
    case PROP_CAPTURE_IO_MODE:
      if (!gst_v4l2_object_get_property_helper (self->v4l2capture,
              prop_id, value, pspec)) {
        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      }
      break;

#ifdef USE_V4L2_TARGET_NV
    case PROP_RATE_CONTROL:
      g_value_set_enum (value, self->ratecontrol);
      break;

    case PROP_BITRATE:
      g_value_set_uint (value, self->bitrate);
      break;

    case PROP_INTRA_FRAME_INTERVAL:
      g_value_set_uint (value, self->iframeinterval);
      break;

    case PROP_CUDAENC_GPU_ID:
      g_value_set_uint(value, self->cudaenc_gpu_id);
      break;

    case PROP_CUDAENC_PRESET_ID:
      g_value_set_uint(value, self->cudaenc_preset_id);
      break;

    case PROP_QUANT_RANGE:
      /* Intentional stub: write-only property (see setter). */
      // gst_v4l2_video_enc_get_quantization_range (self, value);
      break;

    case PROP_CUDAENC_CONSTQP:
      /* Write-only: value left untouched. */
      break;

    case PROP_CUDAENC_INITQP:
      /* Write-only: value left untouched. */
      break;

    case PROP_CUDAENC_TUNING_INFO_ID:
      g_value_set_enum(value, self->cudaenc_tuning_info_id);
      break;

    case PROP_IDR_FRAME_INTERVAL:
      g_value_set_uint (value, self->idrinterval);
      break;

    case PROP_FORCE_IDR:
      g_value_set_boolean (value, self->force_idr);
      break;

    case PROP_FORCE_INTRA:
      g_value_set_boolean (value, self->force_intra);
      break;

    case PROP_COPY_METADATA:
      g_value_set_boolean (value, self->copy_meta);
      break;
#endif

    /* By default read from output */
    default:
      if (!gst_v4l2_object_get_property_helper (self->v4l2output,
              prop_id, value, pspec)) {
        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      }
      break;
  }
}
+
+static gboolean
+gst_v4l2_video_enc_open (GstVideoEncoder * encoder)
+{
+ GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);
+ GstCaps *codec_caps;
+#ifdef USE_V4L2_TARGET_NV
+ const gchar *mimetype;
+ GstStructure *s;
+ GstV4l2VideoEncClass *klass = NULL;
+ if (is_cuvid == TRUE)
+ klass = GST_V4L2_VIDEO_ENC_GET_CLASS (encoder);
+
+#endif
+
+ GST_DEBUG_OBJECT (self, "Opening");
+
+ if (!gst_v4l2_object_open (self->v4l2output))
+ goto failure;
+
+ if (!gst_v4l2_object_open_shared (self->v4l2capture, self->v4l2output))
+ goto failure;
+
+ self->probed_sinkcaps = gst_v4l2_object_probe_caps (self->v4l2output,
+ gst_v4l2_object_get_raw_caps ());
+
+ if (gst_caps_is_empty (self->probed_sinkcaps))
+ goto no_raw_format;
+
+ codec_caps = gst_pad_get_pad_template_caps (encoder->srcpad);
+ self->probed_srccaps = gst_v4l2_object_probe_caps (self->v4l2capture,
+ codec_caps);
+ gst_caps_unref (codec_caps);
+
+ if (gst_caps_is_empty (self->probed_srccaps))
+ goto no_encoded_format;
+
+#ifdef USE_V4L2_TARGET_NV
+ s = gst_caps_get_structure (self->probed_srccaps, 0);
+ mimetype = gst_structure_get_name (s);
+ if (g_str_equal (mimetype, "video/x-h264") && self->slice_output) {
+ gst_structure_remove_field (s, "alignment");
+ gst_structure_set (s, "alignment", G_TYPE_STRING, "nal", NULL);
+ }
+
+ if (self->measure_latency) {
+ if (gst_v4l2_trace_file_open (&self->tracing_file_enc) == 0) {
+ g_print ("%s: open trace file successfully\n", __func__);
+ self->got_frame_pt = g_queue_new ();
+ } else
+ g_print ("%s: failed to open trace file\n", __func__);
+ }
+
+ if (is_cuvid == TRUE) {
+ if (strcmp (klass->codec_name, "H264") == 0
+ || strcmp (klass->codec_name, "H265") == 0){
+ if (!klass->set_encoder_properties (encoder)) {
+ return FALSE;
+ }
+ }
+
+ if (!set_v4l2_video_encoder_properties (encoder)) {
+ return FALSE;
+ }
+ }
+#endif
+ return TRUE;
+
+no_encoded_format:
+ GST_ELEMENT_ERROR (self, RESOURCE, SETTINGS,
+ (_("Encoder on device %s has no supported output format"),
+ self->v4l2output->videodev), (NULL));
+ goto failure;
+
+
+no_raw_format:
+ GST_ELEMENT_ERROR (self, RESOURCE, SETTINGS,
+ (_("Encoder on device %s has no supported input format"),
+ self->v4l2output->videodev), (NULL));
+ goto failure;
+
+failure:
+ if (GST_V4L2_IS_OPEN (self->v4l2output))
+ gst_v4l2_object_close (self->v4l2output);
+
+ if (GST_V4L2_IS_OPEN (self->v4l2capture))
+ gst_v4l2_object_close (self->v4l2capture);
+
+ gst_caps_replace (&self->probed_srccaps, NULL);
+ gst_caps_replace (&self->probed_sinkcaps, NULL);
+
+ return FALSE;
+}
+
/* GstVideoEncoder::close — release both V4L2 objects and the probed caps;
 * also tears down the latency-tracing state created in open(). */
static gboolean
gst_v4l2_video_enc_close (GstVideoEncoder * encoder)
{
  GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);

  GST_DEBUG_OBJECT (self, "Closing");

  gst_v4l2_object_close (self->v4l2output);
  gst_v4l2_object_close (self->v4l2capture);
  gst_caps_replace (&self->probed_srccaps, NULL);
  gst_caps_replace (&self->probed_sinkcaps, NULL);

#ifdef USE_V4L2_TARGET_NV
  /* got_frame_pt only exists when the trace file was opened successfully,
   * so both are torn down under the same condition. */
  if (self->tracing_file_enc) {
    gst_v4l2_trace_file_close (self->tracing_file_enc);
    g_queue_free (self->got_frame_pt);
  }
#endif

  return TRUE;
}
+
/* GstVideoEncoder::start — arm the output queue, mark the element active
 * and reset the capture-loop flow state. */
static gboolean
gst_v4l2_video_enc_start (GstVideoEncoder * encoder)
{
  GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);

  GST_DEBUG_OBJECT (self, "Starting");

  gst_v4l2_object_unlock (self->v4l2output);
  g_atomic_int_set (&self->active, TRUE);
  self->output_flow = GST_FLOW_OK;

  /* PTS -> system-time map; presumably used for latency measurement —
   * confirm against the frame handling code.  Destroyed in stop(). */
  self->hash_pts_systemtime = g_hash_table_new(NULL, NULL);

  return TRUE;
}
+
/* Encoder stop vfunc: unblocks and stops both v4l2 queues, joins the
 * capture task and releases per-stream state. Always returns TRUE. */
static gboolean
gst_v4l2_video_enc_stop (GstVideoEncoder * encoder)
{
  GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);

  GST_DEBUG_OBJECT (self, "Stopping");

  /* Wake up anything blocked on either queue so the task can exit. */
  gst_v4l2_object_unlock (self->v4l2output);
  gst_v4l2_object_unlock (self->v4l2capture);

  /* Wait for capture thread to stop */
  gst_pad_stop_task (encoder->srcpad);

  GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
  self->output_flow = GST_FLOW_OK;
  GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);

  /* Should have been flushed already */
  g_assert (g_atomic_int_get (&self->active) == FALSE);
  g_assert (g_atomic_int_get (&self->processing) == FALSE);

  gst_v4l2_object_stop (self->v4l2output);
  gst_v4l2_object_stop (self->v4l2capture);

  /* Created in _start(); stored values point at fields of self, so only
   * the table itself needs destroying. */
  g_hash_table_destroy (self->hash_pts_systemtime);

  if (self->input_state) {
    gst_video_codec_state_unref (self->input_state);
    self->input_state = NULL;
  }

  GST_DEBUG_OBJECT (self, "Stopped");

  return TRUE;
}
+
+static gboolean
+gst_v4l2_encoder_cmd (GstV4l2Object * v4l2object, guint cmd, guint flags)
+{
+ struct v4l2_encoder_cmd ecmd = { 0, };
+
+ GST_DEBUG_OBJECT (v4l2object->element,
+ "sending v4l2 encoder command %u with flags %u", cmd, flags);
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ return FALSE;
+
+ ecmd.cmd = cmd;
+ ecmd.flags = flags;
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_ENCODER_CMD, &ecmd) < 0)
+ goto ecmd_failed;
+
+ return TRUE;
+
+ecmd_failed:
+ if (errno == ENOTTY) {
+ GST_INFO_OBJECT (v4l2object->element,
+ "Failed to send encoder command %u with flags %u for '%s'. (%s)",
+ cmd, flags, v4l2object->videodev, g_strerror (errno));
+ } else {
+ GST_ERROR_OBJECT (v4l2object->element,
+ "Failed to send encoder command %u with flags %u for '%s'. (%s)",
+ cmd, flags, v4l2object->videodev, g_strerror (errno));
+ }
+ return FALSE;
+}
+
/* Drain the encoder: send V4L2_ENC_CMD_STOP and wait for the capture task
 * to push out all pending buffers, then stop the task.
 * Returns GST_FLOW_OK when there was nothing to drain, otherwise the flow
 * result recorded by the output loop. */
static GstFlowReturn
gst_v4l2_video_enc_finish (GstVideoEncoder * encoder)
{
  GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);
  GstFlowReturn ret = GST_FLOW_OK;

  if (gst_pad_get_task_state (encoder->srcpad) != GST_TASK_STARTED)
    goto done;

  GST_DEBUG_OBJECT (self, "Finishing encoding");

  /* drop the stream lock while draining, so remaining buffers can be
   * pushed from the src pad task thread */
  GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);

#ifndef USE_V4L2_TARGET_NV
  if (gst_v4l2_encoder_cmd (self->v4l2capture, V4L2_ENC_CMD_STOP, 0)) {
#else
  /* NOTE(review): a decoder flag (V4L2_DEC_CMD_STOP_TO_BLACK) is passed to
   * an encoder stop command — presumably the NV driver reuses the value;
   * confirm against v4l2_nv_extensions.h. */
  if (gst_v4l2_encoder_cmd (self->v4l2capture, V4L2_ENC_CMD_STOP,
          V4L2_DEC_CMD_STOP_TO_BLACK)) {
#endif
    GstTask *task = encoder->srcpad->task;

    /* Wait for the task to be drained */
    GST_OBJECT_LOCK (task);
    while (GST_TASK_STATE (task) == GST_TASK_STARTED)
      GST_TASK_WAIT (task);
    GST_OBJECT_UNLOCK (task);
    ret = GST_FLOW_FLUSHING;
  }

  /* and ensure the processing thread has stopped in case another error
   * occured. */
  gst_v4l2_object_unlock (self->v4l2capture);
#ifdef USE_V4L2_TARGET_NV
  /* NOTE(review): clears the NV poll-interrupt control; exact semantics are
   * defined by v4l2_nv_extensions.h — confirm. */
  set_v4l2_video_mpeg_class (self->v4l2capture,
      V4L2_CID_MPEG_SET_POLL_INTERRUPT, 0);
#endif
  gst_pad_stop_task (encoder->srcpad);
  GST_VIDEO_ENCODER_STREAM_LOCK (encoder);

  /* Report the real flow result recorded by the output loop rather than
   * the internal FLUSHING marker used above. */
  if (ret == GST_FLOW_FLUSHING)
    ret = self->output_flow;

  GST_DEBUG_OBJECT (encoder, "Done draining buffers");

done:
  return ret;
}
+
+#ifdef USE_V4L2_TARGET_NV
+gboolean is_drc (GstVideoEncoder *encoder, GstCaps *input_caps)
+{
+ int curr_width, curr_height, new_width, new_height;
+ GstStructure *sink_caps_st, *input_caps_st;
+ GstCaps *sink_caps = gst_caps_make_writable(gst_pad_get_current_caps(encoder->sinkpad));
+ sink_caps_st = gst_caps_get_structure(sink_caps, 0);
+ input_caps_st = gst_caps_get_structure(input_caps, 0);
+
+ gst_structure_get_int(sink_caps_st, "width", &curr_width);
+ gst_structure_get_int(sink_caps_st, "height", &curr_height);
+ gst_structure_get_int(input_caps_st, "width", &new_width);
+ gst_structure_get_int(input_caps_st, "height", &new_height);
+
+ GST_INFO_OBJECT(encoder, "curr resolution: [%dx%d], new resolution: [%dx%d]", curr_width, curr_height, new_width, new_height);
+ if ((curr_width != new_width) || (curr_height != new_height))
+ return TRUE;
+
+ gst_caps_unref(sink_caps);
+ return FALSE;
+}
+
+void set_encoder_src_caps (GstVideoEncoder *encoder, GstCaps *input_caps)
+{
+ GstStructure *src_caps_st, *input_caps_st;
+ const GValue *framerate = NULL;
+ GstCaps *src_caps = gst_caps_make_writable(gst_pad_get_current_caps(encoder->srcpad));
+ src_caps_st = gst_caps_get_structure(src_caps, 0);
+ input_caps_st = gst_caps_get_structure(input_caps, 0);
+ framerate = gst_structure_get_value(input_caps_st, "framerate");
+ if (framerate)
+ gst_structure_set_value(src_caps_st, "framerate", framerate);
+
+ GST_DEBUG_OBJECT(encoder, "enc_src_caps: %s", gst_caps_to_string(src_caps));
+ gst_pad_set_caps(encoder->srcpad, src_caps);
+ gst_caps_unref(src_caps);
+}
+
+gboolean
+reconfigure_fps (GstVideoEncoder *encoder, GstCaps *input_caps, guint label)
+{
+ GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);
+ GstV4l2Object *v4l2object = self->v4l2output;
+ struct v4l2_ext_control control;
+ struct v4l2_ext_controls ctrls;
+ v4l2_ctrl_video_framerate enc_config;
+ gint curr_fps_n = 0, curr_fps_d = 0;
+ gint new_fps_n = 0, new_fps_d = 0;
+ gint ret = 0;
+
+ /*Check if current fps is same as in newly received caps */
+ GstStructure *sink_pad_st, *input_caps_st;
+ GstCaps *sink_caps = gst_pad_get_current_caps(encoder->sinkpad);
+ sink_pad_st = gst_caps_get_structure(sink_caps, 0);
+ input_caps_st = gst_caps_get_structure(input_caps, 0);
+ gst_structure_get_fraction (sink_pad_st, "framerate", &curr_fps_n, &curr_fps_d);
+ gst_structure_get_fraction (input_caps_st, "framerate", &new_fps_n, &new_fps_d);
+ GST_INFO_OBJECT(encoder, "old framerate:[%d/%d], new framerate:[%d/%d]", curr_fps_n, curr_fps_d, new_fps_n, new_fps_d);
+ if ((curr_fps_n != new_fps_n) || (curr_fps_d != new_fps_d)) {
+ enc_config.fps_n = new_fps_n;
+ enc_config.fps_d = new_fps_d;
+ } else {
+ GST_DEBUG_OBJECT(encoder, "No change in framerate");
+ return TRUE;
+ }
+ memset (&control, 0, sizeof (control));
+ memset (&ctrls, 0, sizeof (ctrls));
+
+ ctrls.count = 1;
+ ctrls.controls = &control;
+ ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+
+ control.id = label;
+ control.string = (gchar *) &enc_config;
+
+ ret = v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ if (ret < 0) {
+ GST_WARNING_OBJECT (encoder, "Error in reconfiguring fps\n");
+ return FALSE;
+ }
+
+ return TRUE;
+}
+#endif
+
/* Encoder set_format vfunc: configures the raw-input (output queue) side,
 * adjusts the H.264 src caps alignment, negotiates and activates the
 * capture pool. On repeated calls either reconfigures in place (cuvid,
 * non-DRC) or drains and restarts the device. Returns FALSE on failure. */
static gboolean
gst_v4l2_video_enc_set_format (GstVideoEncoder * encoder,
    GstVideoCodecState * state)
{
  gboolean ret = TRUE;
  GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);
  GstV4l2Error error = GST_V4L2_ERROR_INIT;
  GstCaps *outcaps;
  GstVideoCodecState *output;
#ifdef USE_V4L2_TARGET_NV
  const gchar *mimetype;
  GstStructure *s;
#endif

  GST_DEBUG_OBJECT (self, "Setting format: %" GST_PTR_FORMAT, state->caps);

  if (self->input_state) {
    /* Renegotiation path: decide whether a full restart is needed. */
    if (is_cuvid == FALSE) {
      if (gst_v4l2_object_caps_equal(self->v4l2output, state->caps)) {
        GST_DEBUG_OBJECT(self, "Compatible caps");
        return TRUE;
      }
    }
#ifdef USE_V4L2_TARGET_NV
    if (is_cuvid == TRUE) {
      if (is_drc (encoder, state->caps)) {
        /*TODO: Reset encoder to allocate new buffer size at encoder output plane*/
      } else {
        GST_DEBUG_OBJECT (self, "Not DRC. Reconfigure encoder with new fps if required");
        if (!reconfigure_fps(encoder, state->caps, V4L2_CID_MPEG_VIDEOENC_RECONFIG_FPS))
          GST_WARNING_OBJECT(self, "S_EXT_CTRLS for RECONFIG_FPS failed\n");
        /* set encoder src caps */
        set_encoder_src_caps(encoder, state->caps);
        return TRUE;
      }
    }
#endif

    /* Drain pending output, then stop both queues before reconfiguring. */
    if (gst_v4l2_video_enc_finish (encoder) != GST_FLOW_OK)
      return FALSE;

    gst_v4l2_object_stop (self->v4l2output);
    gst_v4l2_object_stop (self->v4l2capture);

    gst_video_codec_state_unref (self->input_state);
    self->input_state = NULL;
  }

  outcaps = gst_pad_get_pad_template_caps (encoder->srcpad);
  outcaps = gst_caps_make_writable (outcaps);

#ifdef USE_V4L2_TARGET_NV
  /* H.264 alignment: "nal" in slice-output mode, "au" otherwise. */
  s = gst_caps_get_structure (outcaps, 0);
  mimetype = gst_structure_get_name (s);
  if (g_str_equal (mimetype, "video/x-h264")) {
    gst_structure_remove_field (s, "alignment");
    if (self->slice_output) {
      gst_structure_set (s, "alignment", G_TYPE_STRING, "nal", NULL);
    } else {
      gst_structure_set (s, "alignment", G_TYPE_STRING, "au", NULL);
    }
  }
#endif

  output = gst_video_encoder_set_output_state (encoder, outcaps, state);
  gst_video_codec_state_unref (output);

#ifdef USE_V4L2_TARGET_NV
  /* NOTE(review): ownership of outcaps was transferred to
   * gst_video_encoder_set_output_state() above; fixating it here (and
   * discarding the result) looks suspicious — confirm this is intended. */
  outcaps = gst_caps_fixate (outcaps);
#endif
  if (!gst_video_encoder_negotiate (encoder))
    return FALSE;

  if (!gst_v4l2_object_set_format (self->v4l2output, state->caps, &error)) {
    gst_v4l2_error (self, &error);
    return FALSE;
  }

  /* activating a capture pool will also call STREAMON. CODA driver will
   * refuse to configure the output if the capture is stremaing. */
  if (!gst_buffer_pool_set_active (GST_BUFFER_POOL (self->v4l2capture->pool),
      TRUE)) {
    GST_WARNING_OBJECT (self, "Could not activate capture buffer pool.");
    return FALSE;
  }

  self->input_state = gst_video_codec_state_ref (state);

  GST_DEBUG_OBJECT (self, "output caps: %" GST_PTR_FORMAT, state->caps);

  return ret;
}
+
+static gboolean
+gst_v4l2_video_enc_flush (GstVideoEncoder * encoder)
+{
+ GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);
+
+ GST_DEBUG_OBJECT (self, "Flushing");
+
+ /* Ensure the processing thread has stopped for the reverse playback
+ * iscount case */
+ if (g_atomic_int_get (&self->processing)) {
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+ gst_v4l2_object_unlock_stop (self->v4l2output);
+ gst_v4l2_object_unlock_stop (self->v4l2capture);
+#ifdef USE_V4L2_TARGET_NV
+ set_v4l2_video_mpeg_class (self->v4l2capture,
+ V4L2_CID_MPEG_SET_POLL_INTERRUPT, 0);
+#endif
+ gst_pad_stop_task (encoder->srcpad);
+
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+ }
+
+ self->output_flow = GST_FLOW_OK;
+
+ gst_v4l2_object_unlock_stop (self->v4l2output);
+ gst_v4l2_object_unlock_stop (self->v4l2capture);
+
+ return TRUE;
+}
+
/* Context passed to negotiate_profile_and_level() while iterating the
 * allowed caps; profile/level receive the negotiated strings (borrowed
 * from the subclass's *_to_string tables, not allocated). */
struct ProfileLevelCtx
{
  GstV4l2VideoEnc *self;
  const gchar *profile;
  const gchar *level;
};
+
+static gboolean
+get_string_list (GstStructure * s, const gchar * field, GQueue * queue)
+{
+ const GValue *value;
+
+ value = gst_structure_get_value (s, field);
+
+ if (!value)
+ return FALSE;
+
+ if (GST_VALUE_HOLDS_LIST (value)) {
+ guint i;
+
+ if (gst_value_list_get_size (value) == 0)
+ return FALSE;
+
+ for (i = 0; i < gst_value_list_get_size (value); i++) {
+ const GValue *item = gst_value_list_get_value (value, i);
+
+ if (G_VALUE_HOLDS_STRING (item))
+ g_queue_push_tail (queue, g_value_dup_string (item));
+ }
+ } else if (G_VALUE_HOLDS_STRING (value)) {
+ g_queue_push_tail (queue, g_value_dup_string (value));
+ }
+
+ return TRUE;
+}
+
+static gboolean
+negotiate_profile_and_level (GstCapsFeatures * features, GstStructure * s,
+ gpointer user_data)
+{
+ struct ProfileLevelCtx *ctx = user_data;
+ GstV4l2VideoEncClass *klass = GST_V4L2_VIDEO_ENC_GET_CLASS (ctx->self);
+ GstV4l2Object *v4l2object = GST_V4L2_VIDEO_ENC (ctx->self)->v4l2output;
+ GQueue profiles = G_QUEUE_INIT;
+ GQueue levels = G_QUEUE_INIT;
+ gboolean failed = FALSE;
+
+ if (klass->profile_cid && get_string_list (s, "profile", &profiles)) {
+ GList *l;
+
+ for (l = profiles.head; l; l = l->next) {
+ struct v4l2_control control = { 0, };
+ gint v4l2_profile;
+ const gchar *profile = l->data;
+
+ GST_TRACE_OBJECT (ctx->self, "Trying profile %s", profile);
+
+ control.id = klass->profile_cid;
+ control.value = v4l2_profile = klass->profile_from_string (profile);
+
+ if (control.value < 0)
+ continue;
+
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_CTRL, &control) < 0) {
+ GST_WARNING_OBJECT (ctx->self, "Failed to set %s profile: '%s'",
+ klass->codec_name, g_strerror (errno));
+ break;
+ }
+
+ profile = klass->profile_to_string (control.value);
+
+ if (control.value == v4l2_profile) {
+ ctx->profile = profile;
+ break;
+ }
+
+ if (g_list_find_custom (l, profile, g_str_equal)) {
+ ctx->profile = profile;
+ break;
+ }
+ }
+
+ if (profiles.length && !ctx->profile)
+ failed = TRUE;
+
+ g_queue_foreach (&profiles, (GFunc) g_free, NULL);
+ g_queue_clear (&profiles);
+ }
+
+ if (!failed && klass->level_cid && get_string_list (s, "level", &levels)) {
+ GList *l;
+
+ for (l = levels.head; l; l = l->next) {
+ struct v4l2_control control = { 0, };
+ gint v4l2_level;
+ const gchar *level = l->data;
+
+ GST_TRACE_OBJECT (ctx->self, "Trying level %s", level);
+
+ control.id = klass->level_cid;
+ control.value = v4l2_level = klass->level_from_string (level);
+
+ if (control.value < 0)
+ continue;
+
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_CTRL, &control) < 0) {
+ GST_WARNING_OBJECT (ctx->self, "Failed to set %s level: '%s'",
+ klass->codec_name, g_strerror (errno));
+ break;
+ }
+
+ level = klass->level_to_string (control.value);
+
+ if (control.value == v4l2_level) {
+ ctx->level = level;
+ break;
+ }
+
+ if (g_list_find_custom (l, level, g_str_equal)) {
+ ctx->level = level;
+ break;
+ }
+ }
+
+ if (levels.length && !ctx->level)
+ failed = TRUE;
+
+ g_queue_foreach (&levels, (GFunc) g_free, NULL);
+ g_queue_clear (&levels);
+ }
+
+ /* If it failed, we continue */
+ return failed;
+}
+
/* Encoder negotiate vfunc: picks a profile/level compatible with both
 * downstream caps and the driver, writes them into the output caps and
 * chains up to the base class. Returns FALSE when nothing compatible is
 * found or the base negotiation fails. */
static gboolean
gst_v4l2_video_enc_negotiate (GstVideoEncoder * encoder)
{
  GstV4l2VideoEncClass *klass = GST_V4L2_VIDEO_ENC_GET_CLASS (encoder);
  GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);
#ifndef USE_V4L2_TARGET_NV
  GstV4l2Object *v4l2object = self->v4l2output;
#endif
  GstCaps *allowed_caps;
  struct ProfileLevelCtx ctx = { self, NULL, NULL };
  GstVideoCodecState *state;
  GstStructure *s;

  GST_DEBUG_OBJECT (self, "Negotiating %s profile and level.",
      klass->codec_name);

  /* Only renegotiate on upstream changes */
  if (self->input_state)
    return TRUE;

  allowed_caps = gst_pad_get_allowed_caps (GST_VIDEO_ENCODER_SRC_PAD (encoder));

  if (allowed_caps) {

    if (gst_caps_is_empty (allowed_caps))
      goto not_negotiated;

    allowed_caps = gst_caps_make_writable (allowed_caps);

    /* negotiate_profile_and_level() will return TRUE on failure to keep
     * iterating, if gst_caps_foreach() returns TRUE it means there was no
     * compatible profile and level in any of the structure */
    if (gst_caps_foreach (allowed_caps, negotiate_profile_and_level, &ctx)) {
      goto no_profile_level;
    }
  }

#ifndef USE_V4L2_TARGET_NV
  /* No downstream constraint: fall back to whatever the driver currently
   * reports via G_CTRL (upstream-only path, compiled out on NV). */
  if (klass->profile_cid && !ctx.profile) {
    struct v4l2_control control = { 0, };

    control.id = klass->profile_cid;

    if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_G_CTRL, &control) < 0)
      goto g_ctrl_failed;

    ctx.profile = klass->profile_to_string (control.value);
  }

  if (klass->level_cid && !ctx.level) {
    struct v4l2_control control = { 0, };

    control.id = klass->level_cid;

    if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_G_CTRL, &control) < 0)
      goto g_ctrl_failed;

    ctx.level = klass->level_to_string (control.value);
  }
#endif

  if (allowed_caps)
    gst_caps_unref (allowed_caps);

  /* NOTE(review): on the NV build ctx.profile/ctx.level may still be NULL
   * here (no G_CTRL fallback); gst_structure_set with a NULL string drops
   * the field rather than crashing — confirm this is the intent. */
  GST_DEBUG_OBJECT (self, "Selected %s profile %s at level %s",
      klass->codec_name, ctx.profile, ctx.level);

  state = gst_video_encoder_get_output_state (encoder);
  s = gst_caps_get_structure (state->caps, 0);

  if (klass->profile_cid)
    gst_structure_set (s, "profile", G_TYPE_STRING, ctx.profile, NULL);

  if (klass->level_cid)
    gst_structure_set (s, "level", G_TYPE_STRING, ctx.level, NULL);

  gst_video_codec_state_unref (state);

  if (!GST_VIDEO_ENCODER_CLASS (parent_class)->negotiate (encoder))
    return FALSE;

  return TRUE;
#ifndef USE_V4L2_TARGET_NV
g_ctrl_failed:
  GST_WARNING_OBJECT (self, "Failed to get %s profile and level: '%s'",
      klass->codec_name, g_strerror (errno));
  goto not_negotiated;
#endif

no_profile_level:
  GST_WARNING_OBJECT (self, "No compatible level and profile in caps: %"
      GST_PTR_FORMAT, allowed_caps);
  goto not_negotiated;

not_negotiated:
  if (allowed_caps)
    gst_caps_unref (allowed_caps);
  return FALSE;
}
+
+#ifdef USE_V4L2_TARGET_NV
/* Find the pending codec frame whose PTS is closest to @buf's PTS.
 * Takes ownership of @frames (the list and its refs are released here);
 * the returned frame carries a new reference, or NULL when no frame
 * matches and slice-output mode is off. */
GstVideoCodecFrame *
gst_v4l2_video_enc_find_nearest_frame (GstV4l2VideoEnc *self,
    GstBuffer * buf, GList * frames)
{
  GstVideoCodecFrame *best = NULL;
  GstClockTimeDiff best_diff = G_MAXINT64;
  GstClockTime timestamp;
  GList *l;

  timestamp = buf->pts;

  /* Linear scan for the minimum absolute PTS distance; exact match stops
   * early. */
  for (l = frames; l; l = l->next) {
    GstVideoCodecFrame *tmp = l->data;
    GstClockTimeDiff diff = ABS (GST_CLOCK_DIFF (timestamp, tmp->pts));

    if (diff < best_diff) {
      best = tmp;
      best_diff = diff;

      if (diff == 0)
        break;
    }
  }

  /* For slice output mode, video encoder will ouput multi buffer with
   * one input buffer. Which will cause frames list haven't any entry.
   * So the best will be NULL. Video bit stream will be discard in
   * function gst_v4l2_video_enc_loop() when the best is NULL.
   * Here reuse previous frame when the best is NULL to handle discard
   * bit stream issue when slice output mode enabled. */
  /* Video encoder will output the same PTS for slices. Reuse previous
   * frame for the same PTS slices */
  if (self->slice_output && self->best_prev
      && GST_CLOCK_TIME_IS_VALID (buf->pts)
      && GST_CLOCK_TIME_IS_VALID (self->buf_pts_prev)
      && buf->pts == self->buf_pts_prev)
    best = NULL;
  self->buf_pts_prev = buf->pts;

  if (best) {
    gst_video_codec_frame_ref (best);
    /* Cache the match so later slices of the same PTS can reuse it. */
    if (self->slice_output) {
      if (self->best_prev)
        gst_video_codec_frame_unref (self->best_prev);
      self->best_prev = gst_video_codec_frame_ref (best);
    }
  } else if (self->slice_output) {
    /* NOTE(review): if no frame ever matched, best_prev is still NULL here
     * and this ref would crash — presumably the first slice always matches
     * a pending frame; confirm. */
    best = gst_video_codec_frame_ref (self->best_prev);
    /* Presentation_frame_number == 0 means discontinues. Need avoid it */
    if (best->presentation_frame_number == 0)
      best->presentation_frame_number = 1;
  }

  g_list_foreach (frames, (GFunc) gst_video_codec_frame_unref, NULL);
  g_list_free (frames);

  return best;
}
+#else
+static GstVideoCodecFrame *
+gst_v4l2_video_enc_get_oldest_frame (GstVideoEncoder * encoder)
+{
+ GstVideoCodecFrame *frame = NULL;
+ GList *frames, *l;
+ gint count = 0;
+
+ frames = gst_video_encoder_get_frames (encoder);
+
+ for (l = frames; l != NULL; l = l->next) {
+ GstVideoCodecFrame *f = l->data;
+
+ if (!frame || frame->pts > f->pts)
+ frame = f;
+
+ count++;
+ }
+
+ if (frame) {
+ GST_LOG_OBJECT (encoder,
+ "Oldest frame is %d %" GST_TIME_FORMAT
+ " and %d frames left",
+ frame->system_frame_number, GST_TIME_ARGS (frame->pts), count - 1);
+ gst_video_codec_frame_ref (frame);
+ }
+
+ g_list_free_full (frames, (GDestroyNotify) gst_video_codec_frame_unref);
+
+ return frame;
+}
+#endif
+
+static void
+gst_v4l2_video_enc_loop (GstVideoEncoder * encoder)
+{
+ GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);
+ GstVideoCodecFrame *frame;
+ GstBuffer *buffer = NULL;
+ GstFlowReturn ret;
+#ifdef USE_V4L2_TARGET_NV
+ struct timeval ts;
+ guint64 done_time;
+ guint64 *in_time_pt;
+#endif
+
+ GST_LOG_OBJECT (encoder, "Allocate output buffer");
+
+ buffer = gst_video_encoder_allocate_output_buffer (encoder,
+ self->v4l2capture->info.size);
+
+ if (NULL == buffer) {
+ ret = GST_FLOW_FLUSHING;
+ goto beach;
+ }
+
+#ifdef USE_V4L2_TARGET_NV
+ if (is_cuvid == TRUE && self->force_idr) {
+ if (!set_v4l2_video_mpeg_class (self->v4l2output,
+ V4L2_CID_MPEG_VIDEOENC_FORCE_IDR_FRAME, 1)) {
+ g_print ("S_EXT_CTRLS for FORCE_IDR_FRAME failed\n");
+ return;
+ }
+ self->force_idr = FALSE;
+ }
+ if (is_cuvid == TRUE && self->force_intra) {
+ if (!set_v4l2_video_mpeg_class (self->v4l2output,
+ V4L2_CID_MPEG_VIDEOENC_FORCE_INTRA_FRAME, 1)) {
+ g_print ("S_EXT_CTRLS for FORCE_INTRA_FRAME failed\n");
+ return;
+ }
+ self->force_intra = FALSE;
+ }
+#endif
+
+ /* FIXME Check if buffer isn't the last one here */
+
+ GST_LOG_OBJECT (encoder, "Process output buffer");
+ ret =
+ gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL
+ (self->v4l2capture->pool), &buffer);
+
+ if (ret != GST_FLOW_OK)
+ goto beach;
+
+#ifdef USE_V4L2_TARGET_NV
+ frame = gst_v4l2_video_enc_find_nearest_frame (self, buffer,
+ gst_video_encoder_get_frames (GST_VIDEO_ENCODER (self)));
+#else
+ frame = gst_v4l2_video_enc_get_oldest_frame (encoder);
+#endif
+
+ if (frame) {
+ frame->output_buffer = buffer;
+ buffer = NULL;
+
+ if(enable_latency_measurement) /* TODO with better option */
+ {
+ gpointer in_time = g_hash_table_lookup (self->hash_pts_systemtime,
+ &frame->pts);
+ gdouble input_time = *((gdouble*)in_time);
+ gdouble output_time = get_current_system_timestamp ();
+ if (output_time < input_time)
+ {
+ gdouble time = G_MAXDOUBLE - input_time;
+ g_print ("Encode Latency = %f \n", output_time + time);
+ }
+ else
+ {
+ g_print ("Encode Latency = %f \n", (output_time - input_time));
+ }
+ GstCaps *reference = gst_caps_new_simple ("video/x-raw",
+ "component_name", G_TYPE_STRING, GST_ELEMENT_NAME(self),
+ /*"frame_num", G_TYPE_INT, self->frame_num++,*/
+ "in_timestamp", G_TYPE_DOUBLE, input_time,
+ "out_timestamp", G_TYPE_DOUBLE, output_time,
+ NULL);
+ GstReferenceTimestampMeta * enc_meta =
+ gst_buffer_add_reference_timestamp_meta (frame->output_buffer, reference,
+ 0, 0);
+ if(enc_meta == NULL)
+ {
+ GST_DEBUG_OBJECT (encoder, "enc_meta: %p", enc_meta);
+ }
+ gst_caps_unref(reference);
+ }
+
+#ifdef USE_V4L2_TARGET_NV
+
+ if (self->copy_meta == TRUE)
+ {
+ if (!gst_buffer_copy_into (frame->output_buffer, frame->input_buffer,
+ (GstBufferCopyFlags)GST_BUFFER_COPY_METADATA, 0, -1)) {
+ GST_DEBUG_OBJECT (encoder, "Buffer metadata copy failed \n");
+ }
+ }
+
+ if (self->tracing_file_enc) {
+ gettimeofday (&ts, NULL);
+ done_time = ((gint64) ts.tv_sec * 1000000 + ts.tv_usec) / 1000;
+
+ in_time_pt = g_queue_pop_head (self->got_frame_pt);
+ gst_v4l2_trace_printf (self->tracing_file_enc,
+ "KPI: v4l2: frameNumber= %lld encoder= %lld ms pts= %lld\n",
+ frame->system_frame_number, done_time - *in_time_pt, frame->pts);
+
+ g_free (in_time_pt);
+ }
+#endif
+
+ ret = gst_video_encoder_finish_frame (encoder, frame);
+
+ if (ret != GST_FLOW_OK)
+ goto beach;
+ } else {
+ GST_WARNING_OBJECT (encoder, "Encoder is producing too many buffers");
+ gst_buffer_unref (buffer);
+ }
+
+ return;
+
+beach:
+ GST_DEBUG_OBJECT (encoder, "Leaving output thread");
+
+ gst_buffer_replace (&buffer, NULL);
+ self->output_flow = ret;
+ g_atomic_int_set (&self->processing, FALSE);
+ gst_v4l2_object_unlock (self->v4l2output);
+ gst_pad_pause_task (encoder->srcpad);
+}
+
+static void
+gst_v4l2_video_enc_loop_stopped (GstV4l2VideoEnc * self)
+{
+ if (g_atomic_int_get (&self->processing)) {
+ GST_DEBUG_OBJECT (self, "Early stop of encoding thread");
+ self->output_flow = GST_FLOW_FLUSHING;
+ g_atomic_int_set (&self->processing, FALSE);
+ }
+
+ GST_DEBUG_OBJECT (self, "Encoding task destroyed: %s",
+ gst_flow_get_name (self->output_flow));
+
+}
+
/* Encoder handle_frame vfunc: (re)starts the output task if needed,
 * attaches optional SEI payload from input metadata, and queues the raw
 * input buffer to the v4l2 output plane. Consumes the frame reference on
 * every path. */
static GstFlowReturn
gst_v4l2_video_enc_handle_frame (GstVideoEncoder * encoder,
    GstVideoCodecFrame * frame)
{
  GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);
  GstFlowReturn ret = GST_FLOW_OK;
  GstTaskState task_state;
#ifdef USE_V4L2_TARGET_NV
  guint64 *in_time;
  struct timeval ts;
#endif

  GST_DEBUG_OBJECT (self, "Handling frame %d", frame->system_frame_number);

#ifdef USE_V4L2_TARGET_NV
  /* KPI tracing: record the arrival time (ms); the matching pop/free is
   * done in the output loop. */
  if (self->tracing_file_enc) {
    gettimeofday (&ts, NULL);
    in_time = g_malloc (sizeof (guint64));
    *in_time = ((gint64) ts.tv_sec * 1000000 + ts.tv_usec) / 1000;
    g_queue_push_tail (self->got_frame_pt, in_time);
  }
#endif

  if (enable_latency_measurement)
  {
    /* NOTE(review): key is the address of frame->pts and the value aliases
     * the single self->buffer_in_time field, so concurrent in-flight frames
     * share one timestamp — confirm this is acceptable for the metric. */
    self->buffer_in_time = get_current_system_timestamp ();
    g_hash_table_insert (self->hash_pts_systemtime, &frame->pts, &self->buffer_in_time);
  }

  if (G_UNLIKELY (!g_atomic_int_get (&self->active)))
    goto flushing;

  task_state = gst_pad_get_task_state (GST_VIDEO_DECODER_SRC_PAD (self));
  if (task_state == GST_TASK_STOPPED || task_state == GST_TASK_PAUSED) {
    GstBufferPool *pool = GST_BUFFER_POOL (self->v4l2output->pool);

    /* It possible that the processing thread stopped due to an error */
    if (self->output_flow != GST_FLOW_OK &&
        self->output_flow != GST_FLOW_FLUSHING &&
        self->output_flow != GST_V4L2_FLOW_LAST_BUFFER) {
      GST_DEBUG_OBJECT (self, "Processing loop stopped with error, leaving");
      ret = self->output_flow;
      goto drop;
    }

    /* Ensure input internal pool is active */
    if (!gst_buffer_pool_is_active (pool)) {
      GstStructure *config = gst_buffer_pool_get_config (pool);
      guint min = MAX (self->v4l2output->min_buffers, GST_V4L2_MIN_BUFFERS);

      gst_buffer_pool_config_set_params (config, self->input_state->caps,
          self->v4l2output->info.size, min, min);

      /* There is no reason to refuse this config */
      if (!gst_buffer_pool_set_config (pool, config))
        goto activate_failed;

      if (!gst_buffer_pool_set_active (pool, TRUE))
        goto activate_failed;
    }

#ifdef USE_V4L2_TARGET_NV
    /* NOTE(review): arms the NV poll-interrupt control before starting the
     * output task; semantics defined by v4l2_nv_extensions.h — confirm. */
    set_v4l2_video_mpeg_class (self->v4l2capture,
        V4L2_CID_MPEG_SET_POLL_INTERRUPT, 1);
#endif

    GST_DEBUG_OBJECT (self, "Starting encoding thread");

    /* Start the processing task, when it quits, the task will disable input
     * processing to unlock input if draining, or prevent potential block */
    if (!gst_pad_start_task (encoder->srcpad,
        (GstTaskFunction) gst_v4l2_video_enc_loop, self,
        (GDestroyNotify) gst_v4l2_video_enc_loop_stopped))
      goto start_task_failed;
  }

  if (frame->input_buffer) {

    /* Forward user SEI metadata (if attached upstream) to the output
     * plane so the driver can embed it in the bitstream. */
    GstVideoSEIMeta *meta =
        (GstVideoSEIMeta *) gst_buffer_get_meta (frame->input_buffer,
        GST_VIDEO_SEI_META_API_TYPE);
    if (!meta)
    {
      GST_DEBUG ("NO META RETRIEVED BY ENCODER\n");
    }
    else
    {
      uint32_t total_metadata_size = meta->sei_metadata_size;
      GST_DEBUG_OBJECT (self, "total metadata size = %d\n", total_metadata_size);
      self->v4l2output->sei_payload_size = 0;
      self->v4l2output->sei_payload = NULL;
      if (meta->sei_metadata_type == (guint)GST_USER_SEI_META)
      {
        self->v4l2output->sei_payload_size = meta->sei_metadata_size;
        self->v4l2output->sei_payload = (void *) meta->sei_metadata_ptr;
      }
    }

    /* Queuing may block until a v4l2 buffer is free; drop the stream lock
     * so the output loop can make progress meanwhile. */
    GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL
        (self->v4l2output->pool), &frame->input_buffer);
    GST_VIDEO_ENCODER_STREAM_LOCK (encoder);

    if (ret == GST_FLOW_FLUSHING) {
      /* Pick up the real failure reason if the output task already died. */
      if (gst_pad_get_task_state (GST_VIDEO_DECODER_SRC_PAD (self)) !=
          GST_TASK_STARTED)
        ret = self->output_flow;
      goto drop;
    } else if (ret != GST_FLOW_OK) {
      goto process_failed;
    }
  }

  gst_video_codec_frame_unref (frame);
  return ret;

  /* ERRORS */
activate_failed:
  {
    /* NOTE(review): this path returns without finishing the frame — the
     * frame reference appears to leak here; confirm against upstream. */
    GST_ELEMENT_ERROR (self, RESOURCE, SETTINGS,
        (_("Failed to allocate required memory.")),
        ("Buffer pool activation failed"));
    return GST_FLOW_ERROR;

  }
flushing:
  {
    ret = GST_FLOW_FLUSHING;
    goto drop;
  }
start_task_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to start encoding thread.")), (NULL));
    g_atomic_int_set (&self->processing, FALSE);
    ret = GST_FLOW_ERROR;
    goto drop;
  }
process_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to process frame.")),
        ("Maybe be due to not enough memory or failing driver"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
drop:
  {
    gst_video_encoder_finish_frame (encoder, frame);
    return ret;
  }
}
+
/* Encoder decide_allocation vfunc: (re)sets the capture-plane format from
 * the negotiated output caps, lets the v4l2 object decide the allocation
 * and reports an estimated latency. Returns FALSE on failure. */
static gboolean
gst_v4l2_video_enc_decide_allocation (GstVideoEncoder *
    encoder, GstQuery * query)
{
  GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);
  GstVideoCodecState *state = gst_video_encoder_get_output_state (encoder);
  GstV4l2Error error = GST_V4L2_ERROR_INIT;
  GstClockTime latency;
  gboolean ret = FALSE;

#ifdef USE_V4L2_TARGET_NV
  /* Deactivate a previously active capture pool before changing the
   * format (renegotiation case). */
  if (self->v4l2capture->active) {
    if (self->v4l2capture->pool) {
      GST_DEBUG_OBJECT (self->v4l2capture->dbg_obj, "deactivating pool");
      gst_buffer_pool_set_active (self->v4l2capture->pool, FALSE);
    }
    GST_V4L2_SET_INACTIVE (self->v4l2capture);
  }
#endif

  /* We need to set the format here, since this is called right after
   * GstVideoEncoder have set the width, height and framerate into the state
   * caps. These are needed by the driver to calculate the buffer size and to
   * implement bitrate adaptation. */
  if (!gst_v4l2_object_set_format (self->v4l2capture, state->caps, &error)) {
    gst_v4l2_error (self, &error);
    ret = FALSE;
    goto done;
  }

  if (gst_v4l2_object_decide_allocation (self->v4l2capture, query)) {
    GstVideoEncoderClass *enc_class = GST_VIDEO_ENCODER_CLASS (parent_class);
    ret = enc_class->decide_allocation (encoder, query);
  }

  /* FIXME This may not be entirely correct, as encoder may keep some
   * observation withouth delaying the encoding. Linux Media API need some
   * more work to explicitly expressed the decoder / encoder latency. This
   * value will then become max latency, and the reported driver latency would
   * become the min latency. */
  latency = self->v4l2capture->min_buffers * self->v4l2capture->duration;
  gst_video_encoder_set_latency (encoder, latency, latency);

done:
  gst_video_codec_state_unref (state);
  return ret;
}
+
+static gboolean
+gst_v4l2_video_enc_propose_allocation (GstVideoEncoder *
+ encoder, GstQuery * query)
+{
+ GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);
+ gboolean ret = FALSE;
+
+ GST_DEBUG_OBJECT (self, "called");
+
+ if (query == NULL)
+ ret = TRUE;
+ else
+ ret = gst_v4l2_object_propose_allocation (self->v4l2output, query);
+
+ if (ret)
+ ret = GST_VIDEO_ENCODER_CLASS (parent_class)->propose_allocation (encoder,
+ query);
+
+ return ret;
+}
+
/* Encoder src_query vfunc: answers CAPS from the probed src caps
 * (upstream build only), swallows SEEKING as an NV workaround, and chains
 * up for everything else. */
static gboolean
gst_v4l2_video_enc_src_query (GstVideoEncoder * encoder, GstQuery * query)
{
  gboolean ret = TRUE;
  switch (GST_QUERY_TYPE (query)) {
#ifndef USE_V4L2_TARGET_NV
    case GST_QUERY_CAPS:{
      GstCaps *filter, *result = NULL;
      GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);
      GstPad *pad = GST_VIDEO_ENCODER_SRC_PAD (encoder);

      gst_query_parse_caps (query, &filter);

      /* FIXME Try and not probe the entire encoder, but only the implement
       * subclass format */
      if (self->probed_srccaps) {
        GstCaps *tmpl = gst_pad_get_pad_template_caps (pad);
        result = gst_caps_intersect (tmpl, self->probed_srccaps);
        gst_caps_unref (tmpl);
      } else
        result = gst_pad_get_pad_template_caps (pad);

      if (filter) {
        GstCaps *tmp = result;
        result =
            gst_caps_intersect_full (filter, tmp, GST_CAPS_INTERSECT_FIRST);
        gst_caps_unref (tmp);
      }

      GST_DEBUG_OBJECT (self, "Returning src caps %" GST_PTR_FORMAT, result);

      gst_query_set_caps_result (query, result);
      gst_caps_unref (result);
      break;
    }
#endif

    /*
     * Drop upstream "GST_QUERY_SEEKING" query from h264parse element.
     * This is a WAR to avoid memory leaks from h264parse element
     */
    case GST_QUERY_SEEKING:{
      /* Returns TRUE without answering the query (deliberate). */
      return ret;
    }

    default:
      ret = GST_VIDEO_ENCODER_CLASS (parent_class)->src_query (encoder, query);
      break;
  }

  return ret;
}
+
/* Encoder sink_query vfunc: answers CAPS from the probed sink caps
 * (upstream build only) and chains up for everything else. */
static gboolean
gst_v4l2_video_enc_sink_query (GstVideoEncoder * encoder, GstQuery * query)
{
  gboolean ret = TRUE;

  switch (GST_QUERY_TYPE (query)) {
#ifndef USE_V4L2_TARGET_NV
    case GST_QUERY_CAPS:{
      GstCaps *filter, *result = NULL;
      GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);
      GstPad *pad = GST_VIDEO_ENCODER_SINK_PAD (encoder);

      gst_query_parse_caps (query, &filter);

      if (self->probed_sinkcaps)
        result = gst_caps_ref (self->probed_sinkcaps);
      else
        result = gst_pad_get_pad_template_caps (pad);

      if (filter) {
        GstCaps *tmp = result;
        result =
            gst_caps_intersect_full (filter, tmp, GST_CAPS_INTERSECT_FIRST);
        gst_caps_unref (tmp);
      }

      GST_DEBUG_OBJECT (self, "Returning sink caps %" GST_PTR_FORMAT, result);

      gst_query_set_caps_result (query, result);
      gst_caps_unref (result);
      break;
    }
#endif

    default:
      ret = GST_VIDEO_ENCODER_CLASS (parent_class)->sink_query (encoder, query);
      break;
  }

  return ret;
}
+
/* Encoder sink_event vfunc: unlocks both queues on FLUSH_START before
 * chaining up, stops the output task afterwards, and applies NV custom
 * events (bitrate / force-IDR / force-intra / GOP updates) via v4l2
 * controls. */
static gboolean
gst_v4l2_video_enc_sink_event (GstVideoEncoder * encoder, GstEvent * event)
{
  GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (encoder);
  gboolean ret;

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_FLUSH_START:
      GST_DEBUG_OBJECT (self, "flush start");
      /* Wake up any blocked queue operation so the task can wind down. */
      gst_v4l2_object_unlock (self->v4l2output);
      gst_v4l2_object_unlock (self->v4l2capture);
      break;
    default:
      break;
  }

  ret = GST_VIDEO_ENCODER_CLASS (parent_class)->sink_event (encoder, event);

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_FLUSH_START:
#ifdef USE_V4L2_TARGET_NV
      set_v4l2_video_mpeg_class (self->v4l2capture,
          V4L2_CID_MPEG_SET_POLL_INTERRUPT, 0);
#endif
      gst_pad_stop_task (encoder->srcpad);
      GST_DEBUG_OBJECT (self, "flush start done");
      /* NOTE(review): implicit fallthrough into default (which is empty);
       * harmless, but an explicit break would be clearer. */
    default:
      break;
  }

#ifdef USE_V4L2_TARGET_NV
  /* NV custom events: each parse_* fills the target field on self; the
   * stream_id out-param is never freed here — confirm the parse functions
   * return a borrowed pointer rather than an allocation. */
  if ((GstNvCustomEventType)GST_EVENT_TYPE (event) == GST_NVEVENT_ENC_BITRATE_UPDATE) {
    gchar* stream_id = NULL;
    gst_nvevent_parse_enc_bitrate_update (event, &stream_id, &self->bitrate);
    if (GST_V4L2_IS_OPEN (self->v4l2output)) {
      if (!set_v4l2_video_mpeg_class (self->v4l2output,
          V4L2_CID_MPEG_VIDEO_BITRATE, self->bitrate)) {
        g_print ("S_EXT_CTRLS for BITRATE failed\n");
      }
    }
  }

  if ((GstNvCustomEventType)GST_EVENT_TYPE (event) == GST_NVEVENT_ENC_FORCE_IDR) {
    gchar* stream_id = NULL;
    /* Only records the request; applied by the output loop. */
    gst_nvevent_parse_enc_force_idr (event, &stream_id, &self->force_idr);
  }

  if ((GstNvCustomEventType)GST_EVENT_TYPE (event) == GST_NVEVENT_ENC_FORCE_INTRA) {
    gchar* stream_id = NULL;
    gst_nvevent_parse_enc_force_intra (event, &stream_id, &self->force_intra);
  }

  if ((GstNvCustomEventType)GST_EVENT_TYPE (event) == GST_NVEVENT_ENC_IFRAME_INTERVAL_UPDATE) {
    gchar* stream_id = NULL;
    gst_nvevent_parse_enc_iframeinterval_update (event, &stream_id, &self->iframeinterval);
    if (!set_v4l2_video_mpeg_class (self->v4l2output,
        V4L2_CID_MPEG_VIDEO_GOP_SIZE, self->iframeinterval)) {
      g_print ("S_EXT_CTRLS for GOP_SIZE failed\n");
      return FALSE;
    }
  }
#endif

  return ret;
}
+
+static GstStateChangeReturn
+gst_v4l2_video_enc_change_state (GstElement * element,
+ GstStateChange transition)
+{
+ GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (element);
+
+ if (transition == GST_STATE_CHANGE_PAUSED_TO_READY) {
+ g_atomic_int_set (&self->active, FALSE);
+ gst_v4l2_object_unlock (self->v4l2output);
+ gst_v4l2_object_unlock (self->v4l2capture);
+
+#ifdef USE_V4L2_TARGET_NV
+ set_v4l2_video_mpeg_class (self->v4l2capture,
+ V4L2_CID_MPEG_SET_POLL_INTERRUPT, 0);
+#endif
+
+ }
+
+ return GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
+}
+
+
+static void
+gst_v4l2_video_enc_dispose (GObject * object)
+{
+ GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (object);
+
+ gst_caps_replace (&self->probed_sinkcaps, NULL);
+ gst_caps_replace (&self->probed_srccaps, NULL);
+
+ G_OBJECT_CLASS (parent_class)->dispose (object);
+}
+
+static void
+gst_v4l2_video_enc_finalize (GObject * object)
+{
+ GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (object);
+
+ gst_v4l2_object_destroy (self->v4l2capture);
+ gst_v4l2_object_destroy (self->v4l2output);
+
+ G_OBJECT_CLASS (parent_class)->finalize (object);
+}
+
+static void
+gst_v4l2_video_enc_init (GstV4l2VideoEnc * self)
+{
+ /* V4L2 object are created in subinstance_init */
+#ifdef USE_V4L2_TARGET_NV
+ self->ratecontrol = DEFAULT_RATE_CONTROL;
+ self->bitrate = GST_V4L2_VIDEO_ENC_BITRATE_DEFAULT;
+ self->peak_bitrate = GST_V4L2_VIDEO_ENC_PEAK_BITRATE_DEFAULT;
+ self->idrinterval = DEFAULT_IDR_FRAME_INTERVAL;
+ self->iframeinterval = DEFAULT_INTRA_FRAME_INTERVAL;
+ self->quant_i_frames = GST_V4L2_VIDEO_ENC_QUANT_I_FRAMES_DEFAULT;
+ self->quant_p_frames = GST_V4L2_VIDEO_ENC_QUANT_P_FRAMES_DEFAULT;
+ self->quant_b_frames = GST_V4L2_VIDEO_ENC_QUANT_B_FRAMES_DEFAULT;
+ self->MinQpI = (guint) - 1;
+ self->MaxQpI = (guint) - 1;
+ self->MinQpP = (guint) - 1;
+ self->MaxQpP = (guint) - 1;
+ self->MinQpB = (guint) - 1;
+ self->MaxQpB = (guint) - 1;
+ self->set_qpRange = FALSE;
+ self->force_idr = FALSE;
+ self->force_intra = FALSE;
+ self->hw_preset_level = DEFAULT_HW_PRESET_LEVEL;
+ self->virtual_buffer_size = DEFAULT_VBV_SIZE;
+ self->ratecontrol_enable = TRUE;
+ self->maxperf_enable = FALSE;
+ self->measure_latency = FALSE;
+ self->slice_output = FALSE;
+ self->best_prev = NULL;
+ self->buf_pts_prev = GST_CLOCK_STIME_NONE;
+ if (is_cuvid == TRUE)
+ {
+ self->cudaenc_gpu_id = DEFAULT_CUDAENC_GPU_ID;
+ self->cudaenc_preset_id = DEFAULT_CUDAENC_PRESET_ID;
+ self->cudaenc_tuning_info_id = DEFAULT_TUNING_INFO_PRESET;
+ }
+
+ const gchar * latency = g_getenv("NVDS_ENABLE_LATENCY_MEASUREMENT");
+ if(latency)
+ {
+ enable_latency_measurement = TRUE;
+ }
+
+#endif
+}
+
+static void
+gst_v4l2_video_enc_subinstance_init (GTypeInstance * instance, gpointer g_class)
+{
+ GstV4l2VideoEncClass *klass = GST_V4L2_VIDEO_ENC_CLASS (g_class);
+ GstV4l2VideoEnc *self = GST_V4L2_VIDEO_ENC (instance);
+
+ self->v4l2output = gst_v4l2_object_new (GST_ELEMENT (self),
+ GST_OBJECT (GST_VIDEO_ENCODER_SINK_PAD (self)),
+ V4L2_BUF_TYPE_VIDEO_OUTPUT, klass->default_device,
+ gst_v4l2_get_output, gst_v4l2_set_output, NULL);
+ self->v4l2output->no_initial_format = TRUE;
+ self->v4l2output->keep_aspect = FALSE;
+
+ self->v4l2capture = gst_v4l2_object_new (GST_ELEMENT (self),
+ GST_OBJECT (GST_VIDEO_ENCODER_SRC_PAD (self)),
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, klass->default_device,
+ gst_v4l2_get_input, gst_v4l2_set_input, NULL);
+ self->v4l2capture->no_initial_format = TRUE;
+ self->v4l2output->keep_aspect = FALSE;
+}
+
/* Class init: wire up GObject/GstElement/GstVideoEncoder vfuncs, install
 * the encoder properties (different sets for Tegra vs dGPU/CUVID builds),
 * and register the force-IDR action signal on Tegra.
 * NOTE(review): is_cuvid is a file/global flag evaluated once at class-init
 * time — the property set is fixed for the lifetime of the process. */
static void
gst_v4l2_video_enc_class_init (GstV4l2VideoEncClass * klass)
{
  GstElementClass *element_class;
  GObjectClass *gobject_class;
  GstVideoEncoderClass *video_encoder_class;

  parent_class = g_type_class_peek_parent (klass);

  element_class = (GstElementClass *) klass;
  gobject_class = (GObjectClass *) klass;
  video_encoder_class = (GstVideoEncoderClass *) klass;

  GST_DEBUG_CATEGORY_INIT (gst_v4l2_video_enc_debug, "v4l2videoenc", 0,
      "V4L2 Video Encoder");

  gobject_class->dispose = GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_dispose);
  gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_finalize);
  /* Property accessors differ between Tegra and CUVID builds */
  if (is_cuvid == FALSE) {
    gobject_class->set_property =
        GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_set_property_tegra);
    gobject_class->get_property =
        GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_get_property_tegra);
  } else if (is_cuvid == TRUE) {
    gobject_class->set_property =
        GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_set_property_cuvid);
    gobject_class->get_property =
        GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_get_property_cuvid);
  }
#ifdef USE_V4L2_TARGET_NV
  /* Properties common to both Tegra and CUVID builds */
  g_object_class_install_property (gobject_class, PROP_RATE_CONTROL,
      g_param_spec_enum ("control-rate", "ratecontrol",
          "Set control rate for v4l2 encode",
          GST_TYPE_V4L2_VID_ENC_RATECONTROL, DEFAULT_RATE_CONTROL,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
          GST_PARAM_MUTABLE_READY));

  g_object_class_install_property (gobject_class, PROP_BITRATE,
      g_param_spec_uint ("bitrate", "Target Bitrate",
          "Set bitrate for v4l2 encode",
          0, G_MAXUINT, GST_V4L2_VIDEO_ENC_BITRATE_DEFAULT,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
          GST_PARAM_MUTABLE_READY));

  g_object_class_install_property (gobject_class, PROP_INTRA_FRAME_INTERVAL,
      g_param_spec_uint ("iframeinterval", "Intra Frame interval",
          "Encoding Intra Frame occurance frequency",
          0, G_MAXUINT, DEFAULT_INTRA_FRAME_INTERVAL,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
          GST_PARAM_MUTABLE_READY));

  g_object_class_install_property (gobject_class, PROP_QUANT_RANGE,
      g_param_spec_string ("qp-range", "qpp-range",
          "Qunatization range for P, I and B frame,\n"
          "\t\t\t Use string with unsigned integer values of Qunatization Range \n"
          "\t\t\t in MinQpP,MaxQpP:MinQpI,MaxQpI:MinQpB,MaxQpB order, to set the property.",
          "-1,-1:-1,-1:-1,-1",
          (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));

  g_object_class_install_property (gobject_class, PROP_IDR_FRAME_INTERVAL,
      g_param_spec_uint ("idrinterval", "IDR Frame interval",
          "Encoding IDR Frame occurance frequency",
          0, G_MAXUINT, DEFAULT_IDR_FRAME_INTERVAL,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
          GST_PARAM_MUTABLE_READY));

  if (is_cuvid == TRUE) {
    /* dGPU (CUVID) only properties */
    g_object_class_install_property (gobject_class, PROP_CUDAENC_GPU_ID,
        g_param_spec_uint ("gpu-id",
            "GPU Device ID",
            "Set to GPU Device ID for Encoder ",
            0,
            G_MAXUINT, DEFAULT_CUDAENC_GPU_ID,
            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY));

    g_object_class_install_property (gobject_class, PROP_CUDAENC_PRESET_ID,
        g_param_spec_uint ("preset-id",
            "CUVID Encoder Preset ID",
            "For each tuning info, seven presets from P1 (highest performance) to P7 (lowest performance) \n"
            "\t\t\thave been provided to control performance/quality trade off. Using these presets will\n"
            "\t\t\tautomatically set all relevant encoding parameters for the selected tuning info ",
            1,
            7, DEFAULT_CUDAENC_PRESET_ID,
            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY));

    g_object_class_install_property (gobject_class, PROP_CUDAENC_CONSTQP,
        g_param_spec_string ("constqp", "CUVID Encoder constqp values",
            "Set unsigned integer values for constqp, control-rate should be set to GST_V4L2_VIDENC_CONSTANT_QP,\n"
            "\t\t\tUse string with values of constQP in constQpI:constQpP:constQpB order, to set the property.",
            "0:0:0",
            (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));

    g_object_class_install_property (gobject_class, PROP_CUDAENC_INITQP,
        g_param_spec_string ("initqp", "CUVID Encoder initqp values",
            "Set unsigned integer values for initqp,\n"
            "\t\t\tUse string with values of initQP in IInitQP:PInitQP:BInitQP order, to set the property.\n"
            "\t\t\tThis provides rough hint to encoder to influence the qp difference between I, P and B",
            "0:0:0",
            (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));

    g_object_class_install_property (gobject_class, PROP_FORCE_IDR,
        g_param_spec_boolean ("force-idr",
            "Force an IDR frame",
            "Force an IDR frame",
            FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
            GST_PARAM_MUTABLE_READY));

    g_object_class_install_property (gobject_class, PROP_CUDAENC_TUNING_INFO_ID,
        g_param_spec_enum ("tuning-info-id", "TuningInfoforHWEncoder",
            "Tuning Info Preset for encoder",
            GST_TYPE_V4L2_VID_ENC_TUNING_INFO_PRESET,
            DEFAULT_TUNING_INFO_PRESET,
            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
            GST_PARAM_MUTABLE_READY));

    g_object_class_install_property (gobject_class, PROP_FORCE_INTRA,
        g_param_spec_boolean ("force-intra",
            "Force an INTRA frame",
            "Force an INTRA frame",
            FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
            GST_PARAM_MUTABLE_READY));

    g_object_class_install_property (gobject_class, PROP_COPY_METADATA,
        g_param_spec_boolean ("copy-meta",
            "Copies input metadata on output buffer",
            "Copies input metadata on output buffer",
            FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
            GST_PARAM_MUTABLE_READY));

  } else if (is_cuvid == FALSE) {
    /* Tegra only properties */
    g_object_class_install_property (gobject_class, PROP_PEAK_BITRATE,
        g_param_spec_uint ("peak-bitrate", "Peak Bitrate",
            "Peak bitrate in variable control-rate\n"
            "\t\t\t The value must be >= bitrate\n"
            "\t\t\t (1.2*bitrate) is set by default(Default: 0)",
            0, G_MAXUINT, GST_V4L2_VIDEO_ENC_PEAK_BITRATE_DEFAULT,
            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
            GST_PARAM_MUTABLE_PLAYING));

    g_object_class_install_property (gobject_class, PROP_QUANT_I_FRAMES,
        g_param_spec_uint ("quant-i-frames", "I-Frame Quantization",
            "Quantization parameter for I-frames (0xffffffff=component default),\n"
            "\t\t\t use with ratecontrol-enable = 0\n"
            "\t\t\t and preset-level = 0",
            0, G_MAXUINT, GST_V4L2_VIDEO_ENC_QUANT_I_FRAMES_DEFAULT,
            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
            GST_PARAM_MUTABLE_READY));

    g_object_class_install_property (gobject_class, PROP_QUANT_P_FRAMES,
        g_param_spec_uint ("quant-p-frames", "P-Frame Quantization",
            "Quantization parameter for P-frames (0xffffffff=component default),\n"
            "\t\t\t use with ratecontrol-enable = 0\n"
            "\t\t\t and preset-level = 0",
            0, G_MAXUINT, GST_V4L2_VIDEO_ENC_QUANT_P_FRAMES_DEFAULT,
            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
            GST_PARAM_MUTABLE_READY));

    g_object_class_install_property (gobject_class, PROP_QUANT_B_FRAMES,
        g_param_spec_uint ("quant-b-frames", "B-Frame Quantization",
            "Quantization parameter for B-frames (0xffffffff=component default),\n"
            "\t\t\t use with ratecontrol-enable = 0\n"
            "\t\t\t and preset-level = 0",
            0, G_MAXUINT, GST_V4L2_VIDEO_ENC_QUANT_B_FRAMES_DEFAULT,
            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
            GST_PARAM_MUTABLE_READY));

    g_object_class_install_property (gobject_class, PROP_HW_PRESET_LEVEL,
        g_param_spec_enum ("preset-level", "HWpresetlevelforencoder",
            "HW preset level for encoder",
            GST_TYPE_V4L2_VID_ENC_HW_PRESET_LEVEL,
            DEFAULT_HW_PRESET_LEVEL,
            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
            GST_PARAM_MUTABLE_READY));

    g_object_class_install_property (gobject_class, PROP_VIRTUAL_BUFFER_SIZE,
        g_param_spec_uint ("vbv-size", "vb size attribute",
            "virtual buffer size ",
            0, G_MAXUINT, DEFAULT_VBV_SIZE,
            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
            GST_PARAM_MUTABLE_READY));

    g_object_class_install_property (gobject_class, PROP_MEASURE_LATENCY,
        g_param_spec_boolean ("MeasureEncoderLatency",
            "Enable Measure Encoder Latency",
            "Enable Measure Encoder latency Per Frame",
            FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
            GST_PARAM_MUTABLE_READY));

    g_object_class_install_property (gobject_class, PROP_RC_ENABLE,
        g_param_spec_boolean ("ratecontrol-enable",
            "Enable or Disable rate control mode",
            "Enable or Disable rate control mode",
            TRUE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
            GST_PARAM_MUTABLE_READY));

    g_object_class_install_property (gobject_class, PROP_MAX_PERF,
        g_param_spec_boolean ("maxperf-enable",
            "Enable or Disable Max Performance mode",
            "Enable or Disable Max Performance mode",
            FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
            GST_PARAM_MUTABLE_READY));

    /* Signals: "force-IDR" action signal, Tegra builds only */
    gst_v4l2_signals[SIGNAL_FORCE_IDR] =
        g_signal_new ("force-IDR",
        G_TYPE_FROM_CLASS (video_encoder_class),
        (GSignalFlags) (G_SIGNAL_RUN_LAST | G_SIGNAL_ACTION),
        G_STRUCT_OFFSET (GstV4l2VideoEncClass, force_IDR),
        NULL, NULL, g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0);

    klass->force_IDR = gst_v4l2_video_encoder_forceIDR;
  }
#endif

  /* GstVideoEncoder virtual methods */
  video_encoder_class->open = GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_open);
  video_encoder_class->close = GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_close);
  video_encoder_class->start = GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_start);
  video_encoder_class->stop = GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_stop);
  video_encoder_class->finish = GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_finish);
  video_encoder_class->flush = GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_flush);
  video_encoder_class->set_format =
      GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_set_format);
  video_encoder_class->negotiate =
      GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_negotiate);
  video_encoder_class->decide_allocation =
      GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_decide_allocation);
  video_encoder_class->propose_allocation =
      GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_propose_allocation);
  video_encoder_class->sink_query =
      GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_sink_query);
  video_encoder_class->src_query =
      GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_src_query);
  video_encoder_class->sink_event =
      GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_sink_event);
  video_encoder_class->handle_frame =
      GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_handle_frame);

  element_class->change_state =
      GST_DEBUG_FUNCPTR (gst_v4l2_video_enc_change_state);
#ifdef USE_V4L2_TARGET_NV
  klass->set_video_encoder_properties = set_v4l2_video_encoder_properties;
#endif

  /* Shared m2m (memory-to-memory) device properties */
  gst_v4l2_object_install_m2m_properties_helper (gobject_class);
#ifdef USE_V4L2_TARGET_NV
  if (is_cuvid == FALSE)
    gst_v4l2_object_install_m2m_enc_iomode_properties_helper (gobject_class);
#endif
}
+
+#ifndef USE_V4L2_TARGET_NV
+static void
+gst_v4l2_video_enc_subclass_init (gpointer g_class, gpointer data)
+{
+ GstV4l2VideoEncClass *klass = GST_V4L2_VIDEO_ENC_CLASS (g_class);
+ GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
+ GstV4l2VideoEncCData *cdata = data;
+
+ klass->default_device = cdata->device;
+
+ /* Note: gst_pad_template_new() take the floating ref from the caps */
+ gst_element_class_add_pad_template (element_class,
+ gst_pad_template_new ("sink", GST_PAD_SINK, GST_PAD_ALWAYS,
+ cdata->sink_caps));
+ gst_element_class_add_pad_template (element_class,
+ gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS,
+ cdata->src_caps));
+
+ gst_caps_unref (cdata->sink_caps);
+ gst_caps_unref (cdata->src_caps);
+
+ g_free (cdata);
+}
+#else
+static void
+gst_v4l2_video_enc_subclass_init (gpointer g_class, gpointer data)
+{
+ GstV4l2VideoEncClass *klass = GST_V4L2_VIDEO_ENC_CLASS (g_class);
+ GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
+ GstV4l2VideoEncCData *cdata = data;
+
+ klass->default_device = cdata->device;
+
+ /* Note: gst_pad_template_new() take the floating ref from the caps */
+ gst_element_class_add_pad_template (element_class,
+ gst_static_pad_template_get (&gst_v4l2enc_sink_template));
+ gst_element_class_add_pad_template (element_class,
+ gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS,
+ cdata->src_caps));
+
+ g_free (cdata);
+}
+#endif
+
+/* Probing functions */
+gboolean
+gst_v4l2_is_video_enc (GstCaps * sink_caps, GstCaps * src_caps,
+ GstCaps * codec_caps)
+{
+ gboolean ret = FALSE;
+ gboolean (*check_caps) (const GstCaps *, const GstCaps *);
+
+ if (codec_caps) {
+ check_caps = gst_caps_can_intersect;
+ } else {
+ codec_caps = gst_v4l2_object_get_codec_caps ();
+ check_caps = gst_caps_is_subset;
+ }
+
+ if (gst_caps_is_subset (sink_caps, gst_v4l2_object_get_raw_caps ())
+ && check_caps (src_caps, codec_caps))
+ ret = TRUE;
+
+ return ret;
+}
+
+void
+gst_v4l2_video_enc_register (GstPlugin * plugin, GType type,
+ const char *codec, const gchar * basename, const gchar * device_path,
+ GstCaps * sink_caps, GstCaps * codec_caps, GstCaps * src_caps)
+{
+ GTypeQuery type_query;
+ GTypeInfo type_info = { 0, };
+ GType subtype;
+ gchar *type_name;
+ GstV4l2VideoEncCData *cdata;
+#ifndef USE_V4L2_TARGET_NV
+ GstCaps *filtered_caps;
+
+ filtered_caps = gst_caps_intersect (src_caps, codec_caps);
+#endif
+
+ cdata = g_new0 (GstV4l2VideoEncCData, 1);
+ cdata->device = g_strdup (device_path);
+#ifndef USE_V4L2_TARGET_NV
+ cdata->sink_caps = gst_caps_ref (sink_caps);
+ cdata->src_caps = gst_caps_ref (filtered_caps);
+#else
+ cdata->sink_caps = gst_caps_ref (gst_static_caps_get(&sink_template_caps));
+ cdata->src_caps = gst_caps_ref (codec_caps);
+#endif
+
+ g_type_query (type, &type_query);
+ memset (&type_info, 0, sizeof (type_info));
+ type_info.class_size = type_query.class_size;
+ type_info.instance_size = type_query.instance_size;
+ type_info.class_init = gst_v4l2_video_enc_subclass_init;
+ type_info.class_data = cdata;
+ type_info.instance_init = gst_v4l2_video_enc_subinstance_init;
+
+ /* The first encoder to be registered should use a constant name, like
+ * v4l2h264enc, for any additional encoders, we create unique names. Encoder
+ * names may change between boots, so this should help gain stable names for
+ * the most common use cases. */
+#ifndef USE_V4L2_TARGET_NV
+ type_name = g_strdup_printf ("v4l2%senc", codec);
+
+ if (g_type_from_name (type_name) != 0) {
+ g_free (type_name);
+ type_name = g_strdup_printf ("v4l2%s%senc", basename, codec);
+ }
+#else
+ type_name = g_strdup_printf ("nvv4l2%senc", codec);
+#endif
+
+ subtype = g_type_register_static (type, type_name, &type_info, 0);
+
+ if (!gst_element_register (plugin, type_name, GST_RANK_PRIMARY + 1, subtype))
+ GST_WARNING ("Failed to register plugin '%s'", type_name);
+
+ g_free (type_name);
+}
+
+
+#ifdef USE_V4L2_TARGET_NV
+gint
+gst_v4l2_trace_file_open (FILE ** TracingFile)
+{
+ gchar buf[4096] = { };
+
+ const gchar *homedir = g_getenv ("HOME");
+ if (!homedir)
+ homedir = g_get_home_dir ();
+
+ if (homedir == NULL)
+ return -1;
+
+ snprintf (buf, sizeof (buf) - 1, "%s/gst_v4l2_enc_latency_%d.log",
+ homedir, (gint) getpid ());
+
+ *TracingFile = fopen (buf, "w");
+
+ if (*TracingFile == NULL) {
+ return -1;
+ }
+ return 0;
+}
+
/* NULL-safe close of the latency trace log. The original also assigned
 * NULL to the local parameter after fclose — a dead store with no effect
 * on the caller's pointer — which is removed here; callers must reset
 * their own handle. */
void
gst_v4l2_trace_file_close (FILE * TracingFile)
{
  if (TracingFile != NULL)
    fclose (TracingFile);
}
+
+void
+gst_v4l2_trace_printf (FILE * TracingFile, const gchar *fmt, ...)
+{
+ va_list ap;
+
+ if (TracingFile != NULL) {
+ va_start (ap, fmt);
+ vfprintf (TracingFile, fmt, ap);
+ fprintf (TracingFile, "\n");
+ fflush (TracingFile);
+ va_end (ap);
+ }
+}
+
+static GType
+gst_v4l2_videnc_tuning_info_get_type (void)
+{
+ static GType qtype = 0;
+
+ if (qtype == 0) {
+ static const GEnumValue values[] = {
+ /*{V4L2_ENC_TUNING_INFO_UNDEFINED,
+ "No Tuning Info", "UndefinedTuningInfo"},*/
+ {V4L2_ENC_TUNING_INFO_HIGH_QUALITY,
+ "Tuning Preset for High Quality", "HighQualityPreset"},
+ {V4L2_ENC_TUNING_INFO_LOW_LATENCY,
+ "Tuning Preset for Low Latency", "LowLatencyPreset"},
+ {V4L2_ENC_TUNING_INFO_ULTRA_LOW_LATENCY,
+ "Tuning Preset for Low Latency", "UltraLowLatencyPreset"},
+ {V4L2_ENC_TUNING_INFO_LOSSLESS,
+ "Tuning Preset for Lossless", "LosslessPreset"},
+ {0, NULL, NULL}
+ };
+
+ qtype = g_enum_register_static ("GstV4L2VideoEncTuingInfoPreset", values);
+ }
+ return qtype;
+}
+
+static GType
+gst_v4l2_videnc_hw_preset_level_get_type (void)
+{
+ static GType qtype = 0;
+
+ if (qtype == 0) {
+ static const GEnumValue values[] = {
+ {V4L2_ENC_HW_PRESET_DISABLE, "Disable HW-Preset",
+ "DisablePreset"},
+ {V4L2_ENC_HW_PRESET_ULTRAFAST, "UltraFastPreset for high perf",
+ "UltraFastPreset"},
+ {V4L2_ENC_HW_PRESET_FAST, "FastPreset", "FastPreset"},
+ {V4L2_ENC_HW_PRESET_MEDIUM, "MediumPreset", "MediumPreset"},
+ {V4L2_ENC_HW_PRESET_SLOW, "SlowPreset", "SlowPreset"},
+ {0, NULL, NULL}
+ };
+
+ qtype = g_enum_register_static ("GstV4L2VideoEncHwPreset", values);
+ }
+ return qtype;
+}
+
+static GType
+gst_v4l2_videnc_ratecontrol_get_type (void)
+{
+ static volatile gsize ratecontrol = 0;
+ if (is_cuvid == false) {
+ static const GEnumValue rc_type[] = {
+ {V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, "GST_V4L2_VIDENC_VARIABLE_BITRATE",
+ "variable_bitrate"},
+ {V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, "GST_V4L2_VIDENC_CONSTANT_BITRATE",
+ "constant_bitrate"},
+ {0, NULL, NULL}
+ };
+
+ if (g_once_init_enter (&ratecontrol)) {
+ GType tmp =
+ g_enum_register_static ("GstV4l2VideoEncRateControlType", rc_type);
+ g_once_init_leave (&ratecontrol, tmp);
+ }
+ } else {
+ static const GEnumValue rc_type_cuvid[] = {
+ {V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, "GST_V4L2_VIDENC_VARIABLE_BITRATE",
+ "variable_bitrate"},
+ {V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, "GST_V4L2_VIDENC_CONSTANT_BITRATE",
+ "constant_bitrate"},
+ {V4L2_MPEG_VIDEO_BITRATE_MODE_CONSTQP, "GST_V4L2_VIDENC_CONSTANT_QP",
+ "constantQP"},
+ {0, NULL, NULL}
+ };
+
+ if (g_once_init_enter (&ratecontrol)) {
+ GType tmp =
+ g_enum_register_static ("GstV4l2VideoEncRateControlType", rc_type_cuvid);
+ g_once_init_leave (&ratecontrol, tmp);
+ }
+ }
+ return (GType) ratecontrol;
+}
+
+static gboolean
+gst_v4l2_video_enc_parse_constqp (GstV4l2VideoEnc * self,
+ const gchar * arr)
+{
+ gchar *str;
+ self->constQpI = atoi (arr);
+ str = g_strstr_len (arr, -1, ":") + 1;
+ self->constQpP = atoi (str);
+ str = g_strstr_len (str, -1, ":") + 1;
+ self->constQpB = atoi (str);
+
+ return TRUE;
+}
+
+static gboolean
+gst_v4l2_video_enc_parse_initqp (GstV4l2VideoEnc * self,
+ const gchar * arr)
+{
+ gchar *str;
+ self->IInitQP = atoi (arr);
+ str = g_strstr_len (arr, -1, ":") + 1;
+ self->PInitQP = atoi (str);
+ str = g_strstr_len (str, -1, ":") + 1;
+ self->BInitQP = atoi (str);
+
+ return TRUE;
+}
+
+static gboolean
+gst_v4l2_video_enc_parse_quantization_range (GstV4l2VideoEnc * self,
+ const gchar * arr)
+{
+ gchar *str;
+ self->MinQpP = atoi (arr);
+ str = g_strstr_len (arr, -1, ",");
+ self->MaxQpP = atoi (str + 1);
+ str = g_strstr_len (str, -1, ":");
+ self->MinQpI = atoi (str + 1);
+ str = g_strstr_len (str, -1, ",");
+ self->MaxQpI = atoi (str + 1);
+ str = g_strstr_len (str, -1, ":");
+ self->MinQpB = atoi (str + 1);
+ str = g_strstr_len (str, -1, ",");
+ self->MaxQpB = atoi (str + 1);
+
+ return TRUE;
+}
+
+gboolean
+setHWPresetType (GstV4l2Object * v4l2object, guint label,
+ enum v4l2_enc_hw_preset_type type)
+{
+ struct v4l2_ext_control control;
+ struct v4l2_ext_controls ctrls;
+ gint ret;
+
+ memset (&control, 0, sizeof (control));
+ memset (&ctrls, 0, sizeof (ctrls));
+
+ ctrls.count = 1;
+ ctrls.controls = &control;
+ ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+
+ control.id = label;
+ control.value = type;
+
+ ret = v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ if (ret < 0) {
+ g_print ("Error while setting control rate\n");
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+gboolean
+setQpRange (GstV4l2Object * v4l2object, guint label, guint MinQpI, guint MaxQpI,
+ guint MinQpP, guint MaxQpP, guint MinQpB, guint MaxQpB)
+{
+ v4l2_ctrl_video_qp_range qprange;
+ struct v4l2_ext_control control;
+ struct v4l2_ext_controls ctrls;
+ gint ret;
+
+ memset (&control, 0, sizeof (control));
+ memset (&ctrls, 0, sizeof (ctrls));
+
+ qprange.MinQpI = MinQpI;
+ qprange.MaxQpI = MaxQpI;
+ qprange.MinQpP = MinQpP;
+ qprange.MaxQpP = MaxQpP;
+ qprange.MinQpB = MinQpB;
+ qprange.MaxQpB = MaxQpB;
+
+ ctrls.count = 1;
+ ctrls.controls = &control;
+ ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+
+ control.id = V4L2_CID_MPEG_VIDEOENC_QP_RANGE;
+ control.string = (gchar *) &qprange;
+
+ ret = v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ if (ret < 0) {
+ g_print ("Error while setting qp range\n");
+ return FALSE;
+ }
+ return TRUE;
+}
+
+static void
+gst_v4l2_video_encoder_forceIDR (GstV4l2VideoEnc * self)
+{
+ GstV4l2Object *v4l2object = self->v4l2output;
+ struct v4l2_ext_control control;
+ struct v4l2_ext_controls ctrls;
+ gint ret;
+
+ memset (&control, 0, sizeof (control));
+ memset (&ctrls, 0, sizeof (ctrls));
+
+ ctrls.count = 1;
+ ctrls.controls = &control;
+ ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+
+ control.id = V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE;
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ g_print ("V4L2 device is not open\n");
+ ret = v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ if (ret < 0)
+ g_print ("Error while signalling force IDR\n");
+}
+
/* Push all configured encoder properties to the driver via S_EXT_CTRLS,
 * in a fixed order (rc-enable before bitrate mode, bitrate before peak).
 * Returns FALSE on the first control that fails. Must be called with the
 * output device already open. */
gboolean
set_v4l2_video_encoder_properties (GstVideoEncoder * encoder)
{
  GstV4l2VideoEnc *video_enc = GST_V4L2_VIDEO_ENC (encoder);

  if (!GST_V4L2_IS_OPEN (video_enc->v4l2output)) {
    g_print ("V4L2 device is not open\n");
    return FALSE;
  }

  /* Derive the peak bitrate: VBR defaults to 1.2x target (or clamps up to
   * the target if set too low); CBR pins peak == target. */
  if (video_enc->ratecontrol == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR
      && video_enc->peak_bitrate == GST_V4L2_VIDEO_ENC_PEAK_BITRATE_DEFAULT)
    video_enc->peak_bitrate = 1.2f * video_enc->bitrate;
  else if (video_enc->ratecontrol == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR
      && video_enc->peak_bitrate <= video_enc->bitrate)
    video_enc->peak_bitrate = video_enc->bitrate;
  else if (video_enc->ratecontrol == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
    video_enc->peak_bitrate = video_enc->bitrate;

  if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
          V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, video_enc->ratecontrol_enable)) {
    g_print ("S_EXT_CTRLS for FRAME_RC_ENABLE failed\n");
    return FALSE;
  }

  /* Bitrate mode only matters when rate control is enabled */
  if (video_enc->ratecontrol_enable) {
    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
            V4L2_CID_MPEG_VIDEO_BITRATE_MODE, video_enc->ratecontrol)) {
      g_print ("S_EXT_CTRLS for BITRATE_MODE failed\n");
      return FALSE;
    }
  }

  if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
          V4L2_CID_MPEG_VIDEO_BITRATE, video_enc->bitrate)) {
    g_print ("S_EXT_CTRLS for BITRATE failed\n");
    return FALSE;
  }

  if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
          V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, video_enc->peak_bitrate)) {
    g_print ("S_EXT_CTRLS for PEAK_BITRATE failed\n");
    return FALSE;
  }

  /* GOP structure: 0 means "leave driver default" for both intervals */
  if (video_enc->idrinterval) {
    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
            V4L2_CID_MPEG_VIDEO_IDR_INTERVAL, video_enc->idrinterval)) {
      g_print ("S_EXT_CTRLS for IDR_INTERVAL failed\n");
      return FALSE;
    }
  }

  if (video_enc->iframeinterval) {
    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
            V4L2_CID_MPEG_VIDEO_GOP_SIZE, video_enc->iframeinterval)) {
      g_print ("S_EXT_CTRLS for GOP_SIZE failed\n");
      return FALSE;
    }
  }

  if (video_enc->hw_preset_level) {
    if (!setHWPresetType (video_enc->v4l2output,
            V4L2_CID_MPEG_VIDEOENC_HW_PRESET_TYPE_PARAM,
            video_enc->hw_preset_level)) {
      g_print ("S_EXT_CTRLS for HW_PRESET_TYPE_PARAM failed\n");
      return FALSE;
    }
  }

  /* Only applied when the qp-range property was explicitly set */
  if (video_enc->set_qpRange) {
    if (!setQpRange (video_enc->v4l2output, V4L2_CID_MPEG_VIDEOENC_QP_RANGE,
            video_enc->MinQpI, video_enc->MaxQpI, video_enc->MinQpP,
            video_enc->MaxQpP, video_enc->MinQpB, video_enc->MaxQpB)) {
      g_print ("S_EXT_CTRLS for QP_RANGE failed\n");
      return FALSE;
    }
  }

  /* Fixed QPs apply only with rate control disabled; 0xffffffff = unset */
  if (video_enc->quant_i_frames != 0xffffffff && !video_enc->ratecontrol_enable) {
    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
            V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, video_enc->quant_i_frames)) {
      g_print ("S_EXT_CTRLS for H264_I_FRAME_QP failed\n");
      return FALSE;
    }
  }

  if (video_enc->quant_p_frames != 0xffffffff && !video_enc->ratecontrol_enable) {
    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
            V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, video_enc->quant_p_frames)) {
      g_print ("S_EXT_CTRLS for H264_P_FRAME_QP failed\n");
      return FALSE;
    }
  }

  if (video_enc->quant_b_frames != 0xffffffff && !video_enc->ratecontrol_enable) {
    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
            V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP, video_enc->quant_b_frames)) {
      g_print ("S_EXT_CTRLS for H264_B_FRAME_QP failed\n");
      return FALSE;
    }
  }

  if (video_enc->maxperf_enable) {
    if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
            V4L2_CID_MPEG_VIDEO_MAX_PERFORMANCE, video_enc->maxperf_enable)) {
      g_print ("S_EXT_CTRLS for MAX_PERFORMANCE failed\n");
      return FALSE;
    }
  }

  if (!set_v4l2_video_mpeg_class (video_enc->v4l2output,
          V4L2_CID_MPEG_VIDEOENC_VIRTUALBUFFER_SIZE,
          video_enc->virtual_buffer_size)) {
    g_print ("S_EXT_CTRLS for VIRTUALBUFFER_SIZE failed\n");
    return FALSE;
  }

  return TRUE;
}
+#endif
diff --git a/gst-v4l2/gstv4l2videoenc.h b/gst-v4l2/gstv4l2videoenc.h
new file mode 100644
index 0000000..c0e88bc
--- /dev/null
+++ b/gst-v4l2/gstv4l2videoenc.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2014 SUMOMO Computer Association.
+ * Author: ayaka
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __GST_V4L2_VIDEO_ENC_H__
+#define __GST_V4L2_VIDEO_ENC_H__
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+G_BEGIN_DECLS
+/* Standard GObject boilerplate casts/checks for the video-encoder base type. */
+#define GST_TYPE_V4L2_VIDEO_ENC \
+ (gst_v4l2_video_enc_get_type())
+#define GST_V4L2_VIDEO_ENC(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_V4L2_VIDEO_ENC,GstV4l2VideoEnc))
+#define GST_V4L2_VIDEO_ENC_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_V4L2_VIDEO_ENC,GstV4l2VideoEncClass))
+#define GST_IS_V4L2_VIDEO_ENC(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_V4L2_VIDEO_ENC))
+/* Fixed: the parameter was named "obj" while the expansion used "klass",
+ * so any use of this macro failed to compile. */
+#define GST_IS_V4L2_VIDEO_ENC_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_V4L2_VIDEO_ENC))
+#define GST_V4L2_VIDEO_ENC_GET_CLASS(obj) \
+ (G_TYPE_INSTANCE_GET_CLASS ((obj), GST_TYPE_V4L2_VIDEO_ENC, GstV4l2VideoEncClass))
+
+typedef struct _GstV4l2VideoEnc GstV4l2VideoEnc;
+typedef struct _GstV4l2VideoEncClass GstV4l2VideoEncClass;
+
+/* Instance state for the shared V4L2 video-encoder base class. */
+struct _GstV4l2VideoEnc
+{
+ GstVideoEncoder parent;
+#ifdef USE_V4L2_TARGET_NV
+ /* Rate-control / bitrate configuration (NVIDIA targets only). */
+ guint32 ratecontrol;
+ guint32 bitrate;
+ guint32 peak_bitrate;
+ guint32 idrinterval;
+ guint32 iframeinterval;
+ /* Fixed per-frame-type QP values; 0xffffffff means "not set" -- see the
+  * quant_*_frames checks against 0xffffffff in gstv4l2videoenc.c. */
+ guint32 quant_i_frames;
+ guint32 quant_p_frames;
+ guint32 quant_b_frames;
+ /* QP range limits; applied only when set_qpRange is TRUE. */
+ guint32 MinQpI;
+ guint32 MaxQpI;
+ guint32 MinQpP;
+ guint32 MaxQpP;
+ guint32 MinQpB;
+ guint32 MaxQpB;
+ /* Constant / initial QP values -- presumably used when rate control is
+  * disabled; confirm against the property handling in gstv4l2videoenc.c. */
+ guint32 constQpI;
+ guint32 constQpP;
+ guint32 constQpB;
+ guint32 IInitQP;
+ guint32 PInitQP;
+ guint32 BInitQP;
+ gboolean set_qpRange;
+ guint32 hw_preset_level;
+ guint virtual_buffer_size;
+ gboolean measure_latency;
+ gboolean ratecontrol_enable;
+ gboolean force_idr;
+ gboolean force_intra;
+ gboolean maxperf_enable;
+ FILE *tracing_file_enc;
+ GQueue *got_frame_pt;
+ /* cudaenc_*: CUDA encoder selection knobs -- semantics not visible here. */
+ guint32 cudaenc_gpu_id;
+ guint32 cudaenc_preset_id;
+ guint32 cudaenc_tuning_info_id;
+ gboolean slice_output;
+ GstVideoCodecFrame *best_prev;
+ GstClockTime buf_pts_prev;
+ gdouble buffer_in_time;
+ GHashTable* hash_pts_systemtime;
+ gboolean copy_meta;
+#endif
+
+ /* < private > */
+ /* Output (bitstream-producing) and capture V4L2 device objects. */
+ GstV4l2Object *v4l2output;
+ GstV4l2Object *v4l2capture;
+
+ /* pads */
+ GstCaps *probed_srccaps;
+ GstCaps *probed_sinkcaps;
+
+ /* State */
+ GstVideoCodecState *input_state;
+ gboolean active;
+ gboolean processing;
+ GstFlowReturn output_flow;
+
+};
+
+/* Class vtable: codec subclasses (H26x/VP8/VP9/AV1) fill in codec_name, the
+ * profile/level control ids and the string conversion hooks. */
+struct _GstV4l2VideoEncClass
+{
+ GstVideoEncoderClass parent_class;
+
+ gchar *default_device;
+ const char *codec_name;
+
+ /* V4L2 control id for the codec profile plus converters between the
+  * caps string form and the V4L2 integer form. */
+ guint32 profile_cid;
+ const gchar *(*profile_to_string) (gint v4l2_profile);
+ gint (*profile_from_string) (const gchar * profile);
+
+#ifdef USE_V4L2_TARGET_NV
+ gboolean (*set_encoder_properties) (GstVideoEncoder * encoder);
+ gboolean (*set_video_encoder_properties) (GstVideoEncoder * encoder);
+#endif
+ /* Same pattern as profile_cid, for the codec level. */
+ guint32 level_cid;
+ const gchar *(*level_to_string) (gint v4l2_level);
+ gint (*level_from_string) (const gchar * level);
+
+#ifdef USE_V4L2_TARGET_NV
+ void (*force_IDR) (GstV4l2VideoEnc *);
+#endif
+};
+
+GType gst_v4l2_video_enc_get_type (void);
+
+
+/* Probing helper: presumably checks whether a device exposing the given
+ * sink/src caps can encode codec_caps -- confirm in gstv4l2videoenc.c. */
+gboolean gst_v4l2_is_video_enc (GstCaps * sink_caps, GstCaps * src_caps,
+ GstCaps * codec_caps);
+
+/* Registers a codec-specific encoder element for device_path; called by the
+ * per-codec *_register wrappers (see e.g. gst_v4l2_vp8_enc_register). */
+void gst_v4l2_video_enc_register (GstPlugin * plugin, GType type,
+ const char *codec, const gchar * basename, const gchar * device_path,
+ GstCaps * sink_caps, GstCaps * codec_caps, GstCaps * src_caps);
+
+#ifdef USE_V4L2_TARGET_NV
+void set_encoder_src_caps (GstVideoEncoder *encoder, GstCaps *input_caps);
+gboolean is_drc (GstVideoEncoder *encoder, GstCaps *input_caps);
+gboolean reconfigure_fps (GstVideoEncoder *encoder, GstCaps *input_caps, guint label);
+#endif
+
+G_END_DECLS
+#endif /* __GST_V4L2_VIDEO_ENC_H__ */
diff --git a/gst-v4l2/gstv4l2vp8enc.c b/gst-v4l2/gstv4l2vp8enc.c
new file mode 100644
index 0000000..f4186c0
--- /dev/null
+++ b/gst-v4l2/gstv4l2vp8enc.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2017 Collabora Inc.
+ * Author: Nicolas Dufresne
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include
+#include
+#include
+#include
+#include
+
+#include "gstv4l2object.h"
+#include "gstv4l2vp8enc.h"
+
+#include
+#include
+
+GST_DEBUG_CATEGORY_STATIC (gst_v4l2_vp8_enc_debug);
+#define GST_CAT_DEFAULT gst_v4l2_vp8_enc_debug
+
+static GstStaticCaps src_template_caps =
+GST_STATIC_CAPS ("video/x-vp8, profile=(string) { 0, 1, 2, 3 }");
+
+enum
+{
+ PROP_0,
+ V4L2_STD_OBJECT_PROPS,
+#ifdef USE_V4L2_TARGET_NV
+ PROP_ENABLE_HEADER,
+#endif
+ /* TODO */
+};
+
+#define gst_v4l2_vp8_enc_parent_class parent_class
+G_DEFINE_TYPE (GstV4l2Vp8Enc, gst_v4l2_vp8_enc, GST_TYPE_V4L2_VIDEO_ENC);
+
+/* GObject set_property vfunc: handles the "enable-headers" property. */
+static void
+gst_v4l2_vp8_enc_set_property (GObject * object,
+ guint prop_id, const GValue * value, GParamSpec * pspec)
+{
+ /* TODO */
+#ifdef USE_V4L2_TARGET_NV
+ GstV4l2Vp8Enc *self = GST_V4L2_VP8_ENC (object);
+ GstV4l2VideoEnc *video_enc = GST_V4L2_VIDEO_ENC (object);
+
+ switch (prop_id) {
+ case PROP_ENABLE_HEADER:
+ /* Keep the element flag and the capture-side GstV4l2Object's
+  * Enable_headers field in sync. */
+ self->EnableHeaders = g_value_get_boolean (value);
+ video_enc->v4l2capture->Enable_headers = g_value_get_boolean (value);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+#endif
+}
+
+/* GObject get_property vfunc: mirrors set_property above. */
+static void
+gst_v4l2_vp8_enc_get_property (GObject * object,
+ guint prop_id, GValue * value, GParamSpec * pspec)
+{
+ /* TODO */
+#ifdef USE_V4L2_TARGET_NV
+ GstV4l2Vp8Enc *self = GST_V4L2_VP8_ENC (object);
+
+ switch (prop_id) {
+ case PROP_ENABLE_HEADER:
+ g_value_set_boolean (value, self->EnableHeaders);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+#endif
+}
+
+/* Map a VP8 profile caps string ("0".."3") to its V4L2 integer value.
+ * Returns -1 and logs a warning for anything else. */
+static gint
+v4l2_profile_from_string (const gchar * profile)
+{
+ static const gchar *profile_names[] = { "0", "1", "2", "3" };
+ gint idx;
+
+ for (idx = 0; idx < 4; idx++) {
+ if (g_str_equal (profile, profile_names[idx]))
+ return idx;
+ }
+
+ GST_WARNING ("Unsupported profile string '%s'", profile);
+ return -1;
+}
+
+/* Map a V4L2 VP8 profile integer (0..3) back to its caps string.
+ * Returns NULL and logs a warning for out-of-range values. */
+static const gchar *
+v4l2_profile_to_string (gint v4l2_profile)
+{
+ static const gchar *profile_names[] = { "0", "1", "2", "3" };
+
+ if (v4l2_profile >= 0 && v4l2_profile <= 3)
+ return profile_names[v4l2_profile];
+
+ GST_WARNING ("Unsupported V4L2 profile %i", v4l2_profile);
+ return NULL;
+}
+
+/* Instance init: nothing to initialise here. */
+static void
+gst_v4l2_vp8_enc_init (GstV4l2Vp8Enc * self)
+{
+}
+
+/* Class init: wires the GObject property vfuncs, installs "enable-headers"
+ * and fills the codec-specific hooks of GstV4l2VideoEncClass. */
+static void
+gst_v4l2_vp8_enc_class_init (GstV4l2Vp8EncClass * klass)
+{
+ GstElementClass *element_class;
+ GObjectClass *gobject_class;
+ GstV4l2VideoEncClass *baseclass;
+
+ parent_class = g_type_class_peek_parent (klass);
+
+ element_class = (GstElementClass *) klass;
+ gobject_class = (GObjectClass *) klass;
+ baseclass = (GstV4l2VideoEncClass *) (klass);
+
+ GST_DEBUG_CATEGORY_INIT (gst_v4l2_vp8_enc_debug, "v4l2vp8enc", 0,
+ "V4L2 VP8 Encoder");
+
+ gst_element_class_set_static_metadata (element_class,
+ "V4L2 VP8 Encoder",
+ "Codec/Encoder/Video",
+ "Encode VP8 video streams via V4L2 API",
+ "Nicolas Dufresne <nicolas.dufresne@collabora.com>");
+
+ /* NOTE(review): the author string and the set_property hookup had been
+  * fused into one garbled line in this patch; reconstructed following the
+  * upstream gstv4l2vp8enc.c layout. */
+ gobject_class->set_property =
+ GST_DEBUG_FUNCPTR (gst_v4l2_vp8_enc_set_property);
+ gobject_class->get_property =
+ GST_DEBUG_FUNCPTR (gst_v4l2_vp8_enc_get_property);
+
+#ifdef USE_V4L2_TARGET_NV
+ g_object_class_install_property (gobject_class, PROP_ENABLE_HEADER,
+ g_param_spec_boolean ("enable-headers",
+ "Enable VP8 headers",
+ "Enable VP8 file and frame headers, if enabled, dump elementary stream",
+ FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ GST_PARAM_MUTABLE_READY));
+#endif
+ baseclass->codec_name = "VP8";
+ baseclass->profile_cid = V4L2_CID_MPEG_VIDEO_VPX_PROFILE;
+ baseclass->profile_to_string = v4l2_profile_to_string;
+ baseclass->profile_from_string = v4l2_profile_from_string;
+}
+
+/* Probing functions */
+gboolean
+gst_v4l2_is_vp8_enc (GstCaps * sink_caps, GstCaps * src_caps)
+{
+ /* Delegate to the generic check using the VP8 src caps template. */
+ return gst_v4l2_is_video_enc (sink_caps, src_caps,
+ gst_static_caps_get (&src_template_caps));
+}
+
+/* Register a "vp8" encoder element for device_path via the base-class
+ * registration helper. */
+void
+gst_v4l2_vp8_enc_register (GstPlugin * plugin, const gchar * basename,
+ const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps)
+{
+ gst_v4l2_video_enc_register (plugin, GST_TYPE_V4L2_VP8_ENC,
+ "vp8", basename, device_path, sink_caps,
+ gst_static_caps_get (&src_template_caps), src_caps);
+}
diff --git a/gst-v4l2/gstv4l2vp8enc.h b/gst-v4l2/gstv4l2vp8enc.h
new file mode 100644
index 0000000..e33c878
--- /dev/null
+++ b/gst-v4l2/gstv4l2vp8enc.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2017 Collabora Inc.
+ * Author: Nicolas Dufresne
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __GST_V4L2_VP8_ENC_H__
+#define __GST_V4L2_VP8_ENC_H__
+
+#include
+#include "gstv4l2videoenc.h"
+
+G_BEGIN_DECLS
+#define GST_TYPE_V4L2_VP8_ENC \
+ (gst_v4l2_vp8_enc_get_type())
+#define GST_V4L2_VP8_ENC(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_V4L2_VP8_ENC,GstV4l2Vp8Enc))
+#define GST_V4L2_VP8_ENC_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_V4L2_VP8_ENC,GstV4l2Vp8EncClass))
+#define GST_IS_V4L2_VP8_ENC(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_V4L2_VP8_ENC))
+/* Fixed: parameter was "obj" while the expansion used "klass". */
+#define GST_IS_V4L2_VP8_ENC_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_V4L2_VP8_ENC))
+typedef struct _GstV4l2Vp8Enc GstV4l2Vp8Enc;
+typedef struct _GstV4l2Vp8EncClass GstV4l2Vp8EncClass;
+
+struct _GstV4l2Vp8Enc
+{
+ GstV4l2VideoEnc parent;
+#ifdef USE_V4L2_TARGET_NV
+ gboolean EnableHeaders;
+#endif
+};
+
+struct _GstV4l2Vp8EncClass
+{
+ GstV4l2VideoEncClass parent_class;
+};
+
+GType gst_v4l2_vp8_enc_get_type (void);
+
+gboolean gst_v4l2_is_vp8_enc (GstCaps * sink_caps, GstCaps * src_caps);
+
+void gst_v4l2_vp8_enc_register (GstPlugin * plugin, const gchar * basename,
+ const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps);
+
+G_END_DECLS
+#endif /* __GST_V4L2_VP8_ENC_H__ */
diff --git a/gst-v4l2/gstv4l2vp9enc.c b/gst-v4l2/gstv4l2vp9enc.c
new file mode 100644
index 0000000..5ec8b8c
--- /dev/null
+++ b/gst-v4l2/gstv4l2vp9enc.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2017 Collabora Inc.
+ * Author: Nicolas Dufresne
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include
+#include
+#include
+#include
+#include
+
+#include "gstv4l2object.h"
+#include "gstv4l2vp9enc.h"
+
+#include
+#include
+
+GST_DEBUG_CATEGORY_STATIC (gst_v4l2_vp9_enc_debug);
+#define GST_CAT_DEFAULT gst_v4l2_vp9_enc_debug
+
+static GstStaticCaps src_template_caps =
+GST_STATIC_CAPS ("video/x-vp9, profile=(string) { 0, 1, 2, 3 }");
+
+enum
+{
+ PROP_0,
+ V4L2_STD_OBJECT_PROPS,
+#ifdef USE_V4L2_TARGET_NV
+ PROP_ENABLE_HEADER,
+#endif
+ /* TODO */
+};
+
+#define gst_v4l2_vp9_enc_parent_class parent_class
+G_DEFINE_TYPE (GstV4l2Vp9Enc, gst_v4l2_vp9_enc, GST_TYPE_V4L2_VIDEO_ENC);
+
+/* GObject set_property vfunc: handles the "enable-headers" property. */
+static void
+gst_v4l2_vp9_enc_set_property (GObject * object,
+ guint prop_id, const GValue * value, GParamSpec * pspec)
+{
+ /* TODO */
+#ifdef USE_V4L2_TARGET_NV
+ GstV4l2Vp9Enc *self = GST_V4L2_VP9_ENC (object);
+ GstV4l2VideoEnc *video_enc = GST_V4L2_VIDEO_ENC (object);
+
+ switch (prop_id) {
+ case PROP_ENABLE_HEADER:
+ /* Keep the element flag and the capture-side GstV4l2Object's
+  * Enable_headers field in sync. */
+ self->EnableHeaders = g_value_get_boolean (value);
+ video_enc->v4l2capture->Enable_headers = g_value_get_boolean (value);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+#endif
+}
+
+/* GObject get_property vfunc: mirrors set_property above. */
+static void
+gst_v4l2_vp9_enc_get_property (GObject * object,
+ guint prop_id, GValue * value, GParamSpec * pspec)
+{
+ /* TODO */
+#ifdef USE_V4L2_TARGET_NV
+ GstV4l2Vp9Enc *self = GST_V4L2_VP9_ENC (object);
+
+ switch (prop_id) {
+ case PROP_ENABLE_HEADER:
+ g_value_set_boolean (value, self->EnableHeaders);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+#endif
+}
+
+/* Map a VP9 profile caps string ("0".."3") to its V4L2 integer value;
+ * returns -1 with a warning for anything else. */
+static gint
+v4l2_profile_from_string (const gchar * profile)
+{
+ gint v4l2_profile = -1;
+
+ if (g_str_equal (profile, "0"))
+ v4l2_profile = 0;
+ else if (g_str_equal (profile, "1"))
+ v4l2_profile = 1;
+ else if (g_str_equal (profile, "2"))
+ v4l2_profile = 2;
+ else if (g_str_equal (profile, "3"))
+ v4l2_profile = 3;
+ else
+ GST_WARNING ("Unsupported profile string '%s'", profile);
+
+ return v4l2_profile;
+}
+
+/* Map a V4L2 VP9 profile integer (0..3) back to its caps string;
+ * returns NULL with a warning for out-of-range values. */
+static const gchar *
+v4l2_profile_to_string (gint v4l2_profile)
+{
+ switch (v4l2_profile) {
+ case 0:
+ return "0";
+ case 1:
+ return "1";
+ case 2:
+ return "2";
+ case 3:
+ return "3";
+ default:
+ GST_WARNING ("Unsupported V4L2 profile %i", v4l2_profile);
+ break;
+ }
+
+ return NULL;
+}
+
+/* Instance init: nothing to initialise here. */
+static void
+gst_v4l2_vp9_enc_init (GstV4l2Vp9Enc * self)
+{
+}
+
+/* Class init: wires the GObject property vfuncs, installs "enable-headers"
+ * and fills the codec-specific hooks of GstV4l2VideoEncClass. */
+static void
+gst_v4l2_vp9_enc_class_init (GstV4l2Vp9EncClass * klass)
+{
+ GstElementClass *element_class;
+ GObjectClass *gobject_class;
+ GstV4l2VideoEncClass *baseclass;
+
+ parent_class = g_type_class_peek_parent (klass);
+
+ element_class = (GstElementClass *) klass;
+ gobject_class = (GObjectClass *) klass;
+ baseclass = (GstV4l2VideoEncClass *) (klass);
+
+ GST_DEBUG_CATEGORY_INIT (gst_v4l2_vp9_enc_debug, "v4l2vp9enc", 0,
+ "V4L2 VP9 Encoder");
+
+ gst_element_class_set_static_metadata (element_class,
+ "V4L2 VP9 Encoder",
+ "Codec/Encoder/Video",
+ "Encode VP9 video streams via V4L2 API",
+ "Nicolas Dufresne <nicolas.dufresne@collabora.com>");
+
+ /* NOTE(review): the author string and the set_property hookup had been
+  * fused into one garbled line in this patch; reconstructed following the
+  * upstream gstv4l2vp9enc.c layout. */
+ gobject_class->set_property =
+ GST_DEBUG_FUNCPTR (gst_v4l2_vp9_enc_set_property);
+ gobject_class->get_property =
+ GST_DEBUG_FUNCPTR (gst_v4l2_vp9_enc_get_property);
+
+#ifdef USE_V4L2_TARGET_NV
+ g_object_class_install_property (gobject_class, PROP_ENABLE_HEADER,
+ g_param_spec_boolean ("enable-headers",
+ "Enable VP9 headers",
+ "Enable VP9 file and frame headers, if enabled, dump elementary stream",
+ FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ GST_PARAM_MUTABLE_READY));
+#endif
+ baseclass->codec_name = "VP9";
+ baseclass->profile_cid = V4L2_CID_MPEG_VIDEO_VPX_PROFILE;
+ baseclass->profile_to_string = v4l2_profile_to_string;
+ baseclass->profile_from_string = v4l2_profile_from_string;
+}
+
+/* Probing functions */
+gboolean
+gst_v4l2_is_vp9_enc (GstCaps * sink_caps, GstCaps * src_caps)
+{
+ /* Delegate to the generic check using the VP9 src caps template. */
+ return gst_v4l2_is_video_enc (sink_caps, src_caps,
+ gst_static_caps_get (&src_template_caps));
+}
+
+/* Register a "vp9" encoder element for device_path via the base-class
+ * registration helper. */
+void
+gst_v4l2_vp9_enc_register (GstPlugin * plugin, const gchar * basename,
+ const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps)
+{
+ gst_v4l2_video_enc_register (plugin, GST_TYPE_V4L2_VP9_ENC,
+ "vp9", basename, device_path, sink_caps,
+ gst_static_caps_get (&src_template_caps), src_caps);
+}
diff --git a/gst-v4l2/gstv4l2vp9enc.h b/gst-v4l2/gstv4l2vp9enc.h
new file mode 100644
index 0000000..5af3ac1
--- /dev/null
+++ b/gst-v4l2/gstv4l2vp9enc.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2017 Collabora Inc.
+ * Author: Nicolas Dufresne
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __GST_V4L2_VP9_ENC_H__
+#define __GST_V4L2_VP9_ENC_H__
+
+#include
+#include "gstv4l2videoenc.h"
+
+G_BEGIN_DECLS
+#define GST_TYPE_V4L2_VP9_ENC \
+ (gst_v4l2_vp9_enc_get_type())
+#define GST_V4L2_VP9_ENC(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_V4L2_VP9_ENC,GstV4l2Vp9Enc))
+#define GST_V4L2_VP9_ENC_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_V4L2_VP9_ENC,GstV4l2Vp9EncClass))
+#define GST_IS_V4L2_VP9_ENC(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_V4L2_VP9_ENC))
+/* Fixed: parameter was "obj" while the expansion used "klass". */
+#define GST_IS_V4L2_VP9_ENC_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_V4L2_VP9_ENC))
+typedef struct _GstV4l2Vp9Enc GstV4l2Vp9Enc;
+typedef struct _GstV4l2Vp9EncClass GstV4l2Vp9EncClass;
+
+struct _GstV4l2Vp9Enc
+{
+ GstV4l2VideoEnc parent;
+#ifdef USE_V4L2_TARGET_NV
+ gboolean EnableHeaders;
+#endif
+};
+
+struct _GstV4l2Vp9EncClass
+{
+ GstV4l2VideoEncClass parent_class;
+};
+
+GType gst_v4l2_vp9_enc_get_type (void);
+
+gboolean gst_v4l2_is_vp9_enc (GstCaps * sink_caps, GstCaps * src_caps);
+
+void gst_v4l2_vp9_enc_register (GstPlugin * plugin, const gchar * basename,
+ const gchar * device_path, GstCaps * sink_caps, GstCaps * src_caps);
+
+G_END_DECLS
+#endif /* __GST_V4L2_VP9_ENC_H__ */
diff --git a/gst-v4l2/nalutils.c b/gst-v4l2/nalutils.c
new file mode 100644
index 0000000..b63c63e
--- /dev/null
+++ b/gst-v4l2/nalutils.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "nalutils.h"
+
+/* Compute Ceil(Log2(v)) */
+/* Derived from the branchless integer log2(v) code in Sean Eron Anderson's
+ * "Bit Twiddling Hacks" collection (the URL was lost from this comment). */
+guint
+ceil_log2 (guint32 v)
+{
+ guint r, shift;
+
+ /* Decrement first so that exact powers of two land one below; the +1 at
+  * the end then yields the ceiling instead of the floor of log2. */
+ v--;
+ /* Accumulate the log2 in r, testing 16/8/4/2/1-bit halves in turn. */
+ r = (v > 0xFFFF) << 4;
+ v >>= r;
+ shift = (v > 0xFF) << 3;
+ v >>= shift;
+ r |= shift;
+ shift = (v > 0xF) << 2;
+ v >>= shift;
+ r |= shift;
+ shift = (v > 0x3) << 1;
+ v >>= shift;
+ r |= shift;
+ r |= (v >> 1);
+ return r + 1;
+}
+
+/****** Nal parser ******/
+
+/* Reset the reader over [data, data + size): rewind position, clear the
+ * emulation-prevention-byte count, and prime the bit cache. */
+void
+init_nal (NalReader * nr, const guint8 * data, guint size)
+{
+ nr->data = data;
+ nr->size = size;
+ nr->n_epb = 0;
+
+ nr->byte = 0;
+ nr->bits_in_cache = 0;
+ /* fill with something other than 0 to detect emulation prevention bytes */
+ nr->first_byte = 0xff;
+ nr->cache = 0xff;
+}
+
+/* Fill the bit cache until it holds at least nbits, transparently removing
+ * emulation_prevention_three_byte (0x00 0x00 0x03 -> 0x00 0x00) sequences.
+ * Returns FALSE if fewer than nbits remain in the data. */
+gboolean
+_read (NalReader * nr, guint nbits)
+{
+ /* Unsigned arithmetic wraps here, so the sum equals the total bits needed
+  * even when bits_in_cache already exceeds nbits. */
+ if (G_UNLIKELY (nr->byte * 8 + (nbits - nr->bits_in_cache) > nr->size * 8)) {
+ GST_DEBUG ("Can not read %u bits, bits in cache %u, Byte * 8 %u, size in "
+ "bits %u", nbits, nr->bits_in_cache, nr->byte * 8, nr->size * 8);
+ return FALSE;
+ }
+
+ while (nr->bits_in_cache < nbits) {
+ guint8 byte;
+ gboolean check_three_byte;
+
+ check_three_byte = TRUE;
+ next_byte:
+ if (G_UNLIKELY (nr->byte >= nr->size))
+ return FALSE;
+
+ byte = nr->data[nr->byte++];
+
+ /* check if the byte is a emulation_prevention_three_byte */
+ if (check_three_byte && byte == 0x03 && nr->first_byte == 0x00 &&
+ ((nr->cache & 0xff) == 0)) {
+ /* next byte goes unconditionally to the cache, even if it's 0x03 */
+ check_three_byte = FALSE;
+ nr->n_epb++;
+ goto next_byte;
+ }
+ nr->cache = (nr->cache << 8) | nr->first_byte;
+ nr->first_byte = byte;
+ nr->bits_in_cache += 8;
+ }
+
+ return TRUE;
+}
+
+/* Skips the specified amount of bits. This is only suitable to a
+ cacheable number of bits (enforced by the g_assert below): the bits are
+ first pulled into the cache by _read and then simply discarded. */
+gboolean
+_skip (NalReader * nr, guint nbits)
+{
+ g_assert (nbits <= 8 * sizeof (nr->cache));
+
+ if (G_UNLIKELY (!_read (nr, nbits)))
+ return FALSE;
+
+ nr->bits_in_cache -= nbits;
+
+ return TRUE;
+}
+
+/* Generic version to skip any number of bits: splits the skip into an
+ initial remainder chunk followed by fixed cache-sized chunks so each
+ individual _skip stays within the cache limit. */
+gboolean
+_skip_long (NalReader * nr, guint nbits)
+{
+ /* Leave out enough bits in the cache once we are finished */
+ const guint skip_size = 4 * sizeof (nr->cache);
+ guint remaining = nbits;
+
+ /* First chunk is nbits modulo skip_size (possibly 0, which is a no-op
+  * _skip); every later chunk is a full skip_size bits. */
+ nbits %= skip_size;
+ while (remaining > 0) {
+ if (!_skip (nr, nbits))
+ return FALSE;
+ remaining -= nbits;
+ nbits = skip_size;
+ }
+ return TRUE;
+}
+
+/* Current bit position: bytes consumed minus bits still buffered. */
+guint
+_get_pos (const NalReader * nr)
+{
+ return nr->byte * 8 - nr->bits_in_cache;
+}
+
+/* Bits left to read: unread bytes plus bits already buffered in the cache. */
+guint
+_get_remaining (const NalReader * nr)
+{
+ return (nr->size - nr->byte) * 8 + nr->bits_in_cache;
+}
+
+/* Number of emulation prevention bytes stripped so far by _read. */
+guint
+_get_epb_count (const NalReader * nr)
+{
+ return nr->n_epb;
+}
+
+/* Generates _get_bits_uint{8,16,32}: read nbits from the stream into *val.
+ * Between calls bits_in_cache stays below 8 (each reader/skipper leaves at
+ * most 7 buffered bits), so "shift" below is always < 8 and the (8 - shift)
+ * shift of the cache is well defined -- presumed invariant, inherited from
+ * the upstream GStreamer nalutils implementation. */
+#define _READ_BITS(bits) \
+gboolean \
+_get_bits_uint##bits (NalReader *nr, guint##bits *val, guint nbits) \
+{ \
+ guint shift; \
+ \
+ if (!_read (nr, nbits)) \
+ return FALSE; \
+ \
+ /* bring the required bits down and truncate */ \
+ shift = nr->bits_in_cache - nbits; \
+ *val = nr->first_byte >> shift; \
+ \
+ *val |= nr->cache << (8 - shift); \
+ /* mask out required bits */ \
+ if (nbits < bits) \
+ *val &= ((guint##bits)1 << nbits) - 1; \
+ \
+ nr->bits_in_cache = shift; \
+ \
+ return TRUE; \
+} \
+
+_READ_BITS (8);
+_READ_BITS (16);
+_READ_BITS (32);
+
+/* Generates _peek_bits_uint8: same as _get_bits_uint8 but reads from a
+ * copy of the reader, leaving the caller's position untouched. */
+#define _PEEK_BITS(bits) \
+gboolean \
+_peek_bits_uint##bits (const NalReader *nr, guint##bits *val, guint nbits) \
+{ \
+ NalReader tmp; \
+ \
+ tmp = *nr; \
+ return _get_bits_uint##bits (&tmp, val, nbits); \
+}
+
+_PEEK_BITS (8);
+
+/* Read an unsigned Exp-Golomb (ue(v)) value: count leading zero bits,
+ * then read that many info bits; result is 2^i - 1 + info. Rejects codes
+ * with more than 31 leading zeros (would overflow guint32). */
+gboolean
+_get_ue (NalReader * nr, guint32 * val)
+{
+ guint i = 0;
+ guint8 bit;
+ guint32 value;
+
+ if (G_UNLIKELY (!_get_bits_uint8 (nr, &bit, 1)))
+ return FALSE;
+
+ while (bit == 0) {
+ i++;
+ if (G_UNLIKELY (!_get_bits_uint8 (nr, &bit, 1)))
+ return FALSE;
+ }
+
+ if (G_UNLIKELY (i > 31))
+ return FALSE;
+
+ if (G_UNLIKELY (!_get_bits_uint32 (nr, &value, i)))
+ return FALSE;
+
+ *val = (1 << i) - 1 + value;
+
+ return TRUE;
+}
+
+/* Read a signed Exp-Golomb (se(v)) value: decode ue(v), then map odd codes
+ * to positive values ((k+1)/2) and even codes to non-positive (-(k/2)). */
+gboolean
+_get_se (NalReader * nr, gint32 * val)
+{
+ guint32 value;
+
+ if (G_UNLIKELY (!_get_ue (nr, &value)))
+ return FALSE;
+
+ if (value % 2)
+ *val = (value / 2) + 1;
+ else
+ *val = -(value / 2);
+
+ return TRUE;
+}
+
+/* TRUE when the read position sits on a byte boundary, i.e. no bits are
+ * left buffered in the cache. */
+gboolean
+_is_byte_aligned (NalReader * nr)
+{
+ return nr->bits_in_cache == 0;
+}
+
+/* Implements more_rbsp_data(): TRUE if meaningful payload bits remain before
+ * the rbsp_stop_one_bit; operates on a copy so the caller's position is
+ * preserved. */
+gboolean
+_has_more_data (NalReader * nr)
+{
+ NalReader nr_tmp;
+ guint remaining, nbits;
+ guint8 rbsp_stop_one_bit, zero_bits;
+
+ remaining = _get_remaining (nr);
+ if (remaining == 0)
+ return FALSE;
+
+ nr_tmp = *nr;
+ nr = &nr_tmp;
+
+ /* The spec defines that more_rbsp_data() searches for the last bit
+ equal to 1, and that it is the rbsp_stop_one_bit. Subsequent bits
+ until byte boundary is reached shall be zero.
+
+ This means that more_rbsp_data() is FALSE if the next bit is 1
+ and the remaining bits until byte boundary are zero. One way to
+ be sure that this bit was the very last one, is that every other
+ bit after we reached byte boundary are also set to zero.
+ Otherwise, if the next bit is 0 or if there are non-zero bits
+ afterwards, then then we have more_rbsp_data() */
+ if (!_get_bits_uint8 (nr, &rbsp_stop_one_bit, 1))
+ return FALSE;
+ if (!rbsp_stop_one_bit)
+ return TRUE;
+
+ /* Scan the rest: any non-zero bit means the 1 above was payload. */
+ nbits = --remaining % 8;
+ while (remaining > 0) {
+ if (!_get_bits_uint8 (nr, &zero_bits, nbits))
+ return FALSE;
+ if (zero_bits != 0)
+ return TRUE;
+ remaining -= nbits;
+ nbits = 8;
+ }
+ return FALSE;
+}
+
+/*********** end of nal parser ***************/
+
+/* Scan for an Annex-B start code (0x00 0x00 0x01 in the top three bytes of
+ * the masked 32-bit window); returns its offset, or -1 if none is found,
+ * per gst_byte_reader_masked_scan_uint32 semantics. */
+gint
+scan_for_start_codes (const guint8 * data, guint size)
+{
+ GstByteReader br;
+ gst_byte_reader_init (&br, data, size);
+
+ /* NALU not empty, so we can at least expect 1 (even 2) bytes following sc */
+ return gst_byte_reader_masked_scan_uint32 (&br, 0xffffff00, 0x00000100,
+ 0, size);
+}
diff --git a/gst-v4l2/nalutils.h b/gst-v4l2/nalutils.h
new file mode 100644
index 0000000..26eadfc
--- /dev/null
+++ b/gst-v4l2/nalutils.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include
+#include
+#include
+
+guint ceil_log2 (guint32 v);
+
+typedef struct
+{
+ const guint8 *data;
+ guint size;
+
+ guint n_epb; /* Number of emulation prevention bytes */
+ guint byte; /* Byte position */
+ guint bits_in_cache; /* bitpos in the cache of next bit */
+ guint8 first_byte;
+ guint64 cache; /* cached bytes */
+} NalReader;
+
+G_GNUC_INTERNAL
+void init_nal (NalReader * nr, const guint8 * data, guint size);
+
+G_GNUC_INTERNAL
+gboolean _read (NalReader * nr, guint nbits);
+
+G_GNUC_INTERNAL
+gboolean _skip (NalReader * nr, guint nbits);
+
+G_GNUC_INTERNAL
+gboolean _skip_long (NalReader * nr, guint nbits);
+
+G_GNUC_INTERNAL
+guint _get_pos (const NalReader * nr);
+
+G_GNUC_INTERNAL
+guint _get_remaining (const NalReader * nr);
+
+G_GNUC_INTERNAL
+guint _get_epb_count (const NalReader * nr);
+
+G_GNUC_INTERNAL
+gboolean _is_byte_aligned (NalReader * nr);
+
+G_GNUC_INTERNAL
+gboolean _has_more_data (NalReader * nr);
+
+#define _READ_BITS_H(bits) \
+G_GNUC_INTERNAL \
+gboolean _get_bits_uint##bits (NalReader *nr, guint##bits *val, guint nbits)
+
+_READ_BITS_H (8);
+_READ_BITS_H (16);
+_READ_BITS_H (32);
+
+#define _PEEK_BITS_H(bits) \
+G_GNUC_INTERNAL \
+gboolean _peek_bits_uint##bits (const NalReader *nr, guint##bits *val, guint nbits)
+
+_PEEK_BITS_H (8);
+
+G_GNUC_INTERNAL
+gboolean _get_ue (NalReader * nr, guint32 * val);
+
+G_GNUC_INTERNAL
+gboolean _get_se (NalReader * nr, gint32 * val);
+
+#define CHECK_ALLOWED_MAX(val, max) { \
+ if (val > max) { \
+ GST_WARNING ("value greater than max. value: %d, max %d", \
+ val, max); \
+ goto error; \
+ } \
+}
+
+#define CHECK_ALLOWED(val, min, max) { \
+ if (val < min || val > max) { \
+ GST_WARNING ("value not in allowed range. value: %d, range %d-%d", \
+ val, min, max); \
+ goto error; \
+ } \
+}
+
+#define READ_UINT8(nr, val, nbits) { \
+ if (!_get_bits_uint8 (nr, &val, nbits)) { \
+ GST_WARNING ("failed to read uint8, nbits: %d", nbits); \
+ goto error; \
+ } \
+}
+
+#define READ_UINT16(nr, val, nbits) { \
+ if (!_get_bits_uint16 (nr, &val, nbits)) { \
+ GST_WARNING ("failed to read uint16, nbits: %d", nbits); \
+ goto error; \
+ } \
+}
+
+#define READ_UINT32(nr, val, nbits) { \
+ if (!_get_bits_uint32 (nr, &val, nbits)) { \
+ GST_WARNING ("failed to read uint32, nbits: %d", nbits); \
+ goto error; \
+ } \
+}
+
+/* NOTE(review): no _get_bits_uint64 reader exists in nalutils.c (only the
+ * 8/16/32-bit variants are instantiated), so this macro will fail to link
+ * if ever used. Warning text fixed from the copy-pasted "uint32". */
+#define READ_UINT64(nr, val, nbits) { \
+ if (!_get_bits_uint64 (nr, &val, nbits)) { \
+ GST_WARNING ("failed to read uint64, nbits: %d", nbits); \
+ goto error; \
+ } \
+}
+
+#define READ_UE(nr, val) { \
+ if (!_get_ue (nr, &val)) { \
+ GST_WARNING ("failed to read UE"); \
+ goto error; \
+ } \
+}
+
+#define READ_UE_ALLOWED(nr, val, min, max) { \
+ guint32 tmp; \
+ READ_UE (nr, tmp); \
+ CHECK_ALLOWED (tmp, min, max); \
+ val = tmp; \
+}
+
+#define READ_UE_MAX(nr, val, max) { \
+ guint32 tmp; \
+ READ_UE (nr, tmp); \
+ CHECK_ALLOWED_MAX (tmp, max); \
+ val = tmp; \
+}
+
+#define READ_SE(nr, val) { \
+ if (!_get_se (nr, &val)) { \
+ GST_WARNING ("failed to read SE"); \
+ goto error; \
+ } \
+}
+
+#define READ_SE_ALLOWED(nr, val, min, max) { \
+ gint32 tmp; \
+ READ_SE (nr, tmp); \
+ CHECK_ALLOWED (tmp, min, max); \
+ val = tmp; \
+}
+
+G_GNUC_INTERNAL
+gint scan_for_start_codes (const guint8 * data, guint size);
diff --git a/gst-v4l2/sei_parse.c b/gst-v4l2/sei_parse.c
new file mode 100644
index 0000000..af320e3
--- /dev/null
+++ b/gst-v4l2/sei_parse.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2014 Collabora Ltd.
+ * Author: Nicolas Dufresne
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define UUID_SIZE 16
+#define USER_DATA_UNREGISTERED_TYPE 5
+
+gboolean check_uuid(uint8_t *stream, char *sei_uuid_string);
+uint8_t* parse_sei_unit(uint8_t * bs_ptr, guint *size, char *sei_uuid_string);
+uint8_t *parse_sei_data (uint8_t *bs, uint32_t size, uint32_t *payload_size, char *sei_uuid_string);
+
+/* Return TRUE when the 16-byte window at @stream holds exactly the expected
+ * user-data UUID string (@sei_uuid_string, UUID_SIZE-1 characters).
+ *
+ * The previous implementation rendered @stream with snprintf ("%s"), which
+ * scans for a NUL terminator with no upper bound and could read past the
+ * UUID bytes of a non-terminated payload.  memchr() performs the identical
+ * "embedded string is exactly UUID_SIZE-1 characters long" check with a
+ * bounded read; accepted and rejected inputs are unchanged.
+ */
+gboolean check_uuid(uint8_t *stream, char *sei_uuid_string)
+{
+  /* The first NUL inside the window must sit at index UUID_SIZE-1. */
+  if (memchr (stream, '\0', UUID_SIZE) != (stream + UUID_SIZE - 1))
+    return FALSE;
+
+  if (!strncmp ((const char *) stream, sei_uuid_string, (UUID_SIZE-1)))
+    return TRUE;
+
+  return FALSE;
+}
+
+/* Parse one SEI message starting at @bs_ptr (first byte = payloadType).
+ *
+ * Returns a newly malloc()ed buffer holding the user data that follows the
+ * 16-byte UUID, with H.26x emulation-prevention bytes (00 00 03) stripped,
+ * and stores the raw payload size in *size.  Returns NULL when the UUID
+ * does not match, the payload type is not "user data unregistered" (5), or
+ * allocation fails.  The caller owns (and must free()) the result.
+ */
+uint8_t* parse_sei_unit(uint8_t * bs_ptr, guint *size, char *sei_uuid_string)
+{
+  int payload_type = 0;
+  int payload_size = 0;
+  uint8_t* payload = NULL;
+  int i;
+
+  payload_type = *bs_ptr++;
+
+  /* payload size is coded as a run of 0xFF bytes plus a terminator byte */
+  while (payload_size % 0xFF == 0)
+  {
+    payload_size += *bs_ptr++;
+  }
+
+  if (!check_uuid (bs_ptr, sei_uuid_string))
+  {
+    bs_ptr += (payload_size - UUID_SIZE);
+    return NULL;
+  }
+  else
+  {
+    bs_ptr += UUID_SIZE;
+  }
+
+  *size = payload_size;
+
+  if (payload_type == USER_DATA_UNREGISTERED_TYPE)
+  {
+    payload = (uint8_t*)malloc((payload_size - UUID_SIZE)*sizeof(uint8_t));
+    if (payload == NULL)
+      return NULL; /* allocation failed; previously dereferenced unchecked */
+
+    for (i = 0; i < (payload_size - UUID_SIZE); i++)
+    {
+      payload[i] = *bs_ptr;
+      /* drop emulation prevention bytes (00 00 03).
+       * NOTE(review): peeks at bs_ptr[-1]/bs_ptr[-2]; the size and UUID
+       * bytes parsed above guarantee at least two preceding bytes for
+       * well-formed input — confirm for truncated streams. */
+      if ((*(bs_ptr) == 0x03)
+          && (*(bs_ptr - 1) == 0x00)
+          && (*(bs_ptr - 2) == 0x00))
+      {
+        i--;
+      }
+      bs_ptr++;
+    }
+    return payload;
+  }
+  else
+  {
+    return NULL;
+  }
+}
+
+/* Scan @bs (@size bytes) for an Annex-B SEI NAL unit (start code
+ * 00 00 00 01 followed by NAL type 0x06) and return the malloc()ed
+ * user-data payload whose UUID matches @sei_uuid_string, or NULL.
+ * *payload_size receives the payload length minus the 16 UUID bytes.
+ * NOTE(review): only the 4-byte start code is matched; a 3-byte
+ * 00 00 01 start code would be skipped — confirm the producing encoder
+ * always emits 4-byte start codes before SEI units. */
+uint8_t *parse_sei_data (uint8_t *bs, uint32_t size, uint32_t *payload_size, char *sei_uuid_string)
+{
+ if (sei_uuid_string == NULL)
+ return NULL;
+ int checklen = 0;
+ unsigned int sei_payload_size = 0;
+ uint8_t *bs_ptr = bs;
+ uint8_t *bs_ptr_end = bs + size;
+ uint8_t *payload = NULL;
+ while (bs_ptr_end > bs_ptr)
+ {
+ /* the first branch whose state guard matches consumes one byte via the
+ * `*bs_ptr++' side effect; checklen counts matched start-code bytes */
+ if (checklen < 2 && *bs_ptr++ == 0x00)
+ checklen++;
+ else if (checklen == 2 && *bs_ptr++ == 0x00)
+ checklen++;
+ else if (checklen == 3 && *bs_ptr++ == 0x01)
+ checklen++;
+ else if (checklen == 4 && *bs_ptr++ == 0x06)
+ {
+ payload = parse_sei_unit(bs_ptr, &sei_payload_size, sei_uuid_string);
+ checklen = 0;
+ if (payload != NULL)
+ {
+ /* 16 == UUID_SIZE: report the size without the UUID prefix */
+ *payload_size = (sei_payload_size - 16);
+ return payload;
+ }
+ else
+ {
+ continue;
+ }
+ }
+ else
+ checklen = 0;
+ }
+ return NULL;
+}
diff --git a/gst-v4l2/v4l2-utils.c b/gst-v4l2/v4l2-utils.c
new file mode 100644
index 0000000..260c5df
--- /dev/null
+++ b/gst-v4l2/v4l2-utils.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2014 Collabora Ltd.
+ * Author: Nicolas Dufresne
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "v4l2-utils.h"
+
+/**************************/
+/* Common device iterator */
+/**************************/
+
+#ifdef HAVE_GUDEV
+#include
+
+/* GUdev-backed implementation of the generic device iterator. */
+struct _GstV4l2GUdevIterator
+{
+ GstV4l2Iterator parent;
+ GList *devices; /* remaining GUdevDevice list, consumed by _next() */
+ GUdevDevice *device; /* device handed out by the last _next() call */
+ GUdevClient *client;
+};
+
+/* Create a device iterator backed by GUdev, pre-populated with every
+ * device currently registered under the "video4linux" subsystem. */
+GstV4l2Iterator *
+gst_v4l2_iterator_new (void)
+{
+  struct _GstV4l2GUdevIterator *iter;
+  static const gchar *subsystems[] = { "video4linux", NULL };
+
+  iter = g_slice_new0 (struct _GstV4l2GUdevIterator);
+  iter->client = g_udev_client_new (subsystems);
+  iter->devices =
+      g_udev_client_query_by_subsystem (iter->client, "video4linux");
+
+  return (GstV4l2Iterator *) iter;
+}
+
+/* Advance to the next video4linux device reported by GUdev and fill in the
+ * parent's device_path/device_name/sys_path fields.  Returns FALSE once
+ * the device list is exhausted. */
+gboolean
+gst_v4l2_iterator_next (GstV4l2Iterator * _it)
+{
+  struct _GstV4l2GUdevIterator *it = (struct _GstV4l2GUdevIterator *) _it;
+  const gchar *name = NULL;
+
+  /* release the device handed out by the previous call */
+  if (it->device != NULL)
+    g_object_unref (it->device);
+  it->device = NULL;
+  it->parent.device_path = NULL;
+  it->parent.device_name = NULL;
+
+  if (it->devices == NULL)
+    return FALSE;
+
+  /* pop the head of the remaining device list */
+  it->device = it->devices->data;
+  it->devices = g_list_delete_link (it->devices, it->devices);
+
+  /* prefer the product name, falling back to the model strings */
+  name = g_udev_device_get_property (it->device, "ID_V4L_PRODUCT");
+  if (name == NULL)
+    name = g_udev_device_get_property (it->device, "ID_MODEL_ENC");
+  if (name == NULL)
+    name = g_udev_device_get_property (it->device, "ID_MODEL");
+
+  it->parent.device_path = g_udev_device_get_device_file (it->device);
+  it->parent.device_name = name;
+  it->parent.sys_path = g_udev_device_get_sysfs_path (it->device);
+
+  return TRUE;
+}
+
+/* Release the GUdev iterator: remaining devices, the client, and the
+ * iterator allocation itself. */
+void
+gst_v4l2_iterator_free (GstV4l2Iterator * _it)
+{
+  struct _GstV4l2GUdevIterator *it = (struct _GstV4l2GUdevIterator *) _it;
+  g_list_free_full (it->devices, g_object_unref);
+  /* GUdevClient is a plain GObject, not a GstObject: unref it with
+   * g_object_unref() (gst_object_unref() asserts on non-GstObject). */
+  g_object_unref (it->client);
+  g_slice_free (struct _GstV4l2GUdevIterator, it);
+}
+
+#else /* No GUDEV */
+
+/* Filesystem-probing fallback iterator used when GUdev is unavailable. */
+struct _GstV4l2FsIterator
+{
+ GstV4l2Iterator parent;
+ gint base_idx; /* index into the dev_base[] name patterns */
+ gint video_idx; /* minor number tried last (0..63 per pattern) */
+ gchar *device;
+};
+
+/* Create the filesystem-probing iterator, positioned before the first
+ * candidate device node (video_idx is pre-incremented by _next()). */
+GstV4l2Iterator *
+gst_v4l2_iterator_new (void)
+{
+  struct _GstV4l2FsIterator *iter = g_slice_new0 (struct _GstV4l2FsIterator);
+
+  iter->base_idx = 0;
+  iter->video_idx = -1;
+  iter->device = NULL;
+
+  return (GstV4l2Iterator *) iter;
+}
+
+/* Probe /dev/video%d then /dev/v4l2/video%d (minors 0..63 each) and return
+ * TRUE with parent.device_path set to the next node that exists.  The
+ * base_idx/video_idx state persists across calls; returns FALSE once both
+ * name patterns are exhausted. */
+gboolean
+gst_v4l2_iterator_next (GstV4l2Iterator * _it)
+{
+ struct _GstV4l2FsIterator *it = (struct _GstV4l2FsIterator *) _it;
+ static const gchar *dev_base[] = { "/dev/video", "/dev/v4l2/video", NULL };
+ gchar *device = NULL;
+
+ /* the previous path was g_strdup_printf()ed below; release it */
+ g_free ((gchar *) it->parent.device_path);
+ it->parent.device_path = NULL;
+
+ while (device == NULL) {
+ it->video_idx++;
+
+ /* 64 minors per base pattern, then move on to the next pattern */
+ if (it->video_idx >= 64) {
+ it->video_idx = 0;
+ it->base_idx++;
+ }
+
+ if (dev_base[it->base_idx] == NULL) {
+ it->video_idx = 0;
+ break;
+ }
+
+ device = g_strdup_printf ("%s%d", dev_base[it->base_idx], it->video_idx);
+
+ if (g_file_test (device, G_FILE_TEST_EXISTS)) {
+ it->parent.device_path = device;
+ break;
+ }
+
+ g_free (device);
+ device = NULL;
+ }
+
+ return it->parent.device_path != NULL;
+}
+
+/* Release the filesystem iterator and the last device path it produced. */
+void
+gst_v4l2_iterator_free (GstV4l2Iterator * _it)
+{
+  struct _GstV4l2FsIterator *iter = (struct _GstV4l2FsIterator *) _it;
+
+  /* device_path is the g_strdup_printf()ed string from _next() */
+  g_free ((gchar *) iter->parent.device_path);
+  g_slice_free (struct _GstV4l2FsIterator, iter);
+}
+
+#endif
+
+/* Reset @v4l2err (GError and debug string) so it can be reused.
+ * Safe to call with NULL or with no error pending. */
+void
+gst_v4l2_clear_error (GstV4l2Error * v4l2err)
+{
+  if (v4l2err == NULL)
+    return;
+
+  g_clear_error (&v4l2err->error);
+  g_free (v4l2err->dbg_message);
+  v4l2err->dbg_message = NULL;
+}
+
+/* Post the error captured in @v4l2err as a GST_MESSAGE_ERROR on @element's
+ * bus, log it, and reset @v4l2err.  No-op when no error is pending.
+ * NOTE(review): gst_element_message_full() appears to take ownership of
+ * the message/debug strings, which would explain why both pointers are
+ * NULLed before gst_v4l2_clear_error() frees the rest — confirm against
+ * the GstElement API before reordering anything here. */
+void
+gst_v4l2_error (gpointer element, GstV4l2Error * v4l2err)
+{
+ GError *error;
+
+ if (!v4l2err || !v4l2err->error)
+ return;
+
+ error = v4l2err->error;
+
+ /* log first; posting below hands the strings over */
+ if (error->message)
+ GST_WARNING_OBJECT (element, "error: %s", error->message);
+
+ if (v4l2err->dbg_message)
+ GST_WARNING_OBJECT (element, "error: %s", v4l2err->dbg_message);
+
+ gst_element_message_full (GST_ELEMENT (element), GST_MESSAGE_ERROR,
+ error->domain, error->code, error->message, v4l2err->dbg_message,
+ v4l2err->file, v4l2err->func, v4l2err->line);
+
+ error->message = NULL;
+ v4l2err->dbg_message = NULL;
+
+ gst_v4l2_clear_error (v4l2err);
+}
diff --git a/gst-v4l2/v4l2-utils.h b/gst-v4l2/v4l2-utils.h
new file mode 100644
index 0000000..1bc0062
--- /dev/null
+++ b/gst-v4l2/v4l2-utils.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014 Collabora Ltd.
+ * Author: Nicolas Dufresne
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __V4L2_UTILS_H__
+#define __V4L2_UTILS_H__
+
+#include
+
+G_BEGIN_DECLS
+
+/* Static initializer for a GstV4l2Error: no GError, no debug string. */
+#define GST_V4L2_ERROR_INIT { NULL, NULL }
+/* Capture an element error into @v4l2err (instead of posting it right
+ * away), recording message, debug string and the call site.  Posted later
+ * via gst_v4l2_error(). */
+#define GST_V4L2_ERROR(v4l2err,domain,code,msg,dbg) \
+{\
+ if (v4l2err) { \
+ gchar *_msg = _gst_element_error_printf msg; \
+ v4l2err->error = g_error_new_literal (GST_##domain##_ERROR, \
+ GST_##domain##_ERROR_##code, _msg); \
+ g_free (_msg); \
+ v4l2err->dbg_message = _gst_element_error_printf dbg; \
+ v4l2err->file = __FILE__; \
+ v4l2err->func = GST_FUNCTION; \
+ v4l2err->line = __LINE__; \
+ } \
+}
+
+typedef struct _GstV4l2Iterator GstV4l2Iterator;
+typedef struct _GstV4l2Error GstV4l2Error;
+
+/* One enumerated V4L2 device; strings are owned by the concrete iterator. */
+struct _GstV4l2Iterator
+{
+ const gchar *device_path;
+ const gchar *device_name;
+ const gchar *sys_path;
+};
+
+/* Deferred element error: filled by GST_V4L2_ERROR(), posted and cleared
+ * by gst_v4l2_error(). */
+struct _GstV4l2Error
+{
+ GError *error;
+ gchar *dbg_message;
+ const gchar *file;
+ const gchar *func;
+ gint line;
+};
+
+GstV4l2Iterator * gst_v4l2_iterator_new (void);
+gboolean gst_v4l2_iterator_next (GstV4l2Iterator *it);
+void gst_v4l2_iterator_free (GstV4l2Iterator *it);
+
+const gchar * gst_v4l2_iterator_get_device_path (GstV4l2Iterator *it);
+const gchar * gst_v4l2_iterator_get_device_name (GstV4l2Iterator *it);
+const gchar * gst_v4l2_iterator_get_sys_path (GstV4l2Iterator *it);
+
+void gst_v4l2_clear_error (GstV4l2Error *error);
+void gst_v4l2_error (gpointer element, GstV4l2Error *error);
+
+G_END_DECLS
+
+#endif /* __V4L2_UTILS_H__ */
+
+
diff --git a/gst-v4l2/v4l2_calls.c b/gst-v4l2/v4l2_calls.c
new file mode 100644
index 0000000..dea63b4
--- /dev/null
+++ b/gst-v4l2/v4l2_calls.c
@@ -0,0 +1,1166 @@
+/* GStreamer
+ *
+ * Copyright (C) 2002 Ronald Bultje
+ * 2006 Edgard Lima
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * v4l2_calls.c - generic V4L2 calls handling
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#ifdef __sun
+/* Needed on older Solaris Nevada builds (72 at least) */
+#include
+#include
+#endif
+#include "gstv4l2object.h"
+
+#ifndef USE_V4L2_TARGET_NV
+#include "gstv4l2tuner.h"
+#include "gstv4l2colorbalance.h"
+#include "gstv4l2src.h"
+#include "gstv4l2sink.h"
+#endif
+
+#include "gstv4l2videodec.h"
+
+#include "gst/gst-i18n-plugin.h"
+GST_DEBUG_CATEGORY_EXTERN (v4l2_debug);
+#define GST_CAT_DEFAULT v4l2_debug
+
+/******************************************************
+ * gst_v4l2_get_capabilities():
+ * get the device's capturing capabilities
+ * return value: TRUE on success, FALSE on error
+ ******************************************************/
+static gboolean
+gst_v4l2_get_capabilities (GstV4l2Object * v4l2object)
+{
+ GstElement *e;
+
+ e = v4l2object->element;
+
+ GST_DEBUG_OBJECT (e, "getting capabilities");
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ return FALSE;
+
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_QUERYCAP,
+ &v4l2object->vcap) < 0)
+ goto cap_failed;
+
+ /* V4L2_CAP_DEVICE_CAPS means device_caps holds the per-node caps;
+ * otherwise fall back to the driver-wide capabilities field */
+ if (v4l2object->vcap.capabilities & V4L2_CAP_DEVICE_CAPS)
+ v4l2object->device_caps = v4l2object->vcap.device_caps;
+ else
+ v4l2object->device_caps = v4l2object->vcap.capabilities;
+
+ GST_LOG_OBJECT (e, "driver: '%s'", v4l2object->vcap.driver);
+ GST_LOG_OBJECT (e, "card: '%s'", v4l2object->vcap.card);
+ GST_LOG_OBJECT (e, "bus_info: '%s'", v4l2object->vcap.bus_info);
+ GST_LOG_OBJECT (e, "version: %08x", v4l2object->vcap.version);
+ GST_LOG_OBJECT (e, "capabilites: %08x", v4l2object->device_caps);
+
+ return TRUE;
+
+ /* ERRORS */
+cap_failed:
+ {
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, SETTINGS,
+ (_("Error getting capabilities for device '%s': "
+ "It isn't a v4l2 driver. Check if it is a v4l1 driver."),
+ v4l2object->videodev), GST_ERROR_SYSTEM);
+ return FALSE;
+ }
+}
+
+/******************************************************
+ * The video4linux command line tool v4l2-ctrl
+ * normalises the names of the controls received from
+ * the kernel like:
+ *
+ * "Exposure (absolute)" -> "exposure_absolute"
+ *
+ * We follow their lead here. @name is modified
+ * in-place.
+ ******************************************************/
+static void
+gst_v4l2_normalise_control_name (gchar * name)
+{
+  int in, out;
+
+  /* keep alphanumerics (lower-cased); collapse every run of other
+   * characters into a single '_', never at the start of the result */
+  for (in = 0, out = 0; name[in] != '\0'; ++in) {
+    if (!g_ascii_isalnum (name[in]))
+      continue;
+    if (out > 0 && !g_ascii_isalnum (name[in - 1]))
+      name[out++] = '_';
+    name[out++] = g_ascii_tolower (name[in]);
+  }
+  name[out++] = '\0';
+}
+
+#ifndef USE_V4L2_TARGET_NV
+/******************************************************
+ * gst_v4l2_empty_lists() and gst_v4l2_fill_lists():
+ * fill/empty the lists of enumerations
+ * return value: TRUE on success, FALSE on error
+ ******************************************************/
+static gboolean
+gst_v4l2_fill_lists (GstV4l2Object * v4l2object)
+{
+ gint n, next;
+ struct v4l2_queryctrl control = { 0, };
+
+ GstElement *e;
+
+ e = v4l2object->element;
+
+ GST_DEBUG_OBJECT (e, "getting enumerations");
+ GST_V4L2_CHECK_OPEN (v4l2object);
+
+ GST_DEBUG_OBJECT (e, " channels");
+ /* and now, the channels */
+ for (n = 0;; n++) {
+ struct v4l2_input input;
+ GstV4l2TunerChannel *v4l2channel;
+ GstTunerChannel *channel;
+
+ memset (&input, 0, sizeof (input));
+
+ input.index = n;
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_ENUMINPUT, &input) < 0) {
+ if (errno == EINVAL || errno == ENOTTY)
+ break; /* end of enumeration */
+ else {
+ GST_ELEMENT_ERROR (e, RESOURCE, SETTINGS,
+ (_("Failed to query attributes of input %d in device %s"),
+ n, v4l2object->videodev),
+ ("Failed to get %d in input enumeration for %s. (%d - %s)",
+ n, v4l2object->videodev, errno, strerror (errno)));
+ return FALSE;
+ }
+ }
+
+ GST_LOG_OBJECT (e, " index: %d", input.index);
+ GST_LOG_OBJECT (e, " name: '%s'", input.name);
+ GST_LOG_OBJECT (e, " type: %08x", input.type);
+ GST_LOG_OBJECT (e, " audioset: %08x", input.audioset);
+ GST_LOG_OBJECT (e, " std: %016" G_GINT64_MODIFIER "x",
+ (guint64) input.std);
+ GST_LOG_OBJECT (e, " status: %08x", input.status);
+
+ v4l2channel = g_object_new (GST_TYPE_V4L2_TUNER_CHANNEL, NULL);
+ channel = GST_TUNER_CHANNEL (v4l2channel);
+ channel->label = g_strdup ((const gchar *) input.name);
+ channel->flags = GST_TUNER_CHANNEL_INPUT;
+ v4l2channel->index = n;
+
+ if (input.type == V4L2_INPUT_TYPE_TUNER) {
+ struct v4l2_tuner vtun;
+
+ v4l2channel->tuner = input.tuner;
+ channel->flags |= GST_TUNER_CHANNEL_FREQUENCY;
+
+ vtun.index = input.tuner;
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_G_TUNER, &vtun) < 0) {
+ GST_ELEMENT_ERROR (e, RESOURCE, SETTINGS,
+ (_("Failed to get setting of tuner %d on device '%s'."),
+ input.tuner, v4l2object->videodev), GST_ERROR_SYSTEM);
+ g_object_unref (G_OBJECT (channel));
+ return FALSE;
+ }
+
+ channel->freq_multiplicator =
+ 62.5 * ((vtun.capability & V4L2_TUNER_CAP_LOW) ? 1 : 1000);
+ channel->min_frequency = vtun.rangelow * channel->freq_multiplicator;
+ channel->max_frequency = vtun.rangehigh * channel->freq_multiplicator;
+ channel->min_signal = 0;
+ channel->max_signal = 0xffff;
+ }
+ if (input.audioset) {
+ /* we take the first. We don't care for
+ * the others for now */
+ while (!(input.audioset & (1 << v4l2channel->audio)))
+ v4l2channel->audio++;
+ channel->flags |= GST_TUNER_CHANNEL_AUDIO;
+ }
+
+ v4l2object->channels =
+ g_list_prepend (v4l2object->channels, (gpointer) channel);
+ }
+ v4l2object->channels = g_list_reverse (v4l2object->channels);
+
+ GST_DEBUG_OBJECT (e, " norms");
+ /* norms... */
+ for (n = 0;; n++) {
+ struct v4l2_standard standard = { 0, };
+ GstV4l2TunerNorm *v4l2norm;
+
+ GstTunerNorm *norm;
+
+ /* fill in defaults */
+ standard.frameperiod.numerator = 1;
+ standard.frameperiod.denominator = 0;
+ standard.index = n;
+
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_ENUMSTD, &standard) < 0) {
+ if (errno == EINVAL || errno == ENOTTY)
+ break; /* end of enumeration */
+#ifdef ENODATA
+ else if (errno == ENODATA)
+ break; /* end of enumeration, as of Linux 3.7-rc1 */
+#endif
+ else {
+ GST_ELEMENT_ERROR (e, RESOURCE, SETTINGS,
+ (_("Failed to query norm on device '%s'."),
+ v4l2object->videodev),
+ ("Failed to get attributes for norm %d on devide '%s'. (%d - %s)",
+ n, v4l2object->videodev, errno, strerror (errno)));
+ return FALSE;
+ }
+ }
+
+ GST_DEBUG_OBJECT (e, " '%s', fps: %d / %d",
+ standard.name, standard.frameperiod.denominator,
+ standard.frameperiod.numerator);
+
+ v4l2norm = g_object_new (GST_TYPE_V4L2_TUNER_NORM, NULL);
+ norm = GST_TUNER_NORM (v4l2norm);
+ norm->label = g_strdup ((const gchar *) standard.name);
+ gst_value_set_fraction (&norm->framerate,
+ standard.frameperiod.denominator, standard.frameperiod.numerator);
+ v4l2norm->index = standard.id;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "index=%08x, label=%s",
+ (unsigned int) v4l2norm->index, norm->label);
+
+ v4l2object->norms = g_list_prepend (v4l2object->norms, (gpointer) norm);
+ }
+ v4l2object->norms = g_list_reverse (v4l2object->norms);
+
+ GST_DEBUG_OBJECT (e, " controls+menus");
+
+ /* and lastly, controls+menus (if appropriate) */
+ next = V4L2_CTRL_FLAG_NEXT_CTRL;
+ n = 0;
+ control.id = next;
+
+ while (TRUE) {
+ GstV4l2ColorBalanceChannel *v4l2channel;
+ GstColorBalanceChannel *channel;
+
+ if (!next)
+ n++;
+
+ retry:
+ /* when we reached the last official CID, continue with private CIDs */
+ if (n == V4L2_CID_LASTP1) {
+ GST_DEBUG_OBJECT (e, "checking private CIDs");
+ n = V4L2_CID_PRIVATE_BASE;
+ }
+ GST_DEBUG_OBJECT (e, "checking control %08x", n);
+
+ control.id = n | next;
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_QUERYCTRL,
+ &control) < 0) {
+ if (next) {
+ if (n > 0) {
+ GST_DEBUG_OBJECT (e, "controls finished");
+ break;
+ } else {
+ GST_DEBUG_OBJECT (e, "V4L2_CTRL_FLAG_NEXT_CTRL not supported.");
+ next = 0;
+ n = V4L2_CID_BASE;
+ goto retry;
+ }
+ }
+ if (errno == EINVAL || errno == ENOTTY || errno == EIO || errno == ENOENT) {
+ if (n < V4L2_CID_PRIVATE_BASE) {
+ GST_DEBUG_OBJECT (e, "skipping control %08x", n);
+ /* continue so that we also check private controls */
+ n = V4L2_CID_PRIVATE_BASE - 1;
+ continue;
+ } else {
+ GST_DEBUG_OBJECT (e, "controls finished");
+ break;
+ }
+ } else {
+ GST_WARNING_OBJECT (e, "Failed querying control %d on device '%s'. "
+ "(%d - %s)", n, v4l2object->videodev, errno, strerror (errno));
+ continue;
+ }
+ }
+ /* bogus driver might mess with id in unexpected ways (e.g. set to 0), so
+ * make sure to simply try all if V4L2_CTRL_FLAG_NEXT_CTRL not supported */
+ if (next)
+ n = control.id;
+ if (control.flags & V4L2_CTRL_FLAG_DISABLED) {
+ GST_DEBUG_OBJECT (e, "skipping disabled control");
+ continue;
+ }
+
+ if (control.type == V4L2_CTRL_TYPE_CTRL_CLASS) {
+ GST_DEBUG_OBJECT (e, "starting control class '%s'", control.name);
+ continue;
+ }
+
+ switch (control.type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ case V4L2_CTRL_TYPE_BUTTON:{
+ control.name[31] = '\0';
+ gst_v4l2_normalise_control_name ((gchar *) control.name);
+ g_datalist_id_set_data (&v4l2object->controls,
+ g_quark_from_string ((const gchar *) control.name),
+ GINT_TO_POINTER (n));
+ break;
+ }
+ default:
+ GST_DEBUG_OBJECT (e,
+ "Control type for '%s' not suppored for extra controls.",
+ control.name);
+ break;
+ }
+
+ switch (n) {
+ case V4L2_CID_BRIGHTNESS:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_HUE:
+ case V4L2_CID_BLACK_LEVEL:
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ case V4L2_CID_DO_WHITE_BALANCE:
+ case V4L2_CID_RED_BALANCE:
+ case V4L2_CID_BLUE_BALANCE:
+ case V4L2_CID_GAMMA:
+ case V4L2_CID_EXPOSURE:
+ case V4L2_CID_AUTOGAIN:
+ case V4L2_CID_GAIN:
+ case V4L2_CID_SHARPNESS:
+ /* we only handle these for now (why?) */
+ break;
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ case V4L2_CID_PAN_RESET:
+ case V4L2_CID_TILT_RESET:
+ /* not handled here, handled by VideoOrientation interface */
+ control.id++;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ case V4L2_CID_AUDIO_BALANCE:
+ case V4L2_CID_AUDIO_BASS:
+ case V4L2_CID_AUDIO_TREBLE:
+ case V4L2_CID_AUDIO_MUTE:
+ case V4L2_CID_AUDIO_LOUDNESS:
+ /* FIXME: We should implement GstMixer interface instead */
+ /* but let's not be pedantic and make element more useful for now */
+ break;
+ case V4L2_CID_ALPHA_COMPONENT:
+ v4l2object->has_alpha_component = TRUE;
+ break;
+ default:
+ GST_DEBUG_OBJECT (e,
+ "ControlID %s (%x) unhandled, FIXME", control.name, n);
+ control.id++;
+ break;
+ }
+ if (n != control.id)
+ continue;
+
+ GST_DEBUG_OBJECT (e, "Adding ControlID %s (%x)", control.name, n);
+ v4l2channel = g_object_new (GST_TYPE_V4L2_COLOR_BALANCE_CHANNEL, NULL);
+ channel = GST_COLOR_BALANCE_CHANNEL (v4l2channel);
+ channel->label = g_strdup ((const gchar *) control.name);
+ v4l2channel->id = n;
+
+#ifndef USE_V4L2_TARGET_NV
+ /* FIXME: it will be need just when handling private controls
+ *(currently none of base controls are of this type) */
+ /* `menus' was referenced below without ever being declared, which
+ * fails to compile whenever this section is built; collect the menu
+ * entries for the current control in a fresh list. */
+ GList *menus = NULL;
+ if (control.type == V4L2_CTRL_TYPE_MENU) {
+ struct v4l2_querymenu menu, *mptr;
+
+ int i;
+
+ menu.id = n;
+ for (i = 0;; i++) {
+ menu.index = i;
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_QUERYMENU,
+ &menu) < 0) {
+ if (errno == EINVAL)
+ break; /* end of enumeration */
+ else {
+ GST_ELEMENT_ERROR (e, RESOURCE, SETTINGS,
+ (_("Failed getting controls attributes on device '%s'."),
+ v4l2object->videodev),
+ ("Failed to get %d in menu enumeration for %s. (%d - %s)",
+ n, v4l2object->videodev, errno, strerror (errno)));
+ return FALSE;
+ }
+ }
+ mptr = g_malloc (sizeof (menu));
+ memcpy (mptr, &menu, sizeof (menu));
+ menus = g_list_append (menus, mptr);
+ }
+ }
+ v4l2object->menus = g_list_append (v4l2object->menus, menus);
+#endif
+
+ switch (control.type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ channel->min_value = control.minimum;
+ channel->max_value = control.maximum;
+ break;
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ channel->min_value = FALSE;
+ channel->max_value = TRUE;
+ break;
+ default:
+ /* FIXME we should find out how to handle V4L2_CTRL_TYPE_BUTTON.
+ BUTTON controls like V4L2_CID_DO_WHITE_BALANCE can just be set (1) or
+ unset (0), but can't be queried */
+ GST_DEBUG_OBJECT (e,
+ "Control with non supported type %s (%x), type=%d",
+ control.name, n, control.type);
+ channel->min_value = channel->max_value = 0;
+ break;
+ }
+
+ v4l2object->colors =
+ g_list_prepend (v4l2object->colors, (gpointer) channel);
+ }
+ v4l2object->colors = g_list_reverse (v4l2object->colors);
+
+ GST_DEBUG_OBJECT (e, "done");
+ return TRUE;
+}
+#endif
+
+
+/* Drop the cached channel/norm/color enumeration lists and the control
+ * datalist built while the device was open. */
+static void
+gst_v4l2_empty_lists (GstV4l2Object * v4l2object)
+{
+  GST_DEBUG_OBJECT (v4l2object->dbg_obj, "deleting enumerations");
+
+  /* each list owns a reference on its GObject elements */
+  g_list_free_full (v4l2object->channels, g_object_unref);
+  v4l2object->channels = NULL;
+
+  g_list_free_full (v4l2object->norms, g_object_unref);
+  v4l2object->norms = NULL;
+
+  g_list_free_full (v4l2object->colors, g_object_unref);
+  v4l2object->colors = NULL;
+
+  g_datalist_clear (&v4l2object->controls);
+}
+
+/* Upgrade the initially requested buffer type to its _MPLANE variant when
+ * the queried device capabilities report multi-planar support.  Drivers
+ * expose either the planar or the non-planar API exclusively, and even a
+ * contiguous format can be carried in the first v4l2 plane, so preferring
+ * MPLANE is always safe. */
+static void
+gst_v4l2_adjust_buf_type (GstV4l2Object * v4l2object)
+{
+  if (v4l2object->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+    if (v4l2object->device_caps &
+        (V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_VIDEO_M2M_MPLANE)) {
+      GST_DEBUG ("adjust type to multi-planar output");
+      v4l2object->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+    }
+  } else if (v4l2object->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+    if (v4l2object->device_caps &
+        (V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_M2M_MPLANE)) {
+      GST_DEBUG ("adjust type to multi-planar capture");
+      v4l2object->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+    }
+  }
+}
+
+/******************************************************
+ * gst_v4l2_open():
+ * open the video device (v4l2object->videodev)
+ * return value: TRUE on success, FALSE on error
+ ******************************************************/
+gboolean
+gst_v4l2_open (GstV4l2Object * v4l2object)
+{
+ struct stat st;
+ int libv4l2_fd = -1;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "Trying to open device %s",
+ v4l2object->videodev);
+
+ GST_V4L2_CHECK_NOT_OPEN (v4l2object);
+ GST_V4L2_CHECK_NOT_ACTIVE (v4l2object);
+
+ /* be sure we have a device */
+ if (!v4l2object->videodev)
+ v4l2object->videodev = g_strdup ("/dev/video");
+
+ char buf[30];
+ int i = 0;
+ if (is_cuvid == TRUE) {
+ /* dGPU (cuvid) path: probe /dev/nvidia0 .. /dev/nvidia15 and use the
+ * first node that opens.  NOTE(review): videodev is ignored here, so
+ * the error messages below may name a path that was never tried. */
+ for (i = 0; i < 16; i++)
+ {
+ g_snprintf(buf, sizeof(buf), "/dev/nvidia%d", i);
+ v4l2object->video_fd =
+ open (buf, O_RDWR /* | O_NONBLOCK */ );
+ if (v4l2object->video_fd != -1)
+ break;
+ else
+ continue;
+ }
+ } else if (is_cuvid == FALSE) {
+ /* check if it is a device */
+ if (stat (v4l2object->videodev, &st) == -1)
+ goto stat_failed;
+
+ if (!S_ISCHR (st.st_mode))
+ goto no_device;
+
+ /* open the device */
+ v4l2object->video_fd =
+ open (v4l2object->videodev, O_RDWR /* | O_NONBLOCK */ );
+ }
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ goto not_open;
+
+#ifdef HAVE_LIBV4L2
+ if (v4l2object->fd_open)
+ libv4l2_fd = v4l2object->fd_open (v4l2object->video_fd,
+ V4L2_ENABLE_ENUM_FMT_EMULATION);
+#endif
+
+ /* Note the v4l2_xxx functions are designed so that if they get passed an
+ unknown fd, the will behave exactly as their regular xxx counterparts, so
+ if v4l2_fd_open fails, we continue as normal (missing the libv4l2 custom
+ cam format to normal formats conversion). Chances are big we will still
+ fail then though, as normally v4l2_fd_open only fails if the device is not
+ a v4l2 device. */
+ if (libv4l2_fd != -1)
+ v4l2object->video_fd = libv4l2_fd;
+
+ /* get capabilities, error will be posted */
+ if (!gst_v4l2_get_capabilities (v4l2object))
+ goto error;
+
+#ifndef USE_V4L2_TARGET_NV
+ /* do we need to be a capture device? */
+ if (GST_IS_V4L2SRC (v4l2object->element) &&
+ !(v4l2object->device_caps & (V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE)))
+ goto not_capture;
+
+ if (GST_IS_V4L2SINK (v4l2object->element) &&
+ !(v4l2object->device_caps & (V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE)))
+ goto not_output;
+#endif
+
+ if (GST_IS_V4L2_VIDEO_DEC (v4l2object->element) &&
+ /* Today's M2M device only expose M2M */
+ !((v4l2object->device_caps & (V4L2_CAP_VIDEO_M2M |
+ V4L2_CAP_VIDEO_M2M_MPLANE)) ||
+ /* But legacy driver may expose both CAPTURE and OUTPUT */
+ ((v4l2object->device_caps &
+ (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_CAPTURE_MPLANE)) &&
+ (v4l2object->device_caps &
+ (V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE)))))
+ goto not_m2m;
+
+ gst_v4l2_adjust_buf_type (v4l2object);
+
+#ifndef USE_V4L2_TARGET_NV
+ /* create enumerations, posts errors. */
+ if (!gst_v4l2_fill_lists (v4l2object))
+ goto error;
+#endif
+
+ GST_INFO_OBJECT (v4l2object->dbg_obj,
+ "Opened device '%s' (%s) successfully",
+ v4l2object->vcap.card, v4l2object->videodev);
+
+ if (v4l2object->extra_controls)
+ gst_v4l2_set_controls (v4l2object, v4l2object->extra_controls);
+
+ /* UVC devices are never interlaced, and doing VIDIOC_TRY_FMT on them
+ * causes expensive and slow USB IO, so don't probe them for interlaced
+ */
+ if (!strcmp ((char *) v4l2object->vcap.driver, "uvcusb") ||
+ !strcmp ((char *) v4l2object->vcap.driver, "uvcvideo")) {
+ v4l2object->never_interlaced = TRUE;
+ }
+
+ return TRUE;
+
+ /* ERRORS */
+stat_failed:
+ {
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, NOT_FOUND,
+ (_("Cannot identify device '%s'."), v4l2object->videodev),
+ GST_ERROR_SYSTEM);
+ goto error;
+ }
+no_device:
+ {
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, NOT_FOUND,
+ (_("This isn't a device '%s'."), v4l2object->videodev),
+ GST_ERROR_SYSTEM);
+ goto error;
+ }
+not_open:
+ {
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, OPEN_READ_WRITE,
+ (_("Could not open device '%s' for reading and writing."),
+ v4l2object->videodev), GST_ERROR_SYSTEM);
+ goto error;
+ }
+#ifndef USE_V4L2_TARGET_NV
+not_capture:
+ {
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, NOT_FOUND,
+ (_("Device '%s' is not a capture device."),
+ v4l2object->videodev),
+ ("Capabilities: 0x%x", v4l2object->device_caps));
+ goto error;
+ }
+not_output:
+ {
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, NOT_FOUND,
+ (_("Device '%s' is not a output device."),
+ v4l2object->videodev),
+ ("Capabilities: 0x%x", v4l2object->device_caps));
+ goto error;
+ }
+#endif
+not_m2m:
+ {
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, NOT_FOUND,
+ (_("Device '%s' is not a M2M device."),
+ v4l2object->videodev),
+ ("Capabilities: 0x%x", v4l2object->device_caps));
+ goto error;
+ }
+error:
+ {
+ if (GST_V4L2_IS_OPEN (v4l2object)) {
+ /* close device */
+ v4l2object->close (v4l2object->video_fd);
+ v4l2object->video_fd = -1;
+ }
+ /* empty lists */
+ gst_v4l2_empty_lists (v4l2object);
+
+ return FALSE;
+ }
+}
+
+/* Clone an already-open @other into @v4l2object by dup()ing its fd and
+ * copying the cached capabilities; both objects must be inactive and
+ * @v4l2object must not yet be open.  Returns TRUE on success. */
+gboolean
+gst_v4l2_dup (GstV4l2Object * v4l2object, GstV4l2Object * other)
+{
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "Trying to dup device %s",
+ other->videodev);
+
+ GST_V4L2_CHECK_OPEN (other);
+ GST_V4L2_CHECK_NOT_OPEN (v4l2object);
+ GST_V4L2_CHECK_NOT_ACTIVE (other);
+ GST_V4L2_CHECK_NOT_ACTIVE (v4l2object);
+
+ /* reuse the capabilities queried on the original fd */
+ v4l2object->vcap = other->vcap;
+ v4l2object->device_caps = other->device_caps;
+ gst_v4l2_adjust_buf_type (v4l2object);
+
+ v4l2object->video_fd = v4l2object->dup (other->video_fd);
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ goto not_open;
+
+ g_free (v4l2object->videodev);
+ v4l2object->videodev = g_strdup (other->videodev);
+
+ GST_INFO_OBJECT (v4l2object->dbg_obj,
+ "Cloned device '%s' (%s) successfully",
+ v4l2object->vcap.card, v4l2object->videodev);
+
+ v4l2object->never_interlaced = other->never_interlaced;
+
+ return TRUE;
+
+not_open:
+ {
+ GST_ELEMENT_ERROR (v4l2object->element, RESOURCE, OPEN_READ_WRITE,
+ (_("Could not dup device '%s' for reading and writing."),
+ v4l2object->videodev), GST_ERROR_SYSTEM);
+
+ return FALSE;
+ }
+}
+
+
+/******************************************************
+ * gst_v4l2_close():
+ * close the video device (v4l2object->video_fd)
+ * return value: TRUE on success, FALSE on error
+ ******************************************************/
+gboolean
+gst_v4l2_close (GstV4l2Object * v4l2object)
+{
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "Trying to close %s",
+ v4l2object->videodev);
+
+ /* NOTE(review): these macros presumably return FALSE from this function
+ * if the object is not open or still active -- confirm in gstv4l2object.h. */
+ GST_V4L2_CHECK_OPEN (v4l2object);
+ GST_V4L2_CHECK_NOT_ACTIVE (v4l2object);
+
+ /* close device */
+ v4l2object->close (v4l2object->video_fd);
+ /* mark as closed so subsequent GST_V4L2_IS_OPEN() checks fail */
+ v4l2object->video_fd = -1;
+
+ /* empty lists */
+ gst_v4l2_empty_lists (v4l2object);
+
+ return TRUE;
+}
+
+
+/******************************************************
+ * gst_v4l2_get_norm()
+ * Get the norm of the current device
+ * return value: TRUE on success, FALSE on error
+ ******************************************************/
+gboolean
+gst_v4l2_get_norm (GstV4l2Object * v4l2object, v4l2_std_id * norm)
+{
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "getting norm");
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ return FALSE;
+
+ /* query the currently active video standard; failure is only logged at
+ * debug level since many devices simply have no norm */
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_G_STD, norm) < 0) {
+ GST_DEBUG ("Failed to get the current norm for device %s",
+ v4l2object->videodev);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+/******************************************************
+ * gst_v4l2_set_norm()
+ * Set the norm of the current device
+ * return value: TRUE on success, FALSE on error
+ ******************************************************/
+gboolean
+gst_v4l2_set_norm (GstV4l2Object * v4l2object, v4l2_std_id norm)
+{
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "trying to set norm to "
+ "%" G_GINT64_MODIFIER "x", (guint64) norm);
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ return FALSE;
+
+ /* program the requested video standard; a failure is reported as a
+ * non-fatal element warning */
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_STD, &norm) < 0) {
+ GST_ELEMENT_WARNING (v4l2object->element, RESOURCE, SETTINGS,
+ (_("Failed to set norm for device '%s'."),
+ v4l2object->videodev), GST_ERROR_SYSTEM);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+#ifndef USE_V4L2_TARGET_NV
+/******************************************************
+ * gst_v4l2_get_frequency():
+ * get the current frequency
+ * return value: TRUE on success, FALSE on error
+ ******************************************************/
+gboolean
+gst_v4l2_get_frequency (GstV4l2Object * v4l2object,
+ gint tunernum, gulong * frequency)
+{
+ struct v4l2_frequency freq = { 0, };
+
+ GstTunerChannel *channel;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "getting current tuner frequency");
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ return FALSE;
+
+ channel = gst_tuner_get_channel (GST_TUNER (v4l2object->element));
+ /* FIX: gst_tuner_get_channel() may return NULL when no channel is
+ * currently selected; bail out instead of dereferencing it below. */
+ if (channel == NULL)
+ goto freq_failed;
+
+ freq.tuner = tunernum;
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_G_FREQUENCY, &freq) < 0)
+ goto freq_failed;
+
+ /* the driver reports in tuner units; scale to Hz-like units */
+ *frequency = freq.frequency * channel->freq_multiplicator;
+
+ return TRUE;
+
+ /* ERRORS */
+freq_failed:
+ {
+ GST_ELEMENT_WARNING (v4l2object->element, RESOURCE, SETTINGS,
+ (_("Failed to get current tuner frequency for device '%s'."),
+ v4l2object->videodev), GST_ERROR_SYSTEM);
+ return FALSE;
+ }
+}
+
+
+/******************************************************
+ * gst_v4l2_set_frequency():
+ * set frequency
+ * return value: TRUE on success, FALSE on error
+ ******************************************************/
+gboolean
+gst_v4l2_set_frequency (GstV4l2Object * v4l2object,
+ gint tunernum, gulong frequency)
+{
+ struct v4l2_frequency freq = { 0, };
+
+ GstTunerChannel *channel;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj,
+ "setting current tuner frequency to %lu", frequency);
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ return FALSE;
+
+ channel = gst_tuner_get_channel (GST_TUNER (v4l2object->element));
+ /* FIX: gst_tuner_get_channel() may return NULL when no channel is
+ * selected, and a zero multiplicator would make the division below
+ * meaningless; guard both cases instead of crashing. */
+ if (channel == NULL || channel->freq_multiplicator == 0)
+ goto freq_failed;
+
+ freq.tuner = tunernum;
+ /* fill in type - ignore error */
+ (void) v4l2object->ioctl (v4l2object->video_fd, VIDIOC_G_FREQUENCY, &freq);
+ freq.frequency = frequency / channel->freq_multiplicator;
+
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_FREQUENCY, &freq) < 0)
+ goto freq_failed;
+
+ return TRUE;
+
+ /* ERRORS */
+freq_failed:
+ {
+ GST_ELEMENT_WARNING (v4l2object->element, RESOURCE, SETTINGS,
+ (_("Failed to set current tuner frequency for device '%s' to %lu Hz."),
+ v4l2object->videodev, frequency), GST_ERROR_SYSTEM);
+ return FALSE;
+ }
+}
+#endif
+
+/******************************************************
+ * gst_v4l2_signal_strength():
+ * get the strength of the signal on the current input
+ * return value: TRUE on success, FALSE on error
+ ******************************************************/
+gboolean
+gst_v4l2_signal_strength (GstV4l2Object * v4l2object,
+ gint tunernum, gulong * signal_strength)
+{
+ struct v4l2_tuner tuner = { 0, };
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "trying to get signal strength");
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ return FALSE;
+
+ /* ask the tuner for its current status and report its signal field */
+ tuner.index = tunernum;
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_G_TUNER, &tuner) < 0) {
+ GST_ELEMENT_WARNING (v4l2object->element, RESOURCE, SETTINGS,
+ (_("Failed to get signal strength for device '%s'."),
+ v4l2object->videodev), GST_ERROR_SYSTEM);
+ return FALSE;
+ }
+
+ *signal_strength = tuner.signal;
+
+ return TRUE;
+}
+
+/******************************************************
+ * gst_v4l2_get_attribute():
+ * try to get the value of one specific attribute
+ * return value: TRUE on success, FALSE on error
+ ******************************************************/
+gboolean
+gst_v4l2_get_attribute (GstV4l2Object * v4l2object,
+ int attribute_num, int *value)
+{
+ struct v4l2_control control = { 0, };
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "getting value of attribute %d",
+ attribute_num);
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ return FALSE;
+
+ control.id = attribute_num;
+
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_G_CTRL, &control) < 0)
+ goto ctrl_failed;
+
+ *value = control.value;
+
+ return TRUE;
+
+ /* ERRORS */
+ctrl_failed:
+ {
+ /* FIX: pass the debug object (dbg_obj), not the plain GstV4l2Object
+ * struct, to GST_WARNING_OBJECT -- consistent with every other log
+ * call in this file. */
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ _("Failed to get value for control %d on device '%s'."),
+ attribute_num, v4l2object->videodev);
+ return FALSE;
+ }
+}
+
+
+/******************************************************
+ * gst_v4l2_set_attribute():
+ * try to set the value of one specific attribute
+ * return value: TRUE on success, FALSE on error
+ ******************************************************/
+gboolean
+gst_v4l2_set_attribute (GstV4l2Object * v4l2object,
+ int attribute_num, const int value)
+{
+ struct v4l2_control control = { 0, };
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "setting value of attribute %d to %d",
+ attribute_num, value);
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ return FALSE;
+
+ control.id = attribute_num;
+ control.value = value;
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_CTRL, &control) < 0)
+ goto ctrl_failed;
+
+ return TRUE;
+
+ /* ERRORS */
+ctrl_failed:
+ {
+ /* FIX: pass the debug object (dbg_obj), not the plain GstV4l2Object
+ * struct, to GST_WARNING_OBJECT -- consistent with every other log
+ * call in this file. */
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ _("Failed to set value %d for control %d on device '%s'."),
+ value, attribute_num, v4l2object->videodev);
+ return FALSE;
+ }
+}
+
+/* set_control:
+ * GstStructureForeachFunc that looks up the control named by @field_id in
+ * v4l2object->controls and applies the integer @value to it.
+ * Always returns TRUE so gst_structure_foreach() keeps iterating; unknown
+ * controls and non-int values only produce a warning.
+ */
+static gboolean
+set_control (GQuark field_id, const GValue * value, gpointer user_data)
+{
+ GstV4l2Object *v4l2object = user_data;
+ GQuark normalised_field_id;
+ gpointer *d;
+
+ /* 32 bytes is the maximum size for a control name according to v4l2 */
+ gchar name[32];
+
+ /* Backwards compatibility: in the past GStreamer would normalise strings in
+ a subtly different way to v4l2-ctl. e.g. the kernel's "Focus (absolute)"
+ would become "focus__absolute_" whereas now it becomes "focus_absolute".
+ Please remove the following in GStreamer 1.5 for 1.6 */
+ strncpy (name, g_quark_to_string (field_id), sizeof (name));
+ name[31] = '\0';
+ gst_v4l2_normalise_control_name (name);
+ normalised_field_id = g_quark_from_string (name);
+ if (normalised_field_id != field_id)
+ g_warning ("In GStreamer 1.4 the way V4L2 control names were normalised "
+ "changed. Instead of setting \"%s\" please use \"%s\". The former is "
+ "deprecated and will be removed in a future version of GStreamer",
+ g_quark_to_string (field_id), name);
+ field_id = normalised_field_id;
+
+ d = g_datalist_id_get_data (&v4l2object->controls, field_id);
+ if (!d) {
+ /* FIX: pass the debug object (dbg_obj), not the plain GstV4l2Object
+ * struct, to GST_WARNING_OBJECT -- consistent with this file. */
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "Control '%s' does not exist or has an unsupported type.",
+ g_quark_to_string (field_id));
+ return TRUE;
+ }
+ if (!G_VALUE_HOLDS (value, G_TYPE_INT)) {
+ GST_WARNING_OBJECT (v4l2object->dbg_obj,
+ "'int' value expected for control '%s'.", g_quark_to_string (field_id));
+ return TRUE;
+ }
+ gst_v4l2_set_attribute (v4l2object, GPOINTER_TO_INT (d),
+ g_value_get_int (value));
+ return TRUE;
+}
+
+/* gst_v4l2_set_controls():
+ * Apply every member of the @controls structure (control-name -> int value)
+ * to the device via set_control(). Returns the result of
+ * gst_structure_foreach(), i.e. TRUE unless iteration was aborted.
+ */
+gboolean
+gst_v4l2_set_controls (GstV4l2Object * v4l2object, GstStructure * controls)
+{
+ return gst_structure_foreach (controls, set_control, v4l2object);
+}
+
+gboolean
+gst_v4l2_get_input (GstV4l2Object * v4l2object, gint * input)
+{
+ gint n;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "trying to get input");
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ return FALSE;
+
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_G_INPUT, &n) < 0) {
+ /* only give a warning message if driver actually claims to have tuner
+ * support
+ */
+ if (v4l2object->device_caps & V4L2_CAP_TUNER) {
+ GST_ELEMENT_WARNING (v4l2object->element, RESOURCE, SETTINGS,
+ (_("Failed to get current input on device '%s'. May be it is a radio device"), v4l2object->videodev), GST_ERROR_SYSTEM);
+ }
+ return FALSE;
+ }
+
+ *input = n;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "input: %d", n);
+
+ return TRUE;
+}
+
+gboolean
+gst_v4l2_set_input (GstV4l2Object * v4l2object, gint input)
+{
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "trying to set input to %d", input);
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ return FALSE;
+
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_INPUT, &input) < 0) {
+ /* only give a warning message if driver actually claims to have tuner
+ * support
+ */
+ if (v4l2object->device_caps & V4L2_CAP_TUNER) {
+ GST_ELEMENT_WARNING (v4l2object->element, RESOURCE, SETTINGS,
+ (_("Failed to set input %d on device %s."),
+ input, v4l2object->videodev), GST_ERROR_SYSTEM);
+ }
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+gboolean
+gst_v4l2_get_output (GstV4l2Object * v4l2object, gint * output)
+{
+ gint n;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "trying to get output");
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ return FALSE;
+
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_G_OUTPUT, &n) < 0) {
+ /* only give a warning message if driver actually claims to have tuner
+ * support
+ */
+ if (v4l2object->device_caps & V4L2_CAP_TUNER) {
+ GST_ELEMENT_WARNING (v4l2object->element, RESOURCE, SETTINGS,
+ (_("Failed to get current output on device '%s'. May be it is a radio device"), v4l2object->videodev), GST_ERROR_SYSTEM);
+ }
+ return FALSE;
+ }
+
+ *output = n;
+
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "output: %d", n);
+
+ return TRUE;
+}
+
+gboolean
+gst_v4l2_set_output (GstV4l2Object * v4l2object, gint output)
+{
+ GST_DEBUG_OBJECT (v4l2object->dbg_obj, "trying to set output to %d", output);
+
+ if (!GST_V4L2_IS_OPEN (v4l2object))
+ return FALSE;
+
+ if (v4l2object->ioctl (v4l2object->video_fd, VIDIOC_S_OUTPUT, &output) < 0) {
+ /* only give a warning message if driver actually claims to have tuner
+ * support
+ */
+ if (v4l2object->device_caps & V4L2_CAP_TUNER) {
+ GST_ELEMENT_WARNING (v4l2object->element, RESOURCE, SETTINGS,
+ (_("Failed to set output %d on device %s."),
+ output, v4l2object->videodev), GST_ERROR_SYSTEM);
+ }
+ return FALSE;
+ }
+
+ return TRUE;
+}
diff --git a/nvbufsurface.h b/nvbufsurface.h
new file mode 100644
index 0000000..aa38892
--- /dev/null
+++ b/nvbufsurface.h
@@ -0,0 +1,858 @@
+/*
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA Corporation and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA Corporation is strictly prohibited.
+ */
+
+/**
+ * @file nvbufsurface.h
+ * NvBufSurface Interface
+ *
+ * This file specifies the NvBufSurface management API.
+ *
+ * The NvBufSurface API provides methods to allocate / deallocate, map / unmap
+ * and copy batched buffers.
+ */
+ /**
+ * @defgroup ds_nvbuf_api Buffer Management API module
+ *
+ * This section describes types and functions of NvBufSurface application
+ * programming interface.
+ *
+ */
+
+#ifndef NVBUFSURFACE_H_
+#define NVBUFSURFACE_H_
+
+/* FIX: the two include directives below had their header names lost
+ * (they read bare "#include"). This header uses uint32_t/uint64_t and
+ * bool throughout, which require these standard headers. */
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/** @defgroup ds_aaa NvBufSurface Types and Functions
+ * Defines types and functions of \ref NvBufSurface application
+ * programming interface.
+ * @ingroup ds_nvbuf_api
+ * @{ */
+
+/** Defines the default padding length for reserved fields of structures.
+ Used as the element count of the trailing `_reserved` arrays that keep
+ the public structs ABI-extensible. */
+#define STRUCTURE_PADDING 4
+
+/** Defines the maximum number of planes. */
+#define NVBUF_MAX_PLANES 4
+
+/**
+ * Defines the default values for chroma subsampling.
+ * The default value matches JPEG/MPEG use cases.
+ */
+#define NVBUFSURFACE_CHROMA_SUBSAMPLING_HORIZ_DEFAULT 0
+#define NVBUFSURFACE_CHROMA_SUBSAMPLING_VERT_DEFAULT 1
+
+/** Default initializer for \ref NvBufSurfaceChromaSubsamplingParams. */
+#define NVBUFSURFACE_CHROMA_SUBSAMPLING_PARAMS_DEFAULT \
+ { \
+ NVBUFSURFACE_CHROMA_SUBSAMPLING_HORIZ_DEFAULT, \
+ NVBUFSURFACE_CHROMA_SUBSAMPLING_VERT_DEFAULT \
+ }
+
+/**
+ * Defines mapping types of NvBufSurface.
+ * States the intended access mode when a buffer is mapped.
+ */
+typedef enum
+{
+ NVBUF_MAP_READ, /**< Specifies \ref NvBufSurface mapping type "read." */
+ NVBUF_MAP_WRITE, /**< Specifies \ref NvBufSurface mapping type
+ "write." */
+ NVBUF_MAP_READ_WRITE, /**< Specifies \ref NvBufSurface mapping type
+ "read/write." */
+} NvBufSurfaceMemMapFlags;
+
+/**
+ * Defines tags that identify the components requesting a memory allocation.
+ * The tags can be used later to identify the total memory allocated to
+ * particular types of components.
+ * TODO: Check if DeepStream require more tags to be defined.
+ */
+typedef enum
+{
+ /** Specifies no tag (default). */
+ NvBufSurfaceTag_NONE = 0x0,
+ /** tag for Camera. */
+ NvBufSurfaceTag_CAMERA = 0x200,
+ /** tag for Jpeg Encoder/Decoder. */
+ NvBufSurfaceTag_JPEG = 0x1500,
+ /** tag for VPR Buffers. */
+ NvBufSurfaceTag_PROTECTED = 0x1504,
+ /** tag for H264/H265 Video Encoder. */
+ NvBufSurfaceTag_VIDEO_ENC = 0x1200,
+ /** tag for H264/H265/VP9 Video Decoder. */
+ NvBufSurfaceTag_VIDEO_DEC = 0x1400,
+ /** tag for Video Transform/Composite/Blend. */
+ NvBufSurfaceTag_VIDEO_CONVERT = 0xf01,
+} NvBufSurfaceTag;
+
+/**
+ * Defines color formats for NvBufSurface.
+ */
+typedef enum
+{
+ /** Specifies an invalid color format. */
+ NVBUF_COLOR_FORMAT_INVALID,
+ /** Specifies 8 bit GRAY scale - single plane */
+ NVBUF_COLOR_FORMAT_GRAY8,
+ /** Specifies BT.601 colorspace - YUV420 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV420,
+ /** Specifies BT.601 colorspace - YVU420 multi-planar. */
+ NVBUF_COLOR_FORMAT_YVU420,
+ /** Specifies BT.601 colorspace - YUV420 ER multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV420_ER,
+ /** Specifies BT.601 colorspace - YVU420 ER multi-planar. */
+ NVBUF_COLOR_FORMAT_YVU420_ER,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:2:0 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12,
+ /** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_ER,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:2:0 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV21,
+ /** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV21_ER,
+ /** Specifies BT.601 colorspace - YUV 4:2:2 interleaved. */
+ NVBUF_COLOR_FORMAT_UYVY,
+ /** Specifies BT.601 colorspace - YUV ER 4:2:2 interleaved. */
+ NVBUF_COLOR_FORMAT_UYVY_ER,
+ /** Specifies BT.601 colorspace - YUV 4:2:2 interleaved. */
+ NVBUF_COLOR_FORMAT_VYUY,
+ /** Specifies BT.601 colorspace - YUV ER 4:2:2 interleaved. */
+ NVBUF_COLOR_FORMAT_VYUY_ER,
+ /** Specifies BT.601 colorspace - YUV 4:2:2 interleaved. */
+ NVBUF_COLOR_FORMAT_YUYV,
+ /** Specifies BT.601 colorspace - YUV ER 4:2:2 interleaved. */
+ NVBUF_COLOR_FORMAT_YUYV_ER,
+ /** Specifies BT.601 colorspace - YUV 4:2:2 interleaved. */
+ NVBUF_COLOR_FORMAT_YVYU,
+ /** Specifies BT.601 colorspace - YUV ER 4:2:2 interleaved. */
+ NVBUF_COLOR_FORMAT_YVYU_ER,
+ /** Specifies BT.601 colorspace - YUV444 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444,
+ /** Specifies RGBA-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_RGBA,
+ /** Specifies BGRA-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_BGRA,
+ /** Specifies ARGB-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_ARGB,
+ /** Specifies ABGR-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_ABGR,
+ /** Specifies RGBx-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_RGBx,
+ /** Specifies BGRx-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_BGRx,
+ /** Specifies xRGB-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_xRGB,
+ /** Specifies xBGR-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_xBGR,
+ /** Specifies RGB-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_RGB,
+ /** Specifies BGR-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_BGR,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_10LE,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_12LE,
+ /** Specifies BT.709 colorspace - YUV420 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV420_709,
+ /** Specifies BT.709 colorspace - YUV420 ER multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV420_709_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:2:0 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_709,
+ /** Specifies BT.709 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_709_ER,
+ /** Specifies BT.2020 colorspace - YUV420 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV420_2020,
+ /** Specifies BT.2020 colorspace - Y/CbCr 4:2:0 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_2020,
+ /** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_10LE_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_10LE_709,
+ /** Specifies BT.709 colorspace - Y/CbCr ER 4:2:0 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_10LE_709_ER,
+ /** Specifies BT.2020 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_10LE_2020,
+ /** Specifies color format for packed 2 signed shorts */
+ NVBUF_COLOR_FORMAT_SIGNED_R16G16,
+ /** Specifies RGB- unsigned 8 bit multiplanar plane. */
+ NVBUF_COLOR_FORMAT_R8_G8_B8,
+ /** Specifies BGR- unsigned 8 bit multiplanar plane. */
+ NVBUF_COLOR_FORMAT_B8_G8_R8,
+ /** Specifies RGB-32bit Floating point multiplanar plane. */
+ NVBUF_COLOR_FORMAT_R32F_G32F_B32F,
+ /** Specifies BGR-32bit Floating point multiplanar plane. */
+ NVBUF_COLOR_FORMAT_B32F_G32F_R32F,
+ /** Specifies BT.601 colorspace - YUV422 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV422,
+ /** Specifies BT.601 colorspace - Y/CrCb 4:2:0 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV21_10LE,
+ /** Specifies BT.601 colorspace - Y/CrCb 4:2:0 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV21_12LE,
+ /** Specifies BT.2020 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_12LE_2020,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:2:2 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV16,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:2:2 10-bit semi-planar. */
+ NVBUF_COLOR_FORMAT_NV16_10LE,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24,
+ /** Specifies BT.601 colorspace - Y/CrCb 4:4:4 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_10LE,
+ /** Specifies BT.601_ER colorspace - Y/CbCr 4:2:2 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV16_ER,
+ /** Specifies BT.601_ER colorspace - Y/CbCr 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:2:2 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV16_709,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_709,
+ /** Specifies BT.709_ER colorspace - Y/CbCr 4:2:2 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV16_709_ER,
+ /** Specifies BT.709_ER colorspace - Y/CbCr 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_709_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_10LE_709,
+ /** Specifies BT.709 ER colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_10LE_709_ER,
+ /** Specifies BT.2020 colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_10LE_2020,
+ /** Specifies BT.2020 colorspace - Y/CbCr 12 bit 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_12LE_2020,
+ /** Specifies Non-linear RGB BT.709 colorspace - RGBA-10-10-10-2 planar. */
+ NVBUF_COLOR_FORMAT_RGBA_10_10_10_2_709,
+ /** Specifies Non-linear RGB BT.2020 colorspace - RGBA-10-10-10-2 planar. */
+ NVBUF_COLOR_FORMAT_RGBA_10_10_10_2_2020,
+ /** Specifies Non-linear RGB BT.709 colorspace - BGRA-10-10-10-2 planar. */
+ NVBUF_COLOR_FORMAT_BGRA_10_10_10_2_709,
+ /** Specifies Non-linear RGB BT.2020 colorspace - BGRA-10-10-10-2 planar. */
+ NVBUF_COLOR_FORMAT_BGRA_10_10_10_2_2020,
+ /** Specifies Optical flow SAD calculation Buffer format */
+ NVBUF_COLOR_FORMAT_A32,
+ /** Specifies BT.601 colorspace - 10 bit YUV 4:2:2 interleaved. */
+ NVBUF_COLOR_FORMAT_UYVP,
+ /** Specifies BT.601 colorspace - 10 bit YUV ER 4:2:2 interleaved. */
+ NVBUF_COLOR_FORMAT_UYVP_ER,
+ /** Specifies BT.601 colorspace - Y/CbCr ER 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_709,
+ /** Specifies BT.709 colorspace - Y/CbCr ER 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_709_ER,
+ /** Specifies BT.2020 colorspace - Y/CbCr 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_2020,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:4:4 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_10LE,
+ /** Specifies BT.601 colorspace - Y/CbCr ER 4:4:4 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_10LE_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:4:4 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_10LE_709,
+ /** Specifies BT.709 colorspace - Y/CbCr ER 4:4:4 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_10LE_709_ER,
+ /** Specifies BT.2020 colorspace - Y/CbCr 4:4:4 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_10LE_2020,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:4:4 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_12LE,
+ /** Specifies BT.601 colorspace - Y/CbCr ER 4:4:4 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_12LE_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:4:4 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_12LE_709,
+ /** Specifies BT.709 colorspace - Y/CbCr ER 4:4:4 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_12LE_709_ER,
+ /** Specifies BT.2020 colorspace - Y/CbCr 4:4:4 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_12LE_2020,
+ /** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_12LE_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_12LE_709,
+ /** Specifies BT.709 colorspace - Y/CbCr ER 4:2:0 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_12LE_709_ER,
+ /** Sentinel value; marks the end of the defined color formats. */
+ NVBUF_COLOR_FORMAT_LAST
+} NvBufSurfaceColorFormat;
+
+/**
+ * Specifies layout formats for \ref NvBufSurface video planes.
+ */
+typedef enum
+{
+ /** Specifies pitch layout. */
+ NVBUF_LAYOUT_PITCH,
+ /** Specifies block linear layout. */
+ NVBUF_LAYOUT_BLOCK_LINEAR,
+} NvBufSurfaceLayout;
+
+/**
+ * Specifies memory types for \ref NvBufSurface.
+ */
+typedef enum
+{
+ /** Specifies the default memory type, i.e. \ref NVBUF_MEM_CUDA_DEVICE
+ for dGPU, \ref NVBUF_MEM_SURFACE_ARRAY for Jetson. Use \ref NVBUF_MEM_DEFAULT
+ to allocate whichever type of memory is appropriate for the platform. */
+ NVBUF_MEM_DEFAULT,
+ /** Specifies CUDA Host memory type. */
+ NVBUF_MEM_CUDA_PINNED,
+ /** Specifies CUDA Device memory type. */
+ NVBUF_MEM_CUDA_DEVICE,
+ /** Specifies CUDA Unified memory type. */
+ NVBUF_MEM_CUDA_UNIFIED,
+ /** Specifies NVRM Surface Array type. Valid only for Jetson. */
+ NVBUF_MEM_SURFACE_ARRAY,
+ /** Specifies NVRM Handle type. Valid only for Jetson. */
+ NVBUF_MEM_HANDLE,
+ /** Specifies memory allocated by malloc(). */
+ NVBUF_MEM_SYSTEM,
+} NvBufSurfaceMemType;
+
+/**
+ * Defines display scan formats for NvBufSurface video planes.
+ */
+typedef enum
+{
+ /** Progressive scan formats. */
+ NVBUF_DISPLAYSCANFORMAT_PROGRESSIVE,
+ /** Interlaced scan formats. */
+ NVBUF_DISPLAYSCANFORMAT_INTERLACED,
+} NvBufSurfaceDisplayScanFormat;
+
+/**
+ * Holds plane-wise parameters (extended) of a buffer.
+ */
+typedef struct NvBufSurfacePlaneParamsEx
+{
+ /** display scan format - progressive/interlaced. */
+ NvBufSurfaceDisplayScanFormat scanformat[NVBUF_MAX_PLANES];
+ /** offset of the second field for interlaced buffer. */
+ uint32_t secondfieldoffset[NVBUF_MAX_PLANES];
+ /** block height of the planes for blockLinear layout buffer. */
+ uint32_t blockheightlog2[NVBUF_MAX_PLANES];
+ /** physical address of allocated planes.
+ NOTE(review): stored as uint32_t -- presumably sufficient on the
+ targeted platforms; confirm for 64-bit physical addressing. */
+ uint32_t physicaladdress[NVBUF_MAX_PLANES];
+ /** flags associated with planes */
+ uint64_t flags[NVBUF_MAX_PLANES];
+
+ /* reserved for ABI-compatible extension */
+ void * _reserved[STRUCTURE_PADDING * NVBUF_MAX_PLANES];
+} NvBufSurfacePlaneParamsEx;
+
+/**
+ * Holds plane-wise parameters of a buffer.
+ */
+typedef struct NvBufSurfacePlaneParams
+{
+ /** Holds the number of planes. */
+ uint32_t num_planes;
+ /** Holds the widths of planes. */
+ uint32_t width[NVBUF_MAX_PLANES];
+ /** Holds the heights of planes. */
+ uint32_t height[NVBUF_MAX_PLANES];
+ /** Holds the pitches of planes in bytes. */
+ uint32_t pitch[NVBUF_MAX_PLANES];
+ /** Holds the offsets of planes in bytes. */
+ uint32_t offset[NVBUF_MAX_PLANES];
+ /** Holds the sizes of planes in bytes. */
+ uint32_t psize[NVBUF_MAX_PLANES];
+ /** Holds the number of bytes occupied by a pixel in each plane. */
+ uint32_t bytesPerPix[NVBUF_MAX_PLANES];
+
+ /* reserved for ABI-compatible extension */
+ void * _reserved[STRUCTURE_PADDING * NVBUF_MAX_PLANES];
+} NvBufSurfacePlaneParams;
+
+
+/**
+ * Holds Chroma Subsampling parameters for NvBufSurface allocation.
+ * Default values are given by
+ * \ref NVBUFSURFACE_CHROMA_SUBSAMPLING_PARAMS_DEFAULT.
+ */
+typedef struct NvBufSurfaceChromaSubsamplingParams
+{
+ /** horizontal chroma siting/location setting */
+ uint8_t chromaLocHoriz;
+ /** vertical chroma siting/location setting */
+ uint8_t chromaLocVert;
+} NvBufSurfaceChromaSubsamplingParams;
+
+/**
+ * Holds parameters required to allocate an \ref NvBufSurface.
+ */
+typedef struct NvBufSurfaceCreateParams {
+ /** Holds the GPU ID. Valid only for a multi-GPU system. */
+ uint32_t gpuId;
+ /** Holds the width of the buffer. */
+ uint32_t width;
+ /** Holds the height of the buffer. */
+ uint32_t height;
+ /** Holds the amount of memory to be allocated. Optional; if set, all other
+ parameters (width, height, etc.) are ignored. */
+ uint32_t size;
+ /** Holds a "contiguous memory" flag. If set, contiguous memory is allocated
+ for the batch. Valid only for CUDA memory types. */
+ bool isContiguous;
+ /** Holds the color format of the buffer. */
+ NvBufSurfaceColorFormat colorFormat;
+ /** Holds the surface layout. May be Block Linear (BL) or Pitch Linear (PL).
+ For a dGPU, only PL is valid. */
+ NvBufSurfaceLayout layout;
+ /** Holds the type of memory to be allocated. */
+ NvBufSurfaceMemType memType;
+} NvBufSurfaceCreateParams;
+
+/**
+ * Holds extended parameters required to allocate NvBufSurface.
+ * (Applicable for NvBufSurfaceAllocate API)
+ */
+typedef struct NvBufSurfaceAllocateParams {
+ /** Holds legacy NvBufSurface creation parameters */
+ NvBufSurfaceCreateParams params;
+ /** Display scan format */
+ NvBufSurfaceDisplayScanFormat displayscanformat;
+ /** Chroma Subsampling parameters */
+ NvBufSurfaceChromaSubsamplingParams chromaSubsampling;
+ /** components tag to be used for memory allocation */
+ NvBufSurfaceTag memtag;
+ /** disable pitch padding allocation only applicable for cuda and system memory allocation
+ pitch would be width times bytes per pixel for the plane, for odd width it would be
+ multiple of 2, also note for some non standard video resolution cuda kernels may fail
+ due to unaligned pitch
+ */
+ bool disablePitchPadding;
+ /** Used void* from custom param for 64 bit machine, using other uint32_t param */
+ uint32_t _reservedParam;
+
+ /* reserved for ABI-compatible extension (one slot consumed above) */
+ void * _reserved[STRUCTURE_PADDING-1];
+} NvBufSurfaceAllocateParams;
+
+/**
+ * Holds the pointers of a mapped buffer.
+ */
+typedef struct NvBufSurfaceMappedAddr {
+ /** Holds planewise pointers to a CPU mapped buffer. */
+ void * addr[NVBUF_MAX_PLANES];
+ /** Holds a pointer to a mapped EGLImage. */
+ void *eglImage;
+
+ /* reserved for ABI-compatible extension */
+ void * _reserved[STRUCTURE_PADDING];
+} NvBufSurfaceMappedAddr;
+
+/**
+ * Holds extended information about a single buffer in the batch.
+ */
+typedef struct NvBufSurfaceParamsEx {
+ /** offset in bytes from the start of the buffer to the first valid byte.
+ (Applicable for NVBUF_MEM_HANDLE) */
+ int32_t startofvaliddata;
+ /** size of the valid data from the first to the last valid byte.
+ (Applicable for NVBUF_MEM_HANDLE) */
+ int32_t sizeofvaliddatainbytes;
+ /** chroma subsampling parameters.
+ (Applicable for NVBUF_MEM_SURFACE_ARRAY) */
+ NvBufSurfaceChromaSubsamplingParams chromaSubsampling;
+ /** get buffer vpr (protected-region) information. */
+ bool is_protected;
+ /** plane wise extended info */
+ NvBufSurfacePlaneParamsEx planeParamsex;
+
+ /* reserved for ABI-compatible extension */
+ void * _reserved[STRUCTURE_PADDING];
+} NvBufSurfaceParamsEx;
+
+/**
+ * Holds information about a single buffer in the batch.
+ */
+typedef struct NvBufSurfaceParams {
+ /** Holds the width of the buffer. */
+ uint32_t width;
+ /** Holds the height of the buffer. */
+ uint32_t height;
+ /** Holds the pitch of the buffer. */
+ uint32_t pitch;
+ /** Holds the color format of the buffer. */
+ NvBufSurfaceColorFormat colorFormat;
+ /** Holds BL or PL. For dGPU, only PL is valid. */
+ NvBufSurfaceLayout layout;
+ /** Holds a DMABUF FD. Valid only for \ref NVBUF_MEM_SURFACE_ARRAY and
+ \ref NVBUF_MEM_HANDLE type memory. */
+ uint64_t bufferDesc;
+ /** Holds the amount of allocated memory. */
+ uint32_t dataSize;
+ /** Holds a pointer to allocated memory. Not valid for
+ \ref NVBUF_MEM_SURFACE_ARRAY or \ref NVBUF_MEM_HANDLE. */
+ void * dataPtr;
+ /** Holds planewise information (width, height, pitch, offset, etc.). */
+ NvBufSurfacePlaneParams planeParams;
+ /** Holds pointers to mapped buffers. Initialized to NULL
+ when the structure is created. */
+ NvBufSurfaceMappedAddr mappedAddr;
+ /** pointers of extended parameters of single buffer in the batch.*/
+ NvBufSurfaceParamsEx *paramex;
+
+ /* reserved for ABI-compatible extension (one slot consumed above) */
+ void * _reserved[STRUCTURE_PADDING - 1];
+} NvBufSurfaceParams;
+
+/**
+ * Holds information about batched buffers.
+ */
+typedef struct NvBufSurface {
+ /** Holds a GPU ID. Valid only for a multi-GPU system. */
+ uint32_t gpuId;
+ /** Holds the batch size. */
+ uint32_t batchSize;
+ /** Holds the number of valid and filled buffers. Initialized to zero when
+ an instance of the structure is created. */
+ uint32_t numFilled;
+ /** Holds an "is contiguous" flag. If set, memory allocated for the batch
+ is contiguous. */
+ bool isContiguous;
+ /** Holds type of memory for buffers in the batch. */
+ NvBufSurfaceMemType memType;
+ /** Holds a pointer to an array of batched buffers. */
+ NvBufSurfaceParams *surfaceList;
+
+ /* reserved for ABI-compatible extension */
+ void * _reserved[STRUCTURE_PADDING];
+} NvBufSurface;
+
+/**
+ * Holds plane parameters to map the buffer received from another process.
+ */
+typedef struct NvBufSurfaceMapPlaneParams
+{
+ /** Holds the width of the plane */
+ uint32_t width;
+ /** Holds the height of the plane */
+ uint32_t height;
+ /** Holds the pitch of the plane in bytes */
+ uint32_t pitch;
+ /** Holds the offset of the plane in bytes */
+ uint32_t offset;
+ /** Holds the size of the plane in bytes */
+ uint32_t psize;
+ /** Holds the offset of the second field for an interlaced buffer */
+ uint32_t secondfieldoffset;
+ /** Holds log2 of the block height of the plane for a block-linear layout buffer */
+ uint32_t blockheightlog2;
+ /** Holds flags associated with the plane */
+ uint64_t flags;
+ /** Reserved */
+ uint8_t reserved[64];
+} NvBufSurfaceMapPlaneParams;
+
+/**
+ * Holds buffer parameters to map the buffer received from another process.
+ */
+typedef struct NvBufSurfaceMapParams {
+ /** Holds the number of planes. */
+ uint32_t num_planes;
+ /** Holds the GPU ID. */
+ uint32_t gpuId;
+ /** Holds the DMABUF FD of the buffer. */
+ uint64_t fd;
+ /** Holds the total size of the allocated memory. */
+ uint32_t totalSize;
+ /** Holds the type of memory. */
+ NvBufSurfaceMemType memType;
+ /** Holds the buffer layout (BL or PL). */
+ NvBufSurfaceLayout layout;
+ /** Holds the display scan format. */
+ NvBufSurfaceDisplayScanFormat scanformat;
+ /** Holds the color format. */
+ NvBufSurfaceColorFormat colorFormat;
+ /** Holds chroma subsampling parameters. */
+ NvBufSurfaceChromaSubsamplingParams chromaSubsampling;
+ /** Holds per-plane parameters; presumably only the first num_planes entries are valid. */
+ NvBufSurfaceMapPlaneParams planes[NVBUF_MAX_PLANES];
+ /** Reserved */
+ uint8_t reserved[64];
+} NvBufSurfaceMapParams;
+
+/**
+ * \brief Allocates a batch of buffers.
+ *
+ * Allocates memory for \a batchSize buffers and returns a pointer to an
+ * allocated \ref NvBufSurface. The \a params structure must have
+ * the allocation parameters of a single buffer. If \a params.size
+ * is set, a buffer of that size is allocated, and all other
+ * parameters (width, height, color format, etc.) are ignored.
+ *
+ * Call NvBufSurfaceDestroy() to free resources allocated by this function.
+ *
+ * @param[out] surf An indirect pointer to the allocated batched
+ * buffers.
+ * @param[in] batchSize Batch size of buffers.
+ * @param[in] params A pointer to an \ref NvBufSurfaceCreateParams
+ * structure.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceCreate (NvBufSurface **surf, uint32_t batchSize,
+ NvBufSurfaceCreateParams *params);
+
+/**
+ * \brief Allocates a batch of buffers using extended buffer allocation parameters.
+ *
+ * Allocates memory for batchSize buffers and returns in *surf a pointer to allocated NvBufSurface.
+ * params structure should have allocation parameters of single buffer. If size field in
+ * params is set, buffer of that size will be allocated and all other
+ * parameters (w, h, color format etc.) will be ignored.
+ *
+ * Use NvBufSurfaceDestroy to free all the resources.
+ *
+ * @param[out] surf pointer to allocated batched buffers.
+ * @param[in] batchSize batch size of buffers.
+ * @param[in] paramsext pointer to NvBufSurfaceAllocateParams structure.
+ *
+ * @return 0 for success, -1 for failure.
+ */
+int NvBufSurfaceAllocate (NvBufSurface **surf, uint32_t batchSize,
+ NvBufSurfaceAllocateParams *paramsext);
+
+/**
+ * Frees the batched buffers previously allocated through NvBufSurfaceCreate() or NvBufSurfaceAllocate().
+ *
+ * @param[in] surf A pointer to an \ref NvBufSurface to be freed.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceDestroy (NvBufSurface *surf);
+
+/**
+ * \brief Maps hardware batched buffers to the HOST or CPU address space.
+ *
+ * Valid for \ref NVBUF_MEM_CUDA_UNIFIED type memory for dGPU and
+ * \ref NVBUF_MEM_SURFACE_ARRAY and \ref NVBUF_MEM_HANDLE type memory for
+ * Jetson.
+ *
+ * This function fills an array of pointers at
+ * \a surf->surfaceList->mappedAddr->addr.
+ * \a surf is a pointer to an \ref NvBufSurface.
+ * \a surfaceList is a pointer to an \ref NvBufSurfaceParams.
+ * \a mappedAddr is a pointer to an \ref NvBufSurfaceMappedAddr.
+ * \a addr is declared as an array of pointers to void, and holds pointers
+ * to the buffers.
+ *
+ * The client must call NvBufSurfaceSyncForCpu() with the virtual address
+ * populated by this function before accessing mapped memory in the CPU.
+ *
+ * After memory mapping is complete, mapped memory modification
+ * must be coordinated between the CPU and the hardware device as
+ * follows:
+ * - CPU: If the CPU modifies mapped memory, the client must call
+ * NvBufSurfaceSyncForDevice() before any hardware device accesses the memory.
+ * - Hardware device: If a hardware device modifies mapped memory, the client
+ * must call NvBufSurfaceSyncForCpu() before the CPU accesses the memory.
+ *
+ * Use NvBufSurfaceUnMap() to unmap buffer(s) and release any resource.
+ *
+ * @param[in,out] surf A pointer to an NvBufSurface structure. The function
+ * stores pointers to the buffers in a descendant of this
+ * structure; see the notes above.
+ * @param[in] index Index of a buffer in the batch. -1 refers to all buffers
+ * in the batch.
+ * @param[in] plane Index of a plane in buffer. -1 refers to all planes
+ * in the buffer.
+ * @param[in] type A flag for mapping type.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceMap (NvBufSurface *surf, int index, int plane, NvBufSurfaceMemMapFlags type);
+
+/**
+ * \brief Unmaps previously mapped buffer(s).
+ *
+ * @param[in] surf A pointer to an \ref NvBufSurface structure.
+ * @param[in] index Index of a buffer in the batch. -1 indicates
+ * all buffers in the batch.
+ * @param[in] plane Index of a plane in the buffer. -1 indicates
+ * all planes in the buffer.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceUnMap (NvBufSurface *surf, int index, int plane);
+
+/**
+ * \brief Copies the content of source batched buffer(s) to destination
+ * batched buffer(s).
+ *
+ * You can use this function to copy source buffer(s) of one memory type
+ * to destination buffer(s) of another memory type,
+ * e.g. CUDA host to CUDA device, malloc'ed memory to CUDA device, etc.
+ *
+ * The source and destination \ref NvBufSurface objects must have same
+ * buffer and batch size.
+ *
+ * @param[in] srcSurf A pointer to the source NvBufSurface structure.
+ * @param[in] dstSurf A pointer to the destination NvBufSurface structure.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceCopy (NvBufSurface *srcSurf, NvBufSurface *dstSurf);
+
+/**
+ * \brief Copies the NvBufSurface plane memory content to a raw buffer plane for a specific
+ * batched buffer.
+ *
+ * This function can be used to copy plane memory content from a specific source
+ * batch buffer to the destination raw buffer pointer, for supported memory types.
+ *
+ * @param[in] Surf pointer to NvBufSurface structure.
+ * @param[in] index index of buffer in the batch.
+ * @param[in] plane index of plane in buffer.
+ * @param[in] outwidth aligned width of the raw data plane.
+ * @param[in] outheight aligned height of the raw data plane.
+ * @param[out] ptr pointer to the output raw plane data.
+ *
+ * @return 0 for success, -1 for failure.
+ */
+int NvBufSurface2Raw (NvBufSurface *Surf, unsigned int index, unsigned int plane, unsigned int outwidth, unsigned int outheight, unsigned char *ptr);
+
+/**
+ * \brief Copies the raw buffer plane memory content to the NvBufSurface plane memory of a specific
+ * batched buffer.
+ *
+ * This function can be used to copy plane memory content from the source raw
+ * buffer pointer to a specific destination batch buffer of supported memory type.
+ *
+ * @param[in] ptr pointer to the input raw plane data.
+ * @param[in] index index of buffer in the batch.
+ * @param[in] plane index of plane in buffer.
+ * @param[in] inwidth aligned width of the raw data plane.
+ * @param[in] inheight aligned height of the raw data plane.
+ * @param[in,out] Surf pointer to NvBufSurface structure whose plane memory is written.
+ *
+ * @return 0 for success, -1 for failure.
+ */
+int Raw2NvBufSurface (unsigned char *ptr, unsigned int index, unsigned int plane, unsigned int inwidth, unsigned int inheight, NvBufSurface *Surf);
+
+/**
+ * Syncs the HW memory cache for the CPU.
+ *
+ * Valid only for memory types \ref NVBUF_MEM_SURFACE_ARRAY and
+ * \ref NVBUF_MEM_HANDLE.
+ *
+ * @param[in] surf A pointer to an \ref NvBufSurface structure.
+ * @param[in] index Index of the buffer in the batch. -1 refers to
+ * all buffers in the batch.
+ * @param[in] plane Index of a plane in the buffer. -1 refers to all planes
+ * in the buffer.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceSyncForCpu (NvBufSurface *surf, int index, int plane);
+
+/**
+ * \brief Syncs the hardware memory cache for the device.
+ *
+ * Valid only for memory types \ref NVBUF_MEM_SURFACE_ARRAY and
+ * \ref NVBUF_MEM_HANDLE.
+ *
+ * @param[in] surf A pointer to an \ref NvBufSurface structure.
+ * @param[in] index Index of a buffer in the batch. -1 refers to all buffers
+ * in the batch.
+ * @param[in] plane Index of a plane in the buffer. -1 refers to all planes
+ * in the buffer.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceSyncForDevice (NvBufSurface *surf, int index, int plane);
+
+/**
+ * \brief Gets the \ref NvBufSurface from the DMABUF FD.
+ *
+ * @param[in] dmabuf_fd DMABUF FD of the buffer.
+ * @param[out] buffer A pointer to the NvBufSurface.
+ *
+ * @return 0 for success, or -1 otherwise.
+ */
+int NvBufSurfaceFromFd (int dmabuf_fd, void **buffer);
+
+/**
+ * \brief Fills each byte of the buffer(s) in an \ref NvBufSurface with a
+ * provided value.
+ *
+ * You can also use this function to reset the buffer(s) in the batch.
+ *
+ * @param[in] surf A pointer to the NvBufSurface structure.
+ * @param[in] index Index of a buffer in the batch. -1 refers to all buffers
+ * in the batch.
+ * @param[in] plane Index of a plane in the buffer. -1 refers to all planes
+ * in the buffer.
+ * @param[in] value The value to be used as fill.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceMemSet (NvBufSurface *surf, int index, int plane, uint8_t value);
+
+/**
+ * \brief Creates an EGLImage from the memory of one or more
+ * \ref NvBufSurface buffers.
+ *
+ * Only memory type \ref NVBUF_MEM_SURFACE_ARRAY is supported.
+ *
+ * This function returns the created EGLImage by storing its address at
+ * \a surf->surfaceList->mappedAddr->eglImage. (\a surf is a pointer to
+ * an NvBufSurface. \a surfaceList is a pointer to an \ref NvBufSurfaceParams.
+ * \a mappedAddr is a pointer to an \ref NvBufSurfaceMappedAddr.
+ * \a eglImage is declared as a pointer to void, and holds an
+ * EGLImageKHR.)
+ *
+ * You can use this function in scenarios where a CUDA operation on Jetson
+ * hardware memory (identified by \ref NVBUF_MEM_SURFACE_ARRAY) is required.
+ * The EGLImageKHR struct provided by this function can then be registered
+ * with CUDA for further CUDA operations.
+ *
+ * @param[in,out] surf A pointer to an NvBufSurface structure. The function
+ * stores a pointer to the created EGLImage in
+ * a descendant of this structure; see the notes above.
+ * @param[in] index Index of a buffer in the batch. -1 specifies all buffers
+ * in the batch.
+ *
+ * @return 0 for success, or -1 otherwise.
+ */
+int NvBufSurfaceMapEglImage (NvBufSurface *surf, int index);
+
+/**
+ * \brief Destroys the previously created EGLImage object(s).
+ *
+ * @param[in] surf A pointer to an \ref NvBufSurface structure.
+ * @param[in] index The index of a buffer in the batch. -1 specifies all
+ * buffers in the batch.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceUnMapEglImage (NvBufSurface *surf, int index);
+
+/**
+ * \brief Import parameters received from another process and create hardware buffer.
+ *
+ * The calling process must call NvBufferDestroy() to release the reference count on the
+ * hardware buffer handle of the imported DMA buffer.
+ *
+ * @param[out] out_nvbuf_surf Pointer to hardware buffer.
+ * @param[in] in_params Parameters to create hardware buffer.
+ *
+ * @return 0 for success, -1 for failure.
+ */
+int NvBufSurfaceImport (NvBufSurface **out_nvbuf_surf, const NvBufSurfaceMapParams *in_params);
+
+/**
+ * \brief Get buffer information to map the buffer in another process.
+ *
+ * @param[in] surf Pointer to NvBufSurface structure.
+ * @param[in] index Index of a buffer in the batch.
+ * @param[out] params Pointer to NvBufSurfaceMapParams information of the buffer.
+ *
+ * @return 0 for success, -1 for failure.
+ */
+int NvBufSurfaceGetMapParams (const NvBufSurface *surf, int index, NvBufSurfaceMapParams *params);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* NVBUFSURFACE_H_ */
diff --git a/push_info.txt b/push_info.txt
new file mode 100644
index 0000000..0363cca
--- /dev/null
+++ b/push_info.txt
@@ -0,0 +1 @@
+jetson_35.6
diff --git a/v4l2_nv_extensions.h b/v4l2_nv_extensions.h
new file mode 100644
index 0000000..d22c860
--- /dev/null
+++ b/v4l2_nv_extensions.h
@@ -0,0 +1,2731 @@
+/*
+ * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* This file contains amendments to the V4L2 headers made after the
+ * supported kernel version and NVIDIA extensions.
+ */
+
+#ifndef __V4L2_NV_EXTENSIONS_H__
+#define __V4L2_NV_EXTENSIONS_H__
+
+/**
+ * @file
+ * NVIDIA V4L2 API Extensions
+ *
+ * @b Description: This file declares NVIDIA V4L2 extensions,
+ * controls and structures.
+ */
+
+/**
+ *
+ * @defgroup ee_extensions_group V4L2 NV Extensions API
+ *
+ * This file declares NVIDIA V4L2 extensions, controls, and structures.
+ *
+ */
+/**
+ * Defines V4L2 pixel format for DIVX.
+ */
+#define V4L2_PIX_FMT_DIVX4 v4l2_fourcc('D', 'V', 'X', '4')
+
+#define V4L2_PIX_FMT_DIVX5 v4l2_fourcc('D', 'V', 'X', '5')
+/**
+ * Defines V4L2 pixel format for H.265.
+ */
+#define V4L2_PIX_FMT_H265 v4l2_fourcc('H', '2', '6', '5')
+
+/**
+ * Defines the V4L2 pixel format for VP9.
+ */
+#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0')
+
+/**
+ * Defines the V4L2 pixel format for AV1.
+ */
+#define V4L2_PIX_FMT_AV1 v4l2_fourcc('A', 'V', '1', '0')
+
+/**
+ * Defines the V4L2 pixel format for representing single plane 10-bit Y/CbCr 4:2:0 decoder data.
+ */
+#define V4L2_PIX_FMT_P010 v4l2_fourcc('P', '0', '1', '0') /* Y/CbCr 4:2:0, 10 bits per channel */
+
+/**
+ * Defines the V4L2 pixel format for representing semi-planar 10-bit Y/CbCr 4:2:0 decoder data.
+ */
+#define V4L2_PIX_FMT_P010M v4l2_fourcc('P', 'M', '1', '0') /* Y/CbCr 4:2:0, 10 bits per channel */
+
+/**
+ * Defines the V4L2 pixel format for representing single plane 12-bit Y/CbCr 4:2:0 decoder data.
+ */
+#define V4L2_PIX_FMT_P012 v4l2_fourcc('P', '0', '1', '2') /* Y/CbCr 4:2:0, 12 bits per channel */
+
+/**
+ * Defines the V4L2 pixel format for representing semi-planar 12-bit Y/CbCr 4:2:0 decoder data.
+ */
+#define V4L2_PIX_FMT_P012M v4l2_fourcc('P', 'M', '1', '2') /* Y/CbCr 4:2:0, 12 bits per channel */
+
+/**
+ * Defines the V4L2 pixel format for representing semi-planar 8-bit Y/CbCr 4:4:4 decoder data.
+ */
+#define V4L2_PIX_FMT_NV24M v4l2_fourcc('N', 'M', '2', '4') /* Y/CbCr 4:4:4, 8 bits per channel */
+
+/**
+ * Defines the V4L2 pixel format for representing semi-planar 10-bit Y/CbCr 4:4:4 decoder data.
+ */
+#define V4L2_PIX_FMT_NV24_10LE v4l2_fourcc('N', 'V', '1', '0') /* Y/CbCr 4:4:4, 10 bits per channel */
+
+/**
+ * Defines the V4L2 pixel format for representing planar 10-bit Y/CbCr 4:4:4 decoder data.
+ */
+#define V4L2_PIX_FMT_YUV444_10LE v4l2_fourcc('P', '4', '1', '0') /* Y/Cb/Cr 4:4:4, 10 bits per channel */
+
+/**
+ * Defines the V4L2 pixel format for representing planar 12-bit Y/CbCr 4:4:4 decoder data.
+ */
+#define V4L2_PIX_FMT_YUV444_12LE v4l2_fourcc('P', '4', '1', '2') /* Y/Cb/Cr 4:4:4, 12 bits per channel */
+
+/** @cond UNUSED */
+/* >> The declarations from here to the next endcond statement are not
+ * >> currently implemented. DO NOT USE. */
+
+#define V4L2_PIX_FMT_YUV422RM v4l2_fourcc('4', '2', 'R', 'M')
+
+
+#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /** H264 parsed slices. */
+#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F') /** VP8 parsed frames. */
+
+#define V4L2_CTRL_FLAG_CAN_STORE 0x0200
+
+/** @endcond */
+
+/**
+ * Defines the V4L2 event type for decoder resolution event change.
+ */
+#define V4L2_EVENT_RESOLUTION_CHANGE 5
+
+/** @cond UNUSED */
+/* >> The declarations from here to the next endcond statement are not
+ * >> currently implemented. DO NOT USE. */
+
+/*---------------Below are changes from the v4l2-controls.h----------------------*/
+
+#define V4L2_CID_MPEG_VIDEO_H264_SPS (V4L2_CID_MPEG_BASE+383)
+#define V4L2_CID_MPEG_VIDEO_H264_PPS (V4L2_CID_MPEG_BASE+384)
+#define V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX (V4L2_CID_MPEG_BASE+385)
+#define V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAM (V4L2_CID_MPEG_BASE+386)
+#define V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAM (V4L2_CID_MPEG_BASE+387)
+
+#define V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR (V4L2_CID_MPEG_BASE+512)
+
+/** @endcond */
+
+/**
+ * Defines the control ID to set the H.265 encoder profile.
+ *
+ * A v4l2_mpeg_video_h265_profile must be passed.
+ */
+#define V4L2_CID_MPEG_VIDEO_H265_PROFILE (V4L2_CID_MPEG_BASE+513)
+
+/**
+ * Defines the possible profiles for H.265 encoder.
+ */
+enum v4l2_mpeg_video_h265_profile {
+ /** H.265 Main profile. */
+ V4L2_MPEG_VIDEO_H265_PROFILE_MAIN = 0,
+ /** H.265 Main10 profile. */
+ V4L2_MPEG_VIDEO_H265_PROFILE_MAIN10 = 1,
+ /** H.265 MainStillPicture profile. */
+ V4L2_MPEG_VIDEO_H265_PROFILE_MAINSTILLPICTURE = 2,
+ /** H.265 FREXT (Format Range Extensions) profile. */
+ V4L2_MPEG_VIDEO_H265_PROFILE_FREXT = 3,
+};
+
+/**
+ * Defines the control ID to set the encoder IDR frame interval.
+ * Must be used with \c VIDIOC_S_EXT_CTRLS IOCTL.
+ */
+#define V4L2_CID_MPEG_VIDEO_IDR_INTERVAL (V4L2_CID_MPEG_BASE+514)
+
+/** @cond UNUSED */
+/* >> The declarations from here to the next endcond statement are not
+ * >> currently implemented. DO NOT USE. */
+
+/* Complex controls */
+
+#define V4L2_H264_SPS_CONSTRAINT_SET0_FLAG 0x01
+#define V4L2_H264_SPS_CONSTRAINT_SET1_FLAG 0x02
+#define V4L2_H264_SPS_CONSTRAINT_SET2_FLAG 0x04
+#define V4L2_H264_SPS_CONSTRAINT_SET3_FLAG 0x08
+#define V4L2_H264_SPS_CONSTRAINT_SET4_FLAG 0x10
+#define V4L2_H264_SPS_CONSTRAINT_SET5_FLAG 0x20
+
+#define V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE 0x01
+#define V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS 0x02
+#define V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO 0x04
+#define V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED 0x08
+#define V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY 0x10
+#define V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD 0x20
+#define V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE 0x40
+struct v4l2_ctrl_h264_sps {
+ __u8 profile_idc;
+ __u8 constraint_set_flags;
+ __u8 level_idc;
+ __u8 seq_parameter_set_id;
+ __u8 chroma_format_idc;
+ __u8 bit_depth_luma_minus8;
+ __u8 bit_depth_chroma_minus8;
+ __u8 log2_max_frame_num_minus4;
+ __u8 pic_order_cnt_type;
+ __u8 log2_max_pic_order_cnt_lsb_minus4;
+ __s32 offset_for_non_ref_pic;
+ __s32 offset_for_top_to_bottom_field;
+ __u8 num_ref_frames_in_pic_order_cnt_cycle;
+ __s32 offset_for_ref_frame[255];
+ __u8 max_num_ref_frames;
+ __u16 pic_width_in_mbs_minus1;
+ __u16 pic_height_in_map_units_minus1;
+ __u8 flags;
+};
+
+#define V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE 0x0001
+#define V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT 0x0002
+#define V4L2_H264_PPS_FLAG_WEIGHTED_PRED 0x0004
+#define V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT 0x0008
+#define V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED 0x0010
+#define V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT 0x0020
+#define V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE 0x0040
+#define V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT 0x0080
+struct v4l2_ctrl_h264_pps {
+ __u8 pic_parameter_set_id;
+ __u8 seq_parameter_set_id;
+ __u8 num_slice_groups_minus1;
+ __u8 num_ref_idx_l0_default_active_minus1;
+ __u8 num_ref_idx_l1_default_active_minus1;
+ __u8 weighted_bipred_idc;
+ __s8 pic_init_qp_minus26;
+ __s8 pic_init_qs_minus26;
+ __s8 chroma_qp_index_offset;
+ __s8 second_chroma_qp_index_offset;
+ __u8 flags;
+};
+
+struct v4l2_ctrl_h264_scaling_matrix {
+ __u8 scaling_list_4x4[6][16];
+ __u8 scaling_list_8x8[6][64];
+};
+
+struct v4l2_h264_weight_factors {
+ __s8 luma_weight[32];
+ __s8 luma_offset[32];
+ __s8 chroma_weight[32][2];
+ __s8 chroma_offset[32][2];
+};
+
+struct v4l2_h264_pred_weight_table {
+ __u8 luma_log2_weight_denom;
+ __u8 chroma_log2_weight_denom;
+ struct v4l2_h264_weight_factors weight_factors[2];
+};
+
+#define V4L2_SLICE_FLAG_FIELD_PIC 0x01
+#define V4L2_SLICE_FLAG_BOTTOM_FIELD 0x02
+#define V4L2_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED 0x04
+#define V4L2_SLICE_FLAG_SP_FOR_SWITCH 0x08
+struct v4l2_ctrl_h264_slice_param {
+ /** Holds the size in bytes, including the header. */
+ __u32 size;
+ /** Holds the offset in bits to slice_data() from the beginning of this slice. */
+ __u32 header_bit_size;
+
+ __u16 first_mb_in_slice;
+ __u8 slice_type;
+ __u8 pic_parameter_set_id;
+ __u8 colour_plane_id;
+ __u16 frame_num;
+ __u16 idr_pic_id;
+ __u16 pic_order_cnt_lsb;
+ __s32 delta_pic_order_cnt_bottom;
+ __s32 delta_pic_order_cnt0;
+ __s32 delta_pic_order_cnt1;
+ __u8 redundant_pic_cnt;
+
+ struct v4l2_h264_pred_weight_table pred_weight_table;
+ /* Size in bits of dec_ref_pic_marking() syntax element. */
+ __u32 dec_ref_pic_marking_bit_size;
+ /* Size in bits of pic order count syntax. */
+ __u32 pic_order_cnt_bit_size;
+
+ __u8 cabac_init_idc;
+ __s8 slice_qp_delta;
+ __s8 slice_qs_delta;
+ __u8 disable_deblocking_filter_idc;
+ __s8 slice_alpha_c0_offset_div2;
+ __s8 slice_beta_offset_div2;
+ __u32 slice_group_change_cycle;
+
+ __u8 num_ref_idx_l0_active_minus1;
+ __u8 num_ref_idx_l1_active_minus1;
+ /* Entries on each list are indices
+ * into v4l2_ctrl_h264_decode_param.dpb[]. */
+ __u8 ref_pic_list0[32];
+ __u8 ref_pic_list1[32];
+
+ __u8 flags;
+};
+
+/** Defines whether the v4l2_h264_dpb_entry structure is used.
+If not set, this entry is unused for reference. */
+#define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE 0x01
+#define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM 0x02
+struct v4l2_h264_dpb_entry {
+ __u32 buf_index; /**< v4l2_buffer index. */
+ __u16 frame_num;
+ __u16 pic_num;
+ /** @note `v4l2_buffer.field` specifies this field. */
+ __s32 top_field_order_cnt;
+ __s32 bottom_field_order_cnt;
+ __u8 flags; /* V4L2_H264_DPB_ENTRY_FLAG_* */
+};
+
+struct v4l2_ctrl_h264_decode_param {
+ __u32 num_slices;
+ __u8 idr_pic_flag;
+ __u8 nal_ref_idc;
+ __s32 top_field_order_cnt;
+ __s32 bottom_field_order_cnt;
+ __u8 ref_pic_list_p0[32];
+ __u8 ref_pic_list_b0[32];
+ __u8 ref_pic_list_b1[32];
+ struct v4l2_h264_dpb_entry dpb[16];
+};
+
+#define V4L2_VP8_SEGMNT_HDR_FLAG_ENABLED 0x01
+#define V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_MAP 0x02
+#define V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_FEATURE_DATA 0x04
+struct v4l2_vp8_sgmnt_hdr {
+ __u8 segment_feature_mode;
+
+ __s8 quant_update[4];
+ __s8 lf_update[4];
+ __u8 segment_probs[3];
+
+ __u8 flags;
+};
+
+#define V4L2_VP8_LF_HDR_ADJ_ENABLE 0x01
+#define V4L2_VP8_LF_HDR_DELTA_UPDATE 0x02
+struct v4l2_vp8_loopfilter_hdr {
+ __u8 type;
+ __u8 level;
+ __u8 sharpness_level;
+ __s8 ref_frm_delta_magnitude[4];
+ __s8 mb_mode_delta_magnitude[4];
+
+ __u8 flags;
+};
+
+struct v4l2_vp8_quantization_hdr {
+ __u8 y_ac_qi;
+ __s8 y_dc_delta;
+ __s8 y2_dc_delta;
+ __s8 y2_ac_delta;
+ __s8 uv_dc_delta;
+ __s8 uv_ac_delta;
+ __u16 dequant_factors[4][3][2];
+};
+
+struct v4l2_vp8_entropy_hdr {
+ __u8 coeff_probs[4][8][3][11];
+ __u8 y_mode_probs[4];
+ __u8 uv_mode_probs[3];
+ __u8 mv_probs[2][19];
+};
+
+#define V4L2_VP8_FRAME_HDR_FLAG_EXPERIMENTAL 0x01
+#define V4L2_VP8_FRAME_HDR_FLAG_SHOW_FRAME 0x02
+#define V4L2_VP8_FRAME_HDR_FLAG_MB_NO_SKIP_COEFF 0x04
+struct v4l2_ctrl_vp8_frame_hdr {
+ /** 0: keyframe, 1: not a keyframe. */
+ __u8 key_frame;
+ __u8 version;
+
+ /** Populated also if not a key frame. */
+ __u16 width;
+ __u8 horizontal_scale;
+ __u16 height;
+ __u8 vertical_scale;
+
+ struct v4l2_vp8_sgmnt_hdr sgmnt_hdr;
+ struct v4l2_vp8_loopfilter_hdr lf_hdr;
+ struct v4l2_vp8_quantization_hdr quant_hdr;
+ struct v4l2_vp8_entropy_hdr entropy_hdr;
+
+ __u8 sign_bias_golden;
+ __u8 sign_bias_alternate;
+
+ __u8 prob_skip_false;
+ __u8 prob_intra;
+ __u8 prob_last;
+ __u8 prob_gf;
+
+ __u32 first_part_size;
+ /**
+ * Holds the offset in bits of the MB data in the first partition,
+ * i.e. bit offset starting from first_part_offset.
+ */
+ __u32 first_part_offset;
+ __u32 macroblock_bit_offset;
+
+ __u8 num_dct_parts;
+ __u32 dct_part_sizes[8];
+
+ __u8 bool_dec_range;
+ __u8 bool_dec_value;
+ __u8 bool_dec_count;
+
+ /** Holds the v4l2_buffer index of the last reference frame. */
+ __u32 last_frame;
+ /** Holds the v4l2_buffer index of the golden reference frame. */
+ __u32 golden_frame;
+ /** Holds the v4l2_buffer index of the alt reference frame. */
+ __u32 alt_frame;
+
+ __u8 flags;
+};
+
+/** @endcond */
+
+/*---------------Add below NVIDIA specific extensions ----------------------*/
+
+/**
+ * @defgroup V4L2Dec V4L2 Video Decoder
+ *
+ * @brief NVIDIA V4L2 Video Decoder Description and Extensions
+ *
+ * The video decoder device node is
+ *
+ * /dev/nvhost-nvdec
+ *
+ * ### Supported Pixel Formats
+ * OUTPUT PLANE | CAPTURE PLANE | PLATFORM
+ * :----------------: | :----------------: | :--------------------:
+ * V4L2_PIX_FMT_H264 | V4L2_PIX_FMT_NV12M | T210, T186, T194, T234
+ * V4L2_PIX_FMT_H265 | V4L2_PIX_FMT_NV12M | T210, T186, T194, T234
+ * V4L2_PIX_FMT_VP8 | V4L2_PIX_FMT_NV12M | T210, T186, T194, T234
+ * V4L2_PIX_FMT_VP9 | V4L2_PIX_FMT_NV12M | T210, T186, T194, T234
+ * V4L2_PIX_FMT_MPEG1 | V4L2_PIX_FMT_NV12M | T210, T186, T194, T234
+ * V4L2_PIX_FMT_MPEG2 | V4L2_PIX_FMT_NV12M | T210, T186, T194, T234
+ * V4L2_PIX_FMT_MPEG4 | V4L2_PIX_FMT_NV12M | T210, T186, T194, T234
+ * V4L2_PIX_FMT_DIVX4 | V4L2_PIX_FMT_NV12M | T210, T186, T194, T234
+ * V4L2_PIX_FMT_DIVX5 | V4L2_PIX_FMT_NV12M | T210, T186, T194, T234
+ * V4L2_PIX_FMT_JPEG | V4L2_PIX_FMT_NV12M | T210, T186, T194, T234
+ * V4L2_PIX_FMT_MJPEG | V4L2_PIX_FMT_NV12M | T210, T186, T194, T234
+ * V4L2_PIX_FMT_AV1 | V4L2_PIX_FMT_NV12M | T234
+ *
+ * ### Supported Memory Types
+ * MEMORY | OUTPUT PLANE | CAPTURE PLANE
+ * :------------------: | :----------: | :-----------:
+ * V4L2_MEMORY_MMAP | Y | Y
+ * V4L2_MEMORY_DMABUF | N | Y
+ * V4L2_MEMORY_USERPTR | Y | N
+ *
+ * ### Supported Controls
+ * - #V4L2_CID_MPEG_VIDEO_DISABLE_COMPLETE_FRAME_INPUT
+ * - #V4L2_CID_MPEG_VIDEO_DISABLE_DPB
+ * - #V4L2_CID_MPEG_VIDEO_ERROR_REPORTING
+ * - #V4L2_CID_MPEG_VIDEO_SKIP_FRAMES
+ * - V4L2_CID_MIN_BUFFERS_FOR_CAPTURE (Get the minimum buffers to be allocated on capture plane.
+ * Read only. Valid after #V4L2_EVENT_RESOLUTION_CHANGE)
+ * - #V4L2_CID_MPEG_VIDEODEC_INPUT_METADATA
+ * - #V4L2_CID_MPEG_VIDEODEC_METADATA
+ * - #V4L2_CID_MPEG_VIDEO_CUDA_MEM_TYPE
+ * - #V4L2_CID_MPEG_VIDEO_CUDA_GPU_ID
+ * - #V4L2_CID_MPEG_VIDEODEC_DROP_FRAME_INTERVAL
+ *
+ * ### Supported Events
+ * Event | Purpose
+ * ----------------------------- | :----------------------------:
+ * #V4L2_EVENT_RESOLUTION_CHANGE | Resolution of the stream has changed.
+ *
+ * ### Handling Resolution Change Events
+ * When the decoder generates a \c V4L2_EVENT_RESOLUTION_CHANGE event, the
+ * application calls \c STREAMOFF on the capture plane to tell the decoder to
+ * deallocate the current buffers by calling REQBUF with count zero, get
+ * the new capture plane format, and then proceed with setting up the buffers
+ * for the capture plane.
+ *
+ * In the case of the decoder, the decoded buffer resolution may differ from the
+ * display resolution. The application must use \c VIDIOC_G_CROP to get the display resolution.
+ *
+ * ### EOS Handling
+ * The following sequence must be followed for sending EOS and receiving EOS
+ * from the decoder.
+ * -# Send EOS to decoder by queueing on the output plane a buffer with
+ * bytesused = 0 for the 0th plane (`v4l2_buffer.m.planes[0].bytesused = 0`).
+ * -# Dequeues buffers on the output plane until it gets a buffer with bytesused = 0
+ * for the 0th plane (`v4l2_buffer.m.planes[0].bytesused == 0`)
+ * -# Dequeues buffers on the capture plane until it gets a buffer with bytesused = 0
+ * for the 0th plane.
+ *
+ * ### Decoder Input Frame Metadata
+ * Decoder supports reporting stream header parsing error info as input frame metadata.
+ * See \c V4L2_CID_MPEG_VIDEO_ERROR_REPORTING, \c V4L2_CID_MPEG_VIDEODEC_INPUT_METADATA
+ * and \c v4l2_ctrl_video_metadata for more information.
+ *
+ * ### Decoder Output Frame Metadata
+ * Decoder supports reporting frame related metadata, including error reports and
+ * DPB info. See \c V4L2_CID_MPEG_VIDEO_ERROR_REPORTING, \c V4L2_CID_MPEG_VIDEODEC_METADATA
+ * and \c v4l2_ctrl_video_metadata for more information.
+ *
+ * @note Currently, V4L2 plugins do not support odd resolution.
+ * @{
+ * @ingroup ee_extensions_group
+ */
+
+/**
+ * Defines the Control ID to indicate to the decoder that the input
+ * buffers do not contain complete buffers.
+ *
+ * @note This control must be set in case of frames containing multiple slices
+ * when the input buffers do not contain all the slices of the frame.
+ *
+ * A boolean value must be supplied with this control.
+ *
+ */
+#define V4L2_CID_MPEG_VIDEO_DISABLE_COMPLETE_FRAME_INPUT (V4L2_CID_MPEG_BASE+515)
+
+/**
+ * Defines the Control ID to disable decoder DPB management.
+ *
+ * @note This only works for streams having a single reference frame.
+ *
+ * A boolean value must be supplied with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEO_DISABLE_DPB (V4L2_CID_MPEG_BASE+516)
+
+/**
+ * Defines the Control ID to enable decoder error and metadata reporting.
+ *
+ * A boolean value must be supplied with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEO_ERROR_REPORTING (V4L2_CID_MPEG_BASE+517)
+
+/**
+ * Defines the Control ID to set the skip frames property of the decoder.
+ *
+ * Decoder must be configured to skip certain types of frames. One
+ * \c v4l2_skip_frames_type must be passed.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ * This control ID is supported only for H264.
+ */
+#define V4L2_CID_MPEG_VIDEO_SKIP_FRAMES (V4L2_CID_MPEG_BASE+518)
+
+/**
+ * Defines the Control ID to get the decoder output metadata.
+ *
+ * @note Metadata reporting must be enabled using
+ * #V4L2_CID_MPEG_VIDEO_ERROR_REPORTING IOCTL for this.
+ *
+ * A pointer to a valid \c v4l2_ctrl_video_metadata structure must be supplied
+ * with this control.
+ *
+ * @attention This control must be read after dequeueing a buffer successfully from
+ * the capture plane. The values in the structure are valid until the buffer is queued
+ * again.
+ */
+#define V4L2_CID_MPEG_VIDEODEC_METADATA (V4L2_CID_MPEG_BASE+519)
+
+/**
+ * Defines the Control ID to get the decoder input header error metadata.
+ *
+ * @note Metadata reporting must be enabled using
+ * #V4L2_CID_MPEG_VIDEO_ERROR_REPORTING IOCTL for this.
+ *
+ * A pointer to a valid \c v4l2_ctrl_video_metadata structure must be supplied
+ * with this control.
+ *
+ * @attention This control must be read after dequeueing a buffer successfully from
+ * the output plane. The values in the structure are valid until the buffer is queued
+ * again.
+ */
+#define V4L2_CID_MPEG_VIDEODEC_INPUT_METADATA (V4L2_CID_MPEG_BASE+520)
+
+/**
+ * Defines the Control ID to check if display data is present.
+ *
+ * This control returns true if HDR metadata is present in the stream.
+ *
+ */
+#define V4L2_CID_VIDEODEC_DISPLAYDATA_PRESENT (V4L2_CID_MPEG_BASE+521)
+
+/**
+ * Defines the Control ID to get display data if V4L2_CID_VIDEODEC_DISPLAYDATA_PRESENT returns true.
+ *
+ * This control returns display data such as display_primaries, white_point and
+ * display_parameter_luminance required for display module.
+ *
+ */
+#define V4L2_CID_VIDEODEC_HDR_MASTERING_DISPLAY_DATA (V4L2_CID_MPEG_BASE+522)
+
+/**
+ * Defines the Control ID to get Sample Aspect Ratio width for decoding.
+ *
+ * This control returns unsigned integer of Sample Aspect Ratio width.
+ *
+ * @attention This control must be read after receiving V4L2_EVENT_RESOLUTION_CHANGE.
+ *
+ */
+#define V4L2_CID_MPEG_VIDEODEC_SAR_WIDTH (V4L2_CID_MPEG_BASE+580)
+
+/**
+ * Defines the Control ID to get Sample Aspect Ratio height for decoding.
+ *
+ * This control returns unsigned integer of Sample Aspect Ratio height.
+ *
+ * @attention This control must be read after receiving V4L2_EVENT_RESOLUTION_CHANGE.
+ *
+ */
+#define V4L2_CID_MPEG_VIDEODEC_SAR_HEIGHT (V4L2_CID_MPEG_BASE+581)
+
+ /**
+ * Defines the Control ID to embed the SEI data coming from upstream plugins.
+ *
+ * This control sets the SEI payload size and SEI data pointer for encoder.
+ *
+ * @note NOTE(review): the value (V4L2_CID_MPEG_BASE+571) collides with
+ * #V4L2_CID_MPEG_VIDEOENC_AV1_ENABLE_SSIMRDO defined below — confirm the two
+ * controls are never used together in the same build.
+ *
+ * @attention This control must be set after receiving V4L2_EVENT_RESOLUTION_CHANGE.
+ * NOTE(review): this is an encoder control; the V4L2_EVENT_RESOLUTION_CHANGE
+ * requirement looks copy-pasted from a decoder control — confirm.
+ */
+ #define V4L2_CID_MPEG_VIDEOENC_DS_SEI_DATA (V4L2_CID_MPEG_BASE+571)
+
+/** @} */
+
+/**
+ * @defgroup V4L2Conv V4L2 Video Converter
+ *
+ * @brief NVIDIA V4L2 Video Converter Description and Extensions
+ *
+ * Use the video converter for color space conversion, scaling, and
+ * conversion between hardware buffer memory (\c V4L2_MEMORY_MMAP/\c
+ * V4L2_MEMORY_DMABUF), software buffer memory (\c V4L2_MEMORY_USERPTR), and
+ * other operations such as cropping, flipping/rotating, and
+ * temporal noise reduction (TNR).
+ * The video converter device node is \c "/dev/nvhost-vic".
+ *
+ * ### Supported Pixelformats
+ * PIXEL FORMAT | PIXEL FORMAT
+ * :---------------------: | :--------------:
+ * V4L2_PIX_FMT_YUV444M | V4L2_PIX_FMT_YVU422M
+ * V4L2_PIX_FMT_YUV420M | V4L2_PIX_FMT_YVU420M
+ * V4L2_PIX_FMT_NV12M | V4L2_PIX_FMT_GREY
+ * V4L2_PIX_FMT_YUYV | V4L2_PIX_FMT_YVYU
+ * V4L2_PIX_FMT_UYVY | V4L2_PIX_FMT_VYUY
+ * V4L2_PIX_FMT_ABGR32 | V4L2_PIX_FMT_XBGR32
+ *
+ * ### Supported Pixel Formats for TNR
+ * PIXEL FORMAT | PIXEL FORMAT
+ * :---------------------: | :--------------:
+ * V4L2_PIX_FMT_YUV420M | V4L2_PIX_FMT_NV12M
+ * V4L2_PIX_FMT_UYVY | V4L2_PIX_FMT_YUYV
+ *
+ * ### Supported Memory Types
+ * MEMORY | OUTPUT PLANE | CAPTURE PLANE
+ * :------------------: | :----------: | :-----------:
+ * V4L2_MEMORY_MMAP | Y | Y
+ * V4L2_MEMORY_DMABUF | Y | Y
+ * V4L2_MEMORY_USERPTR | Y | Y
+ *
+ * ### Supported Controls
+ * - #V4L2_CID_VIDEO_CONVERT_OUTPUT_PLANE_LAYOUT
+ * - #V4L2_CID_VIDEO_CONVERT_CAPTURE_PLANE_LAYOUT
+ * - #V4L2_CID_VIDEO_CONVERT_FLIP_METHOD
+ * - #V4L2_CID_VIDEO_CONVERT_INTERPOLATION_METHOD
+ * - #V4L2_CID_VIDEO_CONVERT_TNR_ALGORITHM
+ * - #V4L2_CID_VIDEO_CONVERT_YUV_RESCALE_METHOD
+ *
+ * ### Cropping
+ * Video converter supports cropping using \c VIDIOC_S_SELECTION IOCTL with type
+ * \c V4L2_BUF_TYPE_VIDEO_CAPTURE and target \c V4L2_SEL_TGT_CROP. This must
+ * be set before requesting buffers on either plane.
+ *
+ * ### EOS Handling
+ * The following sequence must be followed for sending EOS and receiving EOS
+ * from the converter.
+ * -# Send EOS to converter by queueing on the output plane a buffer with
+ * bytesused = 0 for the 0th plane (`v4l2_buffer.m.planes[0].bytesused = 0`).
+ * -# Dequeue buffers on the capture plane until a buffer with bytesused = 0
+ * for the 0th plane is received.
+ *
+ * @note Currently, V4L2 plugins do not support odd resolution.
+ * @{
+ * @ingroup ee_extensions_group
+ */
+
+/**
+ * Defines the Control ID to set converter output plane buffer layout.
+ *
+ * A value of type \c v4l2_nv_buffer_layout must be supplied with this control.
+ *
+ * @attention This control must be set before requesting buffers on the output plane.
+ */
+#define V4L2_CID_VIDEO_CONVERT_OUTPUT_PLANE_LAYOUT (V4L2_CID_MPEG_BASE+523)
+
+/**
+ * Defines the Control ID to set converter capture plane buffer layout.
+ *
+ * A value of type \c v4l2_nv_buffer_layout must be supplied with this control.
+ *
+ * @attention This control must be set before requesting buffers on the capture plane.
+ */
+#define V4L2_CID_VIDEO_CONVERT_CAPTURE_PLANE_LAYOUT (V4L2_CID_MPEG_BASE+524)
+
+/**
+ * Defines the Control ID to set the converter flip/rotation method.
+ *
+ * A value of type \c v4l2_flip_method must be supplied with this control.
+ *
+ * @attention This control must be set before requesting buffers on either plane.
+ */
+#define V4L2_CID_VIDEO_CONVERT_FLIP_METHOD (V4L2_CID_MPEG_BASE+525)
+
+/**
+ * Defines the Control ID to set the converter interpolation method.
+ *
+ * A value of type \c v4l2_interpolation_method must be supplied with this control.
+ *
+ * @attention This control must be set before requesting buffers on either plane.
+ */
+#define V4L2_CID_VIDEO_CONVERT_INTERPOLATION_METHOD (V4L2_CID_MPEG_BASE+526)
+
+/**
+ * Defines the Control ID to set the converter Temporal Noise Reduction (TNR) algorithm.
+ *
+ * A value of type \c v4l2_tnr_algorithm must be supplied with this control.
+ *
+ * @attention This control must be set before requesting buffers on either plane.
+ * @attention TNR algorithms are not supported with YUV422 and YUV444 capture
+ * plane formats.
+ */
+#define V4L2_CID_VIDEO_CONVERT_TNR_ALGORITHM (V4L2_CID_MPEG_BASE+527)
+/** @} */
+
+/**
+ * @defgroup V4L2Enc V4L2 Video Encoder
+ *
+ * @brief NVIDIA V4L2 Video Encoder Description and Extensions
+ *
+ * The video encoder device node is \c "/dev/nvhost-msenc".
+ *
+ * ### Supported Pixelformats
+ * OUTPUT PLANE | CAPTURE PLANE | PLATFORM
+ * :---------------------: | :-----------------: | :--------------------:
+ * V4L2_PIX_FMT_YUV420M | V4L2_PIX_FMT_H264 | T210, T186, T194, T234
+ * V4L2_PIX_FMT_NV12M | |
+ * V4L2_PIX_FMT_YUV444M | |
+ * V4L2_PIX_FMT_NV24M | |
+ * | |
+ * V4L2_PIX_FMT_YUV420M | V4L2_PIX_FMT_H265 | T210, T186, T194, T234
+ * V4L2_PIX_FMT_NV12M | |
+ * V4L2_PIX_FMT_YUV444M | |
+ * V4L2_PIX_FMT_NV24M | |
+ * V4L2_PIX_FMT_P010M | |
+ * V4L2_PIX_FMT_NV24_10LE | |
+ * | |
+ * V4L2_PIX_FMT_YUV420M | V4L2_PIX_FMT_VP8 | T210, T186
+ * V4L2_PIX_FMT_NV12M | |
+ * | |
+ * V4L2_PIX_FMT_YUV420M | V4L2_PIX_FMT_VP9 | T186, T194
+ * V4L2_PIX_FMT_NV12M | |
+ * | |
+ * V4L2_PIX_FMT_YUV420M | V4L2_PIX_FMT_AV1 | T234
+ * V4L2_PIX_FMT_NV12M | |
+ *
+ * ### Supported Memory Types
+ * MEMORY | OUTPUT PLANE | CAPTURE PLANE
+ * :------------------: | :----------: | :-----------:
+ * V4L2_MEMORY_MMAP | Y | Y
+ * V4L2_MEMORY_DMABUF | Y | N
+ * V4L2_MEMORY_USERPTR | N | N
+ * \attention For the video encoder, it is necessary that the capture plane
+ * format be set before the output plane format and only then request buffers on
+ * any of the planes.
+ *
+ * ### Supported Controls
+ * The following sections describe the supported controls.
+ *
+ * #### Controls From the Open Source V4L2-Controls Header
+ * Control ID | Purpose | Runtime Configurable
+ * -------------------------------- | -------------------- | :------------------:
+ * V4L2_CID_MPEG_VIDEO_BITRATE | Bitrate | Y
+ * V4L2_CID_MPEG_VIDEO_H264_PROFILE | H.264 Encode Profile | N
+ * V4L2_CID_MPEG_VIDEO_BITRATE_MODE | Rate Control Mode | N
+ * V4L2_CID_MPEG_VIDEO_GOP_SIZE | I-frame Interval | N
+ * V4L2_CID_MPEG_VIDEO_H264_LEVEL | Encode Level | N
+ * V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE | Force I-frame on one of queued output plane buffer | Y
+ *
+ * All non-runtime configurable options must be set after setting formats on
+ * both the planes and before requesting buffers on either plane.
+ *
+ * The runtime configurable parameters can be called anytime after setting
+ * formats on both the planes.
+ *
+ * #### NVIDIA-Specific Controls
+ * - #V4L2_CID_MPEG_VIDEO_H265_PROFILE
+ * - #V4L2_CID_MPEG_VIDEO_IDR_INTERVAL
+ * - #V4L2_CID_MPEG_VIDEOENC_TEMPORAL_TRADEOFF_LEVEL
+ * - #V4L2_CID_MPEG_VIDEOENC_SLICE_LENGTH_PARAM
+ * - #V4L2_CID_MPEG_VIDEOENC_ROI_PARAMS
+ * - #V4L2_CID_MPEG_VIDEOENC_VIRTUALBUFFER_SIZE
+ * - #V4L2_CID_MPEG_VIDEOENC_NUM_REFERENCE_FRAMES
+ * - #V4L2_CID_MPEG_VIDEOENC_SLICE_INTRAREFRESH_PARAM
+ * - #V4L2_CID_MPEG_VIDEOENC_NUM_BFRAMES
+ * - #V4L2_CID_MPEG_VIDEOENC_INSERT_SPS_PPS_AT_IDR
+ * - #V4L2_CID_MPEG_VIDEOENC_METADATA
+ * - #V4L2_CID_MPEG_VIDEOENC_METADATA_MV
+ * - #V4L2_CID_MPEG_VIDEOENC_ENABLE_METADATA_MV
+ * - #V4L2_CID_MPEG_VIDEOENC_QP_RANGE
+ * - #V4L2_CID_MPEG_VIDEOENC_HW_PRESET_TYPE_PARAM
+ * - #V4L2_CID_MPEG_VIDEOENC_INPUT_METADATA
+ * - #V4L2_CID_MPEG_VIDEOENC_ENABLE_EXTERNAL_RPS_CONTROL
+ * - #V4L2_CID_MPEG_VIDEOENC_ENABLE_EXTERNAL_RATE_CONTROL
+ * - #V4L2_CID_MPEG_VIDEOENC_ENABLE_ROI_PARAM
+ * - #V4L2_CID_MPEG_VIDEOENC_ENABLE_RECONCRC_PARAM
+ * - #V4L2_CID_MPEG_VIDEOENC_INSERT_VUI
+ * - #V4L2_CID_MPEG_VIDEOENC_INSERT_AUD
+ * - #V4L2_CID_MPEG_VIDEOENC_EXTEDED_COLORFORMAT
+ * - #V4L2_CID_MPEG_VIDEOENC_ENABLE_ALLIFRAME_ENCODE
+ * - #V4L2_CID_MPEG_VIDEOENC_H265_LEVEL
+ * - #V4L2_CID_MPEG_VIDEOENC_ENABLE_SLICE_LEVEL_ENCODE
+ * - #V4L2_CID_MPEG_VIDEOENC_POC_TYPE
+ * - #V4L2_CID_MPEG_VIDEOENC_H265_VUI_EXT_SAR_WIDTH
+ * - #V4L2_CID_MPEG_VIDEOENC_H265_VUI_EXT_SAR_HEIGHT
+ * - #V4L2_CID_MPEG_VIDEOENC_FORCE_INTRA_FRAME
+ * - #V4L2_CID_MPEG_VIDEOENC_FORCE_IDR_FRAME
+ * - #V4L2_CID_MPEG_VIDEOENC_AV1_HEADERS_WITH_FRAME
+ * - #V4L2_CID_MPEG_VIDEOENC_AV1_TILE_CONFIGURATION
+ * - #V4L2_CID_MPEG_VIDEOENC_AV1_ENABLE_SSIMRDO
+ * - #V4L2_CID_MPEG_VIDEOENC_AV1_DISABLE_CDF_UPDATE
+ * - #V4L2_CID_MPEG_VIDEOENC_PPE_INIT_PARAMS
+ *
+ * #### Setting Framerate
+ * The encoder framerate can be set with \c VIDIOC_S_PARM IOCTL by setting the numerator
+ * and denominator in `v4l2_streamparm.parm.output.timeperframe`.
+ *
+ * ### Supported Encoder Profiles
+ * #### H.264
+ * - V4L2_MPEG_VIDEO_H264_PROFILE_MAIN
+ * - V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE
+ * - V4L2_MPEG_VIDEO_H264_PROFILE_HIGH
+ *
+ * #### H.265
+ * - V4L2_MPEG_VIDEO_H265_PROFILE_MAIN
+ * - V4L2_MPEG_VIDEO_H265_PROFILE_MAIN10
+ *
+ * ### Encoder Output Metadata
+ * The encoder supports reporting frame related metadata, including motion vectors
+ * for that frame. See \c V4L2_CID_MPEG_VIDEOENC_METADATA,
+ * \c V4L2_CID_MPEG_VIDEOENC_METADATA_MV and \c V4L2_CID_MPEG_VIDEOENC_ENABLE_METADATA_MV
+ * for more information.
+ *
+ * ### EOS Handling
+ * The following sequence must be followed for sending EOS and receiving EOS
+ * from the encoder.
+ * -# Send EOS to encoder by queueing on the output plane a buffer with
+ * bytesused = 0 for the 0th plane (`v4l2_buffer.m.planes[0].bytesused = 0`).
+ * -# Dequeue buffers on the capture plane until a buffer with bytesused = 0
+ * for the 0th plane is received.
+ *
+ * @note Currently, V4L2 plugins do not support odd resolution.
+ * @{
+ * @ingroup ee_extensions_group
+ */
+
+
+/**
+ * Defines the Control ID to configure encoder to drop frames while encoding.
+ *
+ * A value of type \c v4l2_enc_temporal_tradeoff_level_type must be supplied
+ * with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_TEMPORAL_TRADEOFF_LEVEL (V4L2_CID_MPEG_BASE+528)
+
+/**
+ * Defines the Control ID to configure encoder slice length either in terms of MBs or bits.
+ *
+ * A pointer to a valid \c v4l2_enc_slice_length_param structure must be supplied
+ * with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_SLICE_LENGTH_PARAM (V4L2_CID_MPEG_BASE+529)
+
+/**
+ * Defines the Control ID to configure encoder to encode particular region of frame in high
+ * quality.
+ *
+ * A pointer to a valid \c v4l2_enc_frame_ROI_params structure must be supplied
+ * with this control.
+ *
+ * @attention This control must be set after requesting buffers on both the
+ * planes.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_ROI_PARAMS (V4L2_CID_MPEG_BASE+530)
+
+/**
+ * Defines the Control ID to specify virtual buffer size in bits for encoder.
+ *
+ * A pointer to a valid \c v4l2_enc_virtual_buffer_size structure must be
+ * supplied with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_VIRTUALBUFFER_SIZE (V4L2_CID_MPEG_BASE+531)
+
+/**
+ * Defines the Control ID to specify maximum number of reference frames that can be used.
+ *
+ * An integer value must be supplied with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_NUM_REFERENCE_FRAMES (V4L2_CID_MPEG_BASE+532)
+
+/**
+ * Defines the Control ID to specify the encoder slice intra refresh interval.
+ *
+ * A pointer to a valid \c v4l2_enc_slice_intrarefresh_param structure must be
+ * supplied with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_SLICE_INTRAREFRESH_PARAM (V4L2_CID_MPEG_BASE+533)
+
+/**
+ * Defines the Control ID to set number of B frames to be encoded between two P frames.
+ *
+ * This works with H.264 encoder. This also works with H.265 encoder for Jetson Xavier and
+ * Jetson Xavier NX platforms. An integer value must be supplied with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_NUM_BFRAMES (V4L2_CID_MPEG_BASE+534)
+
+/**
+ * Defines the Control ID to enable/disable inserting SPS and PPS explicitly at IDR interval.
+ *
+ * A boolean value must be supplied with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_INSERT_SPS_PPS_AT_IDR (V4L2_CID_MPEG_BASE+535)
+
+/**
+ * Defines the Control ID to get encoder output metadata.
+ *
+ * A pointer to valid #v4l2_ctrl_video_metadata structure must be supplied with
+ * this control.
+ *
+ * @attention This control must be read after dequeueing a buffer successfully from
+ * the capture plane. The values in the structure are valid until the buffer is queued
+ * again.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_METADATA (V4L2_CID_MPEG_BASE+536)
+
+/**
+ * Defines the Control ID to enable/disable encoder motion vector reporting.
+ *
+ * A boolean value must be supplied with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_ENABLE_METADATA_MV (V4L2_CID_MPEG_BASE+537)
+
+/**
+ * Defines the Control ID to get encoder output motion vector metadata.
+ *
+ * A pointer to valid \c v4l2_ctrl_videoenc_outputbuf_metadata_MV structure must
+ * be supplied with this control.
+ *
+ * @attention This control must be read after dequeueing a buffer successfully from
+ * the capture plane. The values in the structure are valid until the buffer is queued
+ * again.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_METADATA_MV (V4L2_CID_MPEG_BASE+538)
+
+/**
+ * Defines the Control ID to set QP range for I/P/B frames.
+ *
+ * A pointer to a valid \c v4l2_ctrl_video_qp_range structure must
+ * be supplied with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_QP_RANGE (V4L2_CID_MPEG_BASE+539)
+
+/**
+ * Defines the Control ID to set encoder HW Preset type.
+ *
+ * A pointer to valid #v4l2_enc_hw_preset_type_param structure must
+ * be supplied with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_HW_PRESET_TYPE_PARAM (V4L2_CID_MPEG_BASE+540)
+
+/**
+ * Defines the Control ID to provide input metadata for encoder buffer.
+ *
+ * A pointer to valid #v4l2_ctrl_videoenc_input_metadata structure must be
+ * supplied with this control.
+ *
+ * @attention This control must be called before queueing a buffer on the output
+ * plane. Use the bitwise OR of v4l2_enc_input_metadata_param in the
+ * v4l2_ctrl_videoenc_input_metadata.metadata_flag to provide different input
+ * metadata parameters in one s_ctrl call.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_INPUT_METADATA (V4L2_CID_MPEG_BASE+541)
+
+/**
+ * Defines the Control ID to configure encoder for external RPS control.
+ *
+ * A pointer to a valid #v4l2_enc_enable_ext_rps_ctr structure must be supplied
+ * with this control.
+ *
+ * @attention This control must be set after requesting buffers on both the
+ * planes. The value for V4L2_CID_MPEG_VIDEOENC_NUM_REFERENCE_FRAMES, if being entered,
+ * must be set after this control.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_ENABLE_EXTERNAL_RPS_CONTROL (V4L2_CID_MPEG_BASE+542)
+
+/**
+ * Defines the Control ID to configure encoder for external rate control.
+ *
+ * A pointer to a valid #v4l2_enc_enable_ext_rate_ctr structure must be supplied
+ * with this control.
+ *
+ * @attention This control must be set after requesting buffers on both the
+ * planes.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_ENABLE_EXTERNAL_RATE_CONTROL (V4L2_CID_MPEG_BASE+543)
+
+/**
+ * Defines the Control ID to configure ROI encoding for a session.
+ *
+ * A pointer to a valid #v4l2_enc_enable_roi_param structure must be supplied
+ * with this control.
+ *
+ * @attention This control must be set after requesting buffers on both the
+ * planes.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_ENABLE_ROI_PARAM (V4L2_CID_MPEG_BASE+544)
+
+/**
+ * Defines the Control ID to configure Reconstructed CRC for a session.
+ *
+ * A pointer to a valid #v4l2_enc_enable_reconcrc_param structure must be supplied
+ * with this control.
+ *
+ * @attention This control must be set after requesting buffers on both the
+ * planes.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_ENABLE_RECONCRC_PARAM (V4L2_CID_MPEG_BASE+545)
+
+/**
+ * Control ID to enable/disable inserting VUI in SPS.
+ *
+ * A boolean value should be supplied with this control.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_INSERT_VUI (V4L2_CID_MPEG_BASE+546)
+
+/**
+ * Control ID to enable/disable inserting AUD(Access Unit Delimiter).
+ *
+ * A boolean value should be supplied with this control.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_INSERT_AUD (V4L2_CID_MPEG_BASE+547)
+
+/**
+ * Control ID to enable/disable setting extended color format.
+ *
+ * A boolean value should be supplied with this control.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane. Also this control should be
+ * enabled/disabled only after V4L2_CID_MPEG_VIDEOENC_INSERT_VUI is set.
+ *
+ * @note The macro name misspells "EXTENDED"; it is kept as-is for source
+ * compatibility.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_EXTEDED_COLORFORMAT (V4L2_CID_MPEG_BASE+548)
+
+/**
+ * Control ID to select which NVDEC IP to decode.
+ *
+ * @note This functionality is deprecated and is no longer
+ * functional.
+ *
+ * A v4l2_decode_instance_type should be supplied with this control.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEO_DECODE_INSTANCE (V4L2_CID_MPEG_BASE+549)
+/**
+ * Control ID to issue a pseudo POLL call on the fd opened in non blocking mode.
+ *
+ * A pointer to a valid #v4l2_ctrl_video_device_poll must be supplied with this control.
+ *
+ * @attention This should only be called when the Decoder or Encoder is opened with
+ * O_NONBLOCK flag.
+ */
+#define V4L2_CID_MPEG_VIDEO_DEVICE_POLL (V4L2_CID_MPEG_BASE+550)
+
+/**
+ * Control ID to set/clear the polling interrupt mode. Useful when a POLL has
+ * been issued from the application but the wait needs to be interrupted.
+ *
+ * A boolean value must be supplied with this control. True indicates that the
+ * polling interrupt shall be enabled and stay enabled (i.e., calls to POLL
+ * return immediately) until the same control ID is set again with a boolean
+ * value of 0.
+ *
+ * @attention This should only be called when the Decoder or Encoder is opened with
+ * O_NONBLOCK flag.
+ */
+#define V4L2_CID_MPEG_SET_POLL_INTERRUPT (V4L2_CID_MPEG_BASE+551)
+
+/**
+ * Control ID to enable/disable setting rate control two pass CBR.
+ *
+ * A boolean value should be supplied with this control.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_TWO_PASS_CBR (V4L2_CID_MPEG_BASE+552)
+
+/**
+ * Defines the Control ID to set the converter YUV Rescale method.
+ *
+ * A value of type \c v4l2_yuv_rescale_method must be supplied with this control.
+ *
+ * @attention This control must be set before requesting buffers on either plane.
+ */
+#define V4L2_CID_VIDEO_CONVERT_YUV_RESCALE_METHOD (V4L2_CID_MPEG_BASE+553)
+
+/**
+ * Control ID to enable maximum Performance.
+ *
+ * An integer value must be supplied with this control.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEO_MAX_PERFORMANCE (V4L2_CID_MPEG_BASE+554)
+
+/**
+ * Control ID to enable/disable setting for all i-Frame encoding.
+ *
+ * A boolean value should be supplied with this control.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_ENABLE_ALLIFRAME_ENCODE (V4L2_CID_MPEG_BASE+555)
+
+/**
+ * Defines the Control ID to set buf api to be used by decoder/encoder.
+ *
+ * Note: This Control ID is no longer supported.
+ */
+#define V4L2_CID_MPEG_VIDEO_BUF_API_TYPE (V4L2_CID_MPEG_BASE+556)
+
+/**
+ * Defines the Control ID to set cuda memory type to be used by decoder/encoder.
+ *
+ * This control can be used by the decoder to set the memory type for surfaces.
+ * A value of \c v4l2_cuda_mem_type needs to be set with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEO_CUDA_MEM_TYPE (V4L2_CID_MPEG_BASE+557)
+
+/**
+ * Defines the Control ID to set GPU ID to be used by decoder/encoder.
+ *
+ * An integer value should be supplied with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEO_CUDA_GPU_ID (V4L2_CID_MPEG_BASE+558)
+
+/**
+ * Defines the Control ID to set drop frames interval for decoder.
+ *
+ * An integer value should be supplied with this control. A value of "x"
+ * indicates that every "x"th frame should be given out from the decoder; the
+ * rest shall be dropped after decoding.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEODEC_DROP_FRAME_INTERVAL (V4L2_CID_MPEG_BASE+559)
+
+/**
+ * Control ID to enable/disable setting for attaching VP8/9 headers.
+ * Only to be used for VP8/9 pixel format not for H264/5.
+ *
+ * A boolean value should be supplied with this control.
+ * If value is false headers will be disabled and true will enable the headers.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+ #define V4L2_CID_MPEG_VIDEOENC_VPX_HEADERS_WITH_FRAME (V4L2_CID_MPEG_BASE+560)
+
+/**
+ * Defines the control ID to set the H.265 encoder level.
+ *
+ * A v4l2_mpeg_video_h265_level must be passed.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_H265_LEVEL (V4L2_CID_MPEG_BASE+561)
+
+/**
+ * Control ID to enable/disable slice level encode output.
+ *
+ * A boolean value should be supplied with this control.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_ENABLE_SLICE_LEVEL_ENCODE (V4L2_CID_MPEG_BASE+562)
+
+/**
+ * Defines the Control ID to set Picture Order Count property in frames.
+ *
+ * This works only with H.264 encoder. An integer value must be supplied with this
+ * control.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_POC_TYPE (V4L2_CID_MPEG_BASE+563)
+
+/**
+ * Defines the Control ID to set Sample Aspect Ratio width for H265 VUI encoding.
+ *
+ * An integer value must be supplied with this control.
+ * The VUI Sample Aspect Ratio indicator for H265 follows the standard enum defined for
+ * v4l2_mpeg_video_h264_vui_sar_idc.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_H265_VUI_EXT_SAR_WIDTH (V4L2_CID_MPEG_BASE+564)
+
+/**
+ * Defines the Control ID to set Sample Aspect Ratio height for H265 VUI encoding.
+ *
+ * An integer value must be supplied with this control.
+ * The VUI Sample Aspect Ratio indicator for H265 follows the standard enum defined
+ * for v4l2_mpeg_video_h264_vui_sar_idc.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_H265_VUI_EXT_SAR_HEIGHT (V4L2_CID_MPEG_BASE+565)
+
+/**
+ * Defines the Control ID to force INTRA frame.
+ *
+ * This control can be used by encoder to force encoding an intra frame.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_FORCE_INTRA_FRAME (V4L2_CID_MPEG_BASE+566)
+
+/**
+ * Defines the Control ID to force IDR frame.
+ *
+ * This control can be used by encoder to force encoding an idr frame.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_FORCE_IDR_FRAME (V4L2_CID_MPEG_BASE+567)
+
+ /**
+ * Defines the Control ID to set low latency to be used by decoder.
+ *
+ * This control can be used by decoder to set low latency for streams having
+ * I and IPPP frames.
+ *
+ * @attention This control must be set before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEO_CUDA_LOW_LATENCY (V4L2_CID_MPEG_BASE+568)
+
+/**
+ * Control ID to enable/disable setting for attaching IVF headers.
+ * Only to be used for AV1 codec.
+ *
+ * A boolean value should be supplied with this control.
+ * If value is false headers will be disabled and true will enable the headers.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_AV1_HEADERS_WITH_FRAME (V4L2_CID_MPEG_BASE+569)
+
+/**
+ * Defines the Control ID to configure AV1 tile for a session.
+ *
+ * A pointer to a valid #v4l2_enc_av1_tile_config structure must be supplied
+ * with this control.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ **/
+#define V4L2_CID_MPEG_VIDEOENC_AV1_TILE_CONFIGURATION (V4L2_CID_MPEG_BASE+570)
+
+/**
+ * Defines the Control ID to enable variance based SSIM RDO for AV1.
+ *
+ * A boolean value should be supplied with this control.
+ * If the value is false, SSIM RDO is disabled; if true, it is enabled.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ **/
+#define V4L2_CID_MPEG_VIDEOENC_AV1_ENABLE_SSIMRDO (V4L2_CID_MPEG_BASE+571)
+
+/**
+ * Defines the Control ID to disable CDF update for AV1.
+ *
+ * A boolean value should be supplied with this control.
+ * If value is true the CDF update in the symbol decoding process is disabled.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ **/
+#define V4L2_CID_MPEG_VIDEOENC_AV1_DISABLE_CDF_UPDATE (V4L2_CID_MPEG_BASE+572)
+
+/**
+ * Defines the Control ID to set total frames to encode.
+ *
+ * An integer value should be supplied with this control. A value of "x"
+ * indicates the number of frames that are given to encoder for encoding.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ **/
+#define V4L2_CID_MPEG_VIDEOENC_FRAMES_TO_ENCODE (V4L2_CID_MPEG_BASE+573)
+
+/**
+ * Defines the Control ID to configure initial QP parameters for an encoder session.
+ *
+ * A pointer to a valid #v4l2_ctrl_video_init_qp structure must be supplied
+ * with this control.
+ *
+ **/
+#define V4L2_CID_MPEG_VIDEOENC_INIT_FRAME_QP (V4L2_CID_MPEG_BASE+574)
+
+/**
+ * Defines the Control ID to enable lossless H.264/H.265 encoding.
+ *
+ * A boolean value must be supplied with this control. Default is 0.
+ * Lossless encoding is supported only for YUV444 8/10-bit format.
+ * @note This control must be set in case of H.264 YUV444 encoding as
+ * it does not support lossy encoding.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_ENABLE_LOSSLESS (V4L2_CID_MPEG_BASE+575)
+
+/**
+ * Defines the Control ID to set chroma_factor_idc for H.265 encoding.
+ *
+ * An integer value must be supplied with this control. Default is 1, and
+ * 3 for YUV444 8/10-bit format.
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_H265_CHROMA_FACTOR_IDC (V4L2_CID_MPEG_BASE+576)
+
+/**
+ * Defines the Control ID to configure preprocessing enhancement for a session.
+ *
+ * A pointer to a valid #v4l2_enc_ppe_init_params structure must be supplied
+ * with this control.
+ *
+ * @attention This control must be set after requesting buffers on both the
+ * planes.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_PPE_INIT_PARAMS (V4L2_CID_MPEG_BASE+577)
+
+ /**
+ * Defines Control ID to configure PRESET id for CUVID Encoder
+ *
+ * An integer value between 1 to 7 should be supplied with this control.
+ *
+ * Check PRESET Guide for more details at
+ * https://docs.nvidia.com/video-technologies/video-codec-sdk/nvenc-preset-migration-guide/index.html
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_CUDA_PRESET_ID (V4L2_CID_MPEG_BASE+578)
+
+ /**
+ * Defines Control ID to configure TUNING INFO id for CUVID Encoder
+ *
+ * An integer value between 1 to 4 should be supplied with this control.
+ *
+ * Check PRESET Guide for more details at
+ * https://docs.nvidia.com/video-technologies/video-codec-sdk/nvenc-preset-migration-guide/index.html
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_CUDA_TUNING_INFO (V4L2_CID_MPEG_BASE+579)
+
+/** Defines Control ID to configure CONSTQP VALUE for CUVID Encoder
+ *
+ * An integer value between 0 to 51 should be supplied with this control.
+ *
+ * @attention This control must be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_CUDA_CONSTQP (V4L2_CID_MPEG_BASE+580)
+
+/** Defines Control ID to configure FPS VALUE for CUVID Encoder
+ *
+ * A positive integer value should be supplied with this control.
+ *
+ * @attention This control is runtime configurable and can be called anytime after setting
+ * formats on both the planes.
+ */
+#define V4L2_CID_MPEG_VIDEOENC_RECONFIG_FPS (V4L2_CID_MPEG_BASE+581)
+
+/**
+ * Defines the Control ID to disable SAO filter for HEVC.
+ *
+ * A boolean value should be supplied with this control.
+ * If value is true, SAO filter for H265 encoding is disabled. Enabled by default
+ *
+ * @attention This control should be set after setting formats on both the planes
+ * and before requesting buffers on either plane.
+ **/
+#define V4L2_CID_MPEG_VIDEOENC_H265_DISABLE_SAO (V4L2_CID_MPEG_BASE + 582)
+
+/** @} */
+
+/**
+ * @defgroup V4L2Argus V4L2 Video Camera
+ *
+ * @brief NVIDIA V4L2 Camera Description and Extensions
+ *
+ * The camera device node is \c "/dev/video%d".
+ *
+ * ### Supported Pixelformats
+ * CAPTURE PLANE
+ * :---------------------
+ * V4L2_PIX_FMT_NV12M
+ * -
+ *
+ * ### Supported Memory Types
+ * MEMORY | CAPTURE PLANE
+ * :------------------: | :-----------:
+ * V4L2_MEMORY_MMAP | Y
+ * V4L2_MEMORY_DMABUF | Y
+ * V4L2_MEMORY_USERPTR | N
+ * \attention For the camera, it is necessary that the capture plane
+ * format be set and only then request buffers.
+ *
+ * ### Supported Controls
+ * The following sections describe the supported controls.
+ *
+ * #### Controls From the Open Source V4L2-Controls Header
+ * Control ID | Purpose | Runtime Configurable
+ * -------------------------------- | -------------------- | :------------------:
+ * - #V4L2_CID_3A_LOCK | AWB/AE Lock | Y
+ *
+ * All non-runtime configurable options must be set after setting format on
+ * the capture planes and before requesting buffers.
+ *
+ *
+ * ### NVIDIA-Specific Controls
+ * - #V4L2_CID_ARGUS_AUTO_WHITE_BALANCE_MODE
+ * - #V4L2_CID_ARGUS_SENSOR_MODE
+ * - #V4L2_CID_ARGUS_DENOISE_STRENGTH
+ * - #V4L2_CID_ARGUS_DENOISE_MODE
+ * - #V4L2_CID_ARGUS_EE_STRENGTH
+ * - #V4L2_CID_ARGUS_EE_MODE
+ * - #V4L2_CID_ARGUS_AE_ANTIBANDING_MODE
+ * - #V4L2_CID_ARGUS_ISP_DIGITAL_GAIN_RANGE
+ * - #V4L2_CID_ARGUS_COLOR_SATURATION
+ * - #V4L2_CID_ARGUS_GAIN_RANGE
+ * - #V4L2_CID_ARGUS_EXPOSURE_TIME_RANGE
+ * - #V4L2_CID_ARGUS_METADATA
+ *
+ * ### Setting Framerate
+ * The camera framerate can be set with \c VIDIOC_S_PARM IOCTL by setting the numerator
+ * and denominator in `v4l2_streamparm.parm.output.timeperframe`.
+ * Selection of camera mode can override this setting. If the camera mode is to be
+ * selected along with the specified framerate, then camera mode must be selected before
+ * setting the framerate.
+ *
+ * ### Camera Capture Metadata
+ * The camera can be queried to report frame related metadata. See \c V4L2_CID_ARGUS_METADATA
+ * for more information.
+ *
+ * ### EOS Handling
+ * Camera outputs all the queued empty buffers with data and TIME_OUT if no more empty buffers
+ * are queued. If an error is encountered, an empty buffer is queued to the output with
+ * V4L2_BUF_FLAG_LAST flag.
+ * @{
+ * @ingroup ee_extensions_group
+ */
+/**
+ * Defines the Control ID to set auto white balance mode for camera.
+ *
+ * A value of type \c v4l2_argus_ac_awb_mode must be supplied
+ * with this control.
+ *
+ * @attention This control should be set after setting format
+ * on the capture plane.
+ */
+#define V4L2_CID_ARGUS_AUTO_WHITE_BALANCE_MODE (V4L2_CID_CAMERA_CLASS_BASE+20)
+
+/**
+ * Defines the Control ID to set sensor mode for camera.
+ *
+ * A non-negative integer value must be supplied with this control.
+ *
+ * @attention This control should be set after setting format
+ * and before requesting buffers on the capture plane.
+ */
+#define V4L2_CID_ARGUS_SENSOR_MODE (V4L2_CID_CAMERA_CLASS_BASE+32)
+
+/**
+ * Defines the Control ID to set denoise strength for camera.
+ *
+ * A pointer to a valid structure \c v4l2_argus_denoise_strength must be
+ * supplied with this control.
+ *
+ * @attention This control should be set after setting format
+ * on the capture plane.
+ */
+#define V4L2_CID_ARGUS_DENOISE_STRENGTH (V4L2_CID_CAMERA_CLASS_BASE+33)
+
+/**
+ * Defines the Control ID to set denoise mode for camera.
+ *
+ * A value of type \c v4l2_argus_denoise_mode must be
+ * supplied with this control.
+ *
+ * @attention This control should be set after setting format
+ * on the capture plane.
+ */
+#define V4L2_CID_ARGUS_DENOISE_MODE (V4L2_CID_CAMERA_CLASS_BASE+34)
+
+/**
+ * Defines the Control ID to set edge enhancement strength for camera.
+ *
+ * A pointer to a valid structure \c v4l2_argus_edge_enhance_strength
+ * must be supplied with this control.
+ *
+ * @attention This control should be set after setting format
+ * on the capture plane.
+ */
+#define V4L2_CID_ARGUS_EE_STRENGTH (V4L2_CID_CAMERA_CLASS_BASE+35)
+
+/**
+ * Defines the Control ID to set edge enhancement mode for camera.
+ *
+ * A value of type \c v4l2_argus_edge_enhance_mode
+ * must be supplied with this control.
+ *
+ * @attention This control should be set after setting format
+ * on the capture plane.
+ */
+#define V4L2_CID_ARGUS_EE_MODE (V4L2_CID_CAMERA_CLASS_BASE+36)
+
+/**
+ * Defines the Control ID to set Auto Exposure antibanding mode for camera.
+ *
+ * A value of type \c v4l2_argus_ac_ae_antibanding_mode must be supplied
+ * with this control.
+ *
+ * @attention This control should be set after setting format
+ * on the capture plane.
+ */
+#define V4L2_CID_ARGUS_AE_ANTIBANDING_MODE (V4L2_CID_CAMERA_CLASS_BASE+37)
+
+/**
+ * Defines the Control ID to set exposure compensation for camera.
+ *
+ * A pointer to a valid structure \c v4l2_argus_exposure_compensation
+ * must be supplied with this control.
+ *
+ * @attention This control should be set after setting format
+ * on the capture plane.
+ */
+#define V4L2_CID_ARGUS_EXPOSURE_COMPENSATION (V4L2_CID_CAMERA_CLASS_BASE+38)
+
+/**
+ * Defines the Control ID to set ISP digital gain range for camera.
+ *
+ * A pointer to a valid structure \c v4l2_argus_ispdigital_gainrange
+ * must be supplied with this control.
+ *
+ * @attention This control should be set after setting format
+ * on the capture plane.
+ */
+#define V4L2_CID_ARGUS_ISP_DIGITAL_GAIN_RANGE (V4L2_CID_CAMERA_CLASS_BASE+39)
+
+/**
+ * Defines the Control ID to set color saturation for camera.
+ *
+ * A pointer to a valid structure \c v4l2_argus_color_saturation
+ * must be supplied with this control.
+ * The flag `EnableSaturation` must be set to true to enable setting
+ * the specified color saturation.
+ *
+ * @attention This control should be set after setting format
+ * and before requesting buffers on the capture plane.
+ */
+#define V4L2_CID_ARGUS_COLOR_SATURATION (V4L2_CID_CAMERA_CLASS_BASE+40)
+
+/**
+ * Defines the Control ID to set sensor gain range for camera.
+ *
+ * A pointer to a valid structure \c v4l2_argus_gainrange
+ * must be supplied with this control.
+ *
+ * @attention This control should be set after setting format
+ * on the capture plane.
+ */
+#define V4L2_CID_ARGUS_GAIN_RANGE (V4L2_CID_CAMERA_CLASS_BASE+41)
+
+/**
+ * Defines the Control ID to set exposure time range for camera.
+ *
+ * A pointer to a valid structure \c v4l2_argus_exposure_timerange
+ * must be supplied with this control.
+ *
+ * @attention This control should be set after setting format
+ * on the capture plane.
+ */
+#define V4L2_CID_ARGUS_EXPOSURE_TIME_RANGE (V4L2_CID_CAMERA_CLASS_BASE+42)
+
+/**
+ * Defines the Control ID to get the camera argus output metadata.
+ *
+ * A pointer to a valid structure \c v4l2_argus_ctrl_metadata must be supplied
+ * with this control.
+ *
+ * @attention This control must be read after dequeueing a buffer successfully from
+ * the capture plane. The values in the structure are valid until the buffer is queued
+ * again.
+ */
+#define V4L2_CID_ARGUS_METADATA (V4L2_CID_CAMERA_CLASS_BASE+43)
+
+/** @} */
+
+/** @addtogroup V4L2Dec */
+/** @{ */
+/**
+ * Enum v4l2_skip_frames_type, possible methods for decoder skip frames. */
+enum v4l2_skip_frames_type {
+ /** Do not skip any frame. */
+ V4L2_SKIP_FRAMES_TYPE_NONE = 0,
+ /** Skip all non-reference frames. */
+ V4L2_SKIP_FRAMES_TYPE_NONREF = 1,
+ /** Skip all frames except IDR */
+ V4L2_SKIP_FRAMES_TYPE_DECODE_IDR_ONLY = 2,
+};
+
+/**
+ * Enum v4l2_cuda_mem_type, possible types of CUDA memory. */
+enum v4l2_cuda_mem_type {
+ /** Memory type device. */
+ V4L2_CUDA_MEM_TYPE_DEVICE = 0,
+ /** Memory type host. */
+ V4L2_CUDA_MEM_TYPE_PINNED = 1,
+ /** Memory type unified. */
+ V4L2_CUDA_MEM_TYPE_UNIFIED = 2,
+};
+
+/**
+ * Enum v4l2_videodec_input_error_type, possible error types for input stream. */
+enum v4l2_videodec_input_error_type {
+ /** no error. */
+ V4L2_DEC_ERROR_NONE = 0x0,
+ /** sps error. */
+ V4L2_DEC_ERROR_SPS = 0x1,
+ /** pps error. */
+ V4L2_DEC_ERROR_PPS = 0x2,
+ /** slice header error. */
+ V4L2_DEC_ERROR_SLICE_HDR = 0x4,
+ /** missing reference frame error. */
+ V4L2_DEC_ERROR_MISSING_REF_FRAME = 0x8,
+ /** VPS error. */
+ V4L2_DEC_ERROR_VPS = 0x10,
+};
+
+/**
+ * Holds the decoder error status metadata for the frame.
+ */
+typedef struct v4l2_ctrl_videodec_statusmetadata_
+{
+ /** Error types:
+ * bit 0: Fatal
+ * bit 1: MB level syntax
+ * bit 2: Missing Slice(s)
+ * bit 3: PrevFrameLostFlag */
+ __u32 DecodeError;
+ /** Number of macro blocks decoded without error. */
+ __u32 DecodedMBs;
+ /** Number of macro blocks where error was concealed. */
+ __u32 ConcealedMBs;
+ /** POC of the reference frame used for concealment. */
+ __u32 nConcealedFromPOC;
+ /** Time required to decode the frame, in microseconds. */
+ __u32 FrameDecodeTime;
+}v4l2_ctrl_videodec_statusmetadata;
+
+/**
+ * Holds the frame-specific metadata for a reference frame.
+ */
+typedef struct v4l2_ctrl_videodec_refframe_metadata_
+{
+ /** Boolean value indicating if the frame is present in DPB. */
+ __u32 bPresent;
+ /** Boolean value indicating if the frame is an IDR. */
+ __u32 bIdrFrame;
+ /** Boolean value indicating if the frame is a long term reference frame. */
+ __u32 bLTRefFrame;
+ /** Boolean value indicating if it is a predicted frame. */
+ __u32 bPredicted;
+ /** Picture order count of the frame. */
+ __u32 nPictureOrderCnt;
+ /** Frame number. Resets to zero for an IDR frame. */
+ __u32 nFrameNum;
+ /** Long Term Frame Index of the frame. */
+ __u32 nLTRFrameIdx;
+} v4l2_ctrl_videodec_refframe_metadata;
+
+/**
+ * Holds the frame-specific metadata for the current frame.
+ */
+typedef struct v4l2_ctrl_videodec_currentframe_metadata_
+{
+ /** Boolean value indicating if the current frame is a reference frame. */
+ __u32 bRefFrame;
+ /** Boolean value indicating if the current frame is an IDR. */
+ __u32 bIdrFrame;
+ /** Boolean value indicating if the current frame is a long term reference frame. */
+ __u32 bLTRefFrame;
+ /** Picture order count of the current frame. */
+ __u32 nPictureOrderCnt;
+ /** Frame number. Resets to zero for an IDR frame. */
+ __u32 nFrameNum;
+ /** Long Term Frame Index of the current frame. */
+ __u32 nLTRFrameIdx;
+} v4l2_ctrl_videodec_currentframe_metadata;
+
+/**
+ * Holds the decoder DPB info metadata.
+ */
+typedef struct v4l2_ctrl_videodec_dpbinfometadata_
+{
+ /** Metadata for the current decoded frame. */
+ v4l2_ctrl_videodec_currentframe_metadata currentFrame;
+ /** Number of active frames present in the DPB. */
+ __u32 nActiveRefFrames;
+ /** An array of metadatas for the active frames in the DPB. Only
+ * nActiveRefFrames elements in the array are valid. */
+ v4l2_ctrl_videodec_refframe_metadata RPSList[16];
+} v4l2_ctrl_videodec_dpbinfometadata;
+
+/**
+ * Holds H.264 specific decoder metadata for the frame.
+ */
+typedef struct v4l2_ctrl_h264dec_bufmetadata_
+{
+ /** Holds the number of bits in the frame. */
+ __u32 nFrameNumBits;
+ /** Type of frame:
+ * 0 = B
+ * 1 = P
+ * 2 = I */
+ __u32 FrameType;
+ /** Holds the current DPB information of the decoder. */
+ v4l2_ctrl_videodec_dpbinfometadata dpbInfo;
+}v4l2_ctrl_h264dec_bufmetadata;
+
+/**
+ * Holds H.265 specific decoder metadata for the frame.
+ */
+typedef struct v4l2_ctrl_hevcdec_bufmetadata_
+{
+ /** Holds the number of bits in the frame. */
+ __u32 nPocLsbBits;
+ /** Type of frame:
+ * 0 = B
+ * 1 = P
+ * 2 = I */
+ __u32 FrameType;
+ /** Holds the current DPB information of the decoder. */
+ v4l2_ctrl_videodec_dpbinfometadata dpbInfo;
+}v4l2_ctrl_hevcdec_bufmetadata;
+
+/**
+ * Holds the video decoder input header error metadata for a frame.
+ */
+typedef struct v4l2_ctrl_videodec_inputbuf_metadata_
+{
+ /** Bits represent types of error as defined
+ * with v4l2_videodec_input_error_type. */
+ __u32 nBitStreamError;
+} v4l2_ctrl_videodec_inputbuf_metadata;
+
+/**
+ * Holds the video decoder output metadata for a frame.
+ */
+typedef struct v4l2_ctrl_videodec_outputbuf_metadata_
+{
+ /** Color primaries. */
+ __u8 ucColorPrimaries;
+ /** Transfer characteristics. */
+ __u8 ucTransferCharacteristics;
+ /** Matrix coefficients. */
+ __u8 ucMatrixCoefficients;
+ /** Boolean value indicating if \c FrameDecStats has valid contents. */
+ __u32 bValidFrameStatus;
+ /** Frame decode statistics. */
+ v4l2_ctrl_videodec_statusmetadata FrameDecStats;
+ /** Codec specific metadata for the frame. */
+ union {
+ /** H.264 specific metadata. */
+ v4l2_ctrl_h264dec_bufmetadata H264DecParams;
+ /** H.265 specific metadata. */
+ v4l2_ctrl_hevcdec_bufmetadata HEVCDecParams;
+ }CodecParams;
+} v4l2_ctrl_videodec_outputbuf_metadata;
+/** @} */
+
+/** @addtogroup V4L2Enc */
+/** @{ */
+
+/**
+ * Specifies the types of encoder temporal tradeoff levels
+ */
+enum v4l2_enc_temporal_tradeoff_level_type {
+ /** Do not drop any buffers. */
+ V4L2_ENC_TEMPORAL_TRADEOFF_LEVEL_DROPNONE = 0,
+ /** Drop 1 in every 5 buffers. */
+ V4L2_ENC_TEMPORAL_TRADEOFF_LEVEL_DROP1IN5,
+ /** Drop 1 in every 3 buffers. */
+ V4L2_ENC_TEMPORAL_TRADEOFF_LEVEL_DROP1IN3,
+ /** Drop 1 in every 2 buffers. */
+ V4L2_ENC_TEMPORAL_TRADEOFF_LEVEL_DROP1IN2,
+ /** Drop 2 in every 3 buffers. */
+ V4L2_ENC_TEMPORAL_TRADEOFF_LEVEL_DROP2IN3,
+};
+
+/**
+ * Specifies the encoder HW Preset type.
+ */
+enum v4l2_enc_hw_preset_type {
+ /** Encoder HWPreset DISABLED. */
+ V4L2_ENC_HW_PRESET_DISABLE = 0,
+ /** Encoder HWPreset with per frame encode time UltraFast. */
+ V4L2_ENC_HW_PRESET_ULTRAFAST = 1,
+ /** Encoder HWPreset with per frame encode time Fast. */
+ V4L2_ENC_HW_PRESET_FAST,
+ /** Encoder HWPreset with per frame encode time Medium. */
+ V4L2_ENC_HW_PRESET_MEDIUM,
+ /** Encoder HWPreset with per frame encode time Slow. */
+ V4L2_ENC_HW_PRESET_SLOW,
+};
+
+/**
+ * Specifies the encoder HW tuning info type.
+ */
+enum v4l2_enc_hw_tuning_info_type {
+ /** Encoder Tuning Info Undefined */
+ V4L2_ENC_TUNING_INFO_UNDEFINED = 0,
+ /** Encoder Tuning Info High Quality */
+ V4L2_ENC_TUNING_INFO_HIGH_QUALITY = 1,
+ /** Encoder Tuning Info Low Latency */
+ V4L2_ENC_TUNING_INFO_LOW_LATENCY,
+ /** Encoder Tuning Info Ultra Low Latency */
+ V4L2_ENC_TUNING_INFO_ULTRA_LOW_LATENCY,
+ /** Encoder Tuning Info Lossless */
+ V4L2_ENC_TUNING_INFO_LOSSLESS,
+};
+
+/**
+ * Holds encoder HW Preset type parameters
+ * to be used with #V4L2_CID_MPEG_VIDEOENC_HW_PRESET_TYPE_PARAM IOCTL.
+ */
+typedef struct v4l2_enc_hw_preset_type_param_
+{
+ /** Type in which the encoder hw preset is specified, one of type #v4l2_enc_hw_preset_type. */
+ enum v4l2_enc_hw_preset_type hw_preset_type;
+ /** Boolean value indicating if encoder set to max clock. */
+ __u8 set_max_enc_clock;
+}v4l2_enc_hw_preset_type_param;
+
+/**
+ * Enum specifying the type of slice length.
+ */
+enum v4l2_enc_slice_length_type {
+  /** Slice size is specified in terms of number of bytes.
+   * NOTE(review): the constant name says BITS while this comment says bytes —
+   * confirm the actual unit against the NVIDIA Multimedia API reference. */
+  V4L2_ENC_SLICE_LENGTH_TYPE_BITS = 0,
+  /** Slice size is specified in terms of number of macroblocks. */
+  V4L2_ENC_SLICE_LENGTH_TYPE_MBLK,
+};
+
+/**
+ * Specifies the input buffer metadata flag.
+ */
+enum v4l2_enc_input_metadata_param {
+ /** Input metadata structure contains ROI parameters. */
+ V4L2_ENC_INPUT_ROI_PARAM_FLAG = 1,
+ /** Input metadata structure contains GDR parameters. */
+ V4L2_ENC_INPUT_GDR_PARAM_FLAG = 1 << 1,
+ /** Input metadata structure contains External RPS parameters. */
+ V4L2_ENC_INPUT_RPS_PARAM_FLAG = 1 << 2,
+ /** Input metadata structure contains External RC parameters. */
+ V4L2_ENC_INPUT_RC_PARAM_FLAG = 1 << 3,
+ /** Input metadata structure contains ReconCRC parameters. */
+ V4L2_ENC_INPUT_RECONCRC_PARAM_FLAG = 1 << 4,
+};
+
+/**
+ * Defines the possible levels for H.265 encoder.
+ */
+enum v4l2_mpeg_video_h265_level {
+
+ V4L2_MPEG_VIDEO_H265_LEVEL_1_0_MAIN_TIER = 0,
+ V4L2_MPEG_VIDEO_H265_LEVEL_1_0_HIGH_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_2_0_MAIN_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_2_0_HIGH_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_2_1_MAIN_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_2_1_HIGH_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_3_0_MAIN_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_3_0_HIGH_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_3_1_MAIN_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_3_1_HIGH_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_4_0_MAIN_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_4_0_HIGH_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_4_1_MAIN_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_4_1_HIGH_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_5_0_MAIN_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_5_0_HIGH_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_5_1_MAIN_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_5_1_HIGH_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_5_2_MAIN_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_5_2_HIGH_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_6_0_MAIN_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_6_0_HIGH_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_6_1_MAIN_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_6_1_HIGH_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_6_2_MAIN_TIER,
+ V4L2_MPEG_VIDEO_H265_LEVEL_6_2_HIGH_TIER,
+};
+
+#define V4L2_MPEG_VIDEO_BITRATE_MODE_CONSTQP 0x2
+
+/**
+ * Holds encoder slice length parameters, to be used with
+ * \c V4L2_CID_MPEG_VIDEOENC_SLICE_LENGTH_PARAM IOCTL.
+ */
+typedef struct v4l2_enc_slice_length_param_
+{
+ /** Type in which the slice length is specified, one of type \c v4l2_enc_slice_length_type. */
+ enum v4l2_enc_slice_length_type slice_length_type;
+ /** Size of the slice in either number of bytes or number of macro blocks. */
+ __u32 slice_length;
+}v4l2_enc_slice_length_param;
+
+/**
+ * Holds encoder virtual buffer size parameters, to be used with
+ * \c V4L2_CID_MPEG_VIDEOENC_VIRTUALBUFFER_SIZE IOCTL.
+ */
+typedef struct v4l2_enc_virtual_buffer_size_
+{
+ /** Size of the virtual buffer, in bits. */
+ __u32 size;
+}v4l2_enc_virtual_buffer_size;
+
+/**
+ * Holds encoder number of reference frame parameters, to be used with
+ * \c V4L2_CID_MPEG_VIDEOENC_NUM_REFERENCE_FRAMES IOCTL.
+ *
+ * This is not supported for H.265.
+ */
+typedef struct v4l2_enc_num_ref_frames_
+{
+ /** Number of reference frames. */
+ __u32 frames;
+}v4l2_enc_num_ref_frames;
+
+/**
+ * Holds encoder slice intrareferesh parameters, to be used with
+ * \c V4L2_CID_MPEG_VIDEOENC_SLICE_INTRAREFRESH_PARAM IOCTL.
+ */
+typedef struct v4l2_enc_slice_intrarefresh_param_
+{
+ /** Slice intrarefresh interval, in number of slices. */
+ __u32 interval;
+}v4l2_enc_slice_intrarefresh_param;
+
+/**
+ * Defines the maximum number of ROI regions supported by the encoder.
+ */
+#define V4L2_MAX_ROI_REGIONS 8
+
+/**
+ * Holds the encoder quality parameters for a single ROI region.
+ */
+typedef struct v4l2_enc_ROI_param_
+{
+ /** Region of interest rectangle. */
+ struct v4l2_rect ROIRect;
+ /** QP delta for the region. */
+ __s32 QPdelta;
+} v4l2_enc_ROI_param;
+
+/**
+ * Holds the encoder frame ROI parameters
+ * to be used with #V4L2_CID_MPEG_VIDEOENC_ROI_PARAMS IOCTL.
+ */
+typedef struct v4l2_enc_frame_ROI_params_
+{
+  /** Number of regions. */
+  __u32 num_ROI_regions;
+  /** Array of individual ROI parameters. */
+  v4l2_enc_ROI_param ROI_params[V4L2_MAX_ROI_REGIONS];
+  /** Config store integer to which this control is to be applied.
+   * This must be same as the value of config store of \c v4l2_buffer to which
+   * the ROI params is applied. */
+  __u32 config_store;
+}v4l2_enc_frame_ROI_params;
+
+/**
+ * Defines the available features for preprocessing enhancement (PPE) module
+ */
+enum v4l2_ppe_feature {
+ /** Default value for no feature enabled */
+ V4L2_PPE_FEATURE_NONE = 0,
+ /** Temporal Adaptive Quantization (TAQ) */
+ V4L2_PPE_FEATURE_TAQ = (1 << 0),
+};
+
+/**
+ * Holds the preprocessing enhancement initialization parameters
+ * to be used with #V4L2_CID_MPEG_VIDEOENC_PPE_INIT_PARAMS IOCTL.
+ */
+typedef struct v4l2_enc_ppe_init_params_
+{
+ /** Boolean value indicating if PPE module should be enabled */
+ __u8 enable_ppe;
+ /** Bit flags to enable/disable each individual feature */
+ __u32 feature_flags;
+ /** Boolean value indicating if profiler should be enabled */
+ __u8 enable_profiler;
+ /** The max number of milliseconds that Nvmedia should wait for each frame processing */
+ __s32 wait_time_ms;
+ /** Maximum strength of QP delta map for TAQ */
+ __u8 taq_max_qp_delta;
+ /** Boolean value indicating if TAQ should be applied for B-frames */
+ __u8 taq_b_frame_mode;
+}v4l2_enc_ppe_init_params;
+
+/**
+ * Holds the motion vector parameters for a single block.
+ * For H.264, nvenc provides one motion vector per 16x16 block(Macroblock).
+ * For H.265, nvenc provides one motion vector per 32x32 block(Coded Tree Block).
+ */
+typedef struct MVInfo_ {
+ /** Number of pixels the macro block moved in horizontal direction. */
+ __s32 mv_x : 16;
+ /** Number of pixels the macro block moved in vertical direction. */
+ __s32 mv_y : 14;
+ /** Temporal hints used by hardware for Motion Estimation. */
+ __u32 weight : 2;
+} MVInfo;
+
+/**
+ * Holds the motion vector parameters for one complete frame.
+ */
+typedef struct v4l2_ctrl_videoenc_outputbuf_metadata_MV_ {
+ /** Size of the pMVInfo buffer, in bytes. */
+ __u32 bufSize;
+ /** Pointer to the buffer containing the motion vectors. */
+ MVInfo *pMVInfo;
+} v4l2_ctrl_videoenc_outputbuf_metadata_MV;
+
+/**
+ * Maximum number of reference frames supported by the encoder.
+ */
+#define V4L2_MAX_REF_FRAMES 8
+
+/**
+ * Holds the RPS List parameters of encoded frame.
+ */
+typedef struct v4l2_enc_frame_full_prop_
+{
+ /** Unique frame ID. */
+ __u32 nFrameId;
+ /** Boolean value indicating if current frame is an IDR. */
+ __u8 bIdrFrame;
+ /** Boolean value indicating if set Long Term Ref Flag. */
+ __u8 bLTRefFrame;
+ /** Picture Order Count. */
+ __u32 nPictureOrderCnt;
+ /** FrameNum. */
+ __u32 nFrameNum;
+ /** LongTermFrameIdx of a picture. */
+ __u32 nLTRFrameIdx;
+} v4l2_enc_frame_full_prop;
+
+/**
+ * Holds the encoder output metadata for a frame, to be used with
+ * \c V4L2_CID_MPEG_VIDEOENC_METADATA IOCTL.
+ */
+typedef struct v4l2_ctrl_videoenc_outputbuf_metadata_
+{
+  /** Boolean value indicating if current frame is a key frame. */
+  __u8 KeyFrame;
+  /** Boolean value indicating end of frame in case of multi slice encoding. */
+  __u8 EndofFrame;
+  /** Average QP value of the frame. */
+  __u16 AvgQP;
+  /** Boolean value indicating if current frame is a golden or alternate frame. */
+  __u8 bIsGoldenOrAlternateFrame;
+  /** Boolean value indicating if the Recon CRC fields below are valid. */
+  __u8 bValidReconCRC;
+  /** Recon Y-frame CRC */
+  __u32 ReconFrame_Y_CRC;
+  /** Recon U-frame CRC */
+  __u32 ReconFrame_U_CRC;
+  /** Recon V-frame CRC */
+  __u32 ReconFrame_V_CRC;
+  /** Number of bits needed to encode the frame. */
+  __u32 EncodedFrameBits;
+  /** Minimum QP value in the frame. */
+  __u32 FrameMinQP;
+  /** Maximum QP value in the frame. */
+  __u32 FrameMaxQP;
+  /** RPS Feedback. */
+  __u32 bRPSFeedback_status;
+  /** Reference frame ID used for Motion Estimation of current frame,
+      ignored for IDR */
+  __u32 nCurrentRefFrameId;
+  /** Number of active reference frames. */
+  __u32 nActiveRefFrames;
+  /** RPS List including most recent frame if it is reference frame. */
+  v4l2_enc_frame_full_prop RPSList[V4L2_MAX_REF_FRAMES];
+} v4l2_ctrl_videoenc_outputbuf_metadata;
+
+/**
+ * Holds the metadata parameters for video encoder and decoder.
+ *
+ * The metadata is valid for the buffer with index \c buffer_index after the
+ * buffer is dequeued until it is queued again.
+ */
+typedef struct v4l2_ctrl_video_metadata_
+{
+ /** A pointer to #v4l2_ctrl_videodec_inputbuf_metadata structure.
+ * This must be a valid pointer when used with #V4L2_CID_MPEG_VIDEODEC_INPUT_METADATA
+ * IOCTL. */
+ v4l2_ctrl_videodec_inputbuf_metadata *VideoDecHeaderErrorMetadata;
+ /** A pointer to #v4l2_ctrl_videodec_outputbuf_metadata structure.
+ * This must be a valid pointer when used with #V4L2_CID_MPEG_VIDEODEC_METADATA
+ * IOCTL. */
+ v4l2_ctrl_videodec_outputbuf_metadata *VideoDecMetadata;
+ /** A pointer to #v4l2_ctrl_videoenc_outputbuf_metadata structure.
+ * This must be a valid pointer when used with #V4L2_CID_MPEG_VIDEOENC_METADATA
+ * IOCTL. */
+ v4l2_ctrl_videoenc_outputbuf_metadata *VideoEncMetadata;
+ /** A pointer to #v4l2_ctrl_videoenc_outputbuf_metadata_MV structure.
+ * This must be a valid pointer when used with #V4L2_CID_MPEG_VIDEOENC_METADATA_MV
+ * IOCTL. */
+ v4l2_ctrl_videoenc_outputbuf_metadata_MV *VideoEncMetadataMV;
+ /** Index of the buffer whose metadata is required. */
+ __u32 buffer_index;
+} v4l2_ctrl_video_metadata;
+
+/**
+ * Holds the encoder GDR parameters
+ * to be used with #V4L2_CID_MPEG_VIDEOENC_INPUT_METADATA IOCTL.
+ */
+typedef struct v4l2_enc_gdr_params_
+{
+ /** Parameter for GDR (Intra Refresh) for specified number of frames. */
+ __u32 nGDRFrames;
+} v4l2_enc_gdr_params;
+
+/**
+ * Holds the params to configure encoder for external rps control
+ * to be used with #V4L2_CID_MPEG_VIDEOENC_ENABLE_EXTERNAL_RPS_CONTROL IOCTL.
+ */
+typedef struct v4l2_enc_enable_ext_rps_ctrl_
+{
+ /** Boolean value indicating if enabled External RPS control. */
+ __u8 bEnableExternalRPS;
+ /** Boolean value indicating if allowed gap in frame number. */
+ __u8 bGapsInFrameNumAllowed;
+ /* TODO : Check for field details. */
+ __u32 nH264FrameNumBits;
+ /* TODO : Check for field details. */
+ __u32 nH265PocLsbBits;
+}v4l2_enc_enable_ext_rps_ctr;
+
+
+/**
+ * Holds the encoder frame property.
+ */
+typedef struct _v4l2_enc_frame_prop
+{
+ /** unique Id. */
+ __u32 nFrameId;
+ /** Long Term Ref Flag. */
+ __u8 bLTRefFrame;
+} v4l2_enc_frame_prop;
+
+/**
+ * Holds the encoder frame external rps control parameters
+ * to be used with #V4L2_CID_MPEG_VIDEOENC_INPUT_METADATA IOCTL.
+ */
+typedef struct v4l2_enc_frame_ext_rps_ctrl_params_
+{
+  /** Unique ID of the current frame. */
+  __u32 nFrameId;
+  /** Boolean value indicating if current frame is referenced or non-referenced. */
+  __u8 bRefFrame;
+  /** Boolean value indicating if current frame is a long term reference frame. */
+  __u8 bLTRefFrame;
+  /** Max number of reference frames to use for inter-motion search. */
+  __u32 nMaxRefFrames;
+  /** Number of valid entries in RPS; 0 means IDR. */
+  __u32 nActiveRefFrames;
+  /** Frame id of reference frame to be used for motion search, ignored for IDR. */
+  __u32 nCurrentRefFrameId;
+  /** Array of RPS entries. */
+  v4l2_enc_frame_prop RPSList[V4L2_MAX_REF_FRAMES];
+}v4l2_enc_frame_ext_rps_ctrl_params;
+
+
+/**
+ * Holds the params to configure encoder for external rate control mode
+ * to be used with #V4L2_CID_MPEG_VIDEOENC_ENABLE_EXTERNAL_RATE_CONTROL IOCTL.
+ */
+typedef struct v4l2_enc_enable_ext_rate_ctrl_
+{
+ /** Boolean value indicating if enabled External Picture RC. */
+ __u8 bEnableExternalPictureRC;
+ /** Max QP per session when external picture RC enabled. */
+ __u32 nsessionMaxQP;
+}v4l2_enc_enable_ext_rate_ctr;
+
+/**
+ * Holds the encoder frame external rate control parameters
+ * to be used with #V4L2_CID_MPEG_VIDEOENC_INPUT_METADATA ioctl.
+ */
+typedef struct v4l2_enc_frame_ext_rate_ctrl_params_
+{
+ /** Target frame bits. */
+ __u32 nTargetFrameBits;
+ /** Frame start QP. */
+ __u32 nFrameQP;
+ /** Frame min QP. */
+ __u32 nFrameMinQp;
+ /** Frame max QP. */
+ __u32 nFrameMaxQp;
+ /** Frame min QP deviation. */
+ __u32 nMaxQPDeviation;
+}v4l2_enc_frame_ext_rate_ctrl_params;
+
+/**
+ * Holds the params to configure encoder for ROI parameters encoding
+ *
+ * Must be used with #V4L2_CID_MPEG_VIDEOENC_ENABLE_ROI_PARAM IOCTL.
+ */
+typedef struct v4l2_enc_enable_roi_param_
+{
+  /** Boolean value indicating if ROI param encoding is enabled. */
+  __u8 bEnableROI;
+}v4l2_enc_enable_roi_param;
+
+/**
+ * Holds the params to configure encoder for Reconstructed CRC encoding
+ *
+ * Must be used with #V4L2_CID_MPEG_VIDEOENC_ENABLE_RECONCRC_PARAM IOCTL.
+ */
+typedef struct v4l2_enc_enable_reconcrc_param_
+{
+  /** Boolean value indicating if Reconstructed CRC encoding is enabled. */
+  __u8 bEnableReconCRC;
+}v4l2_enc_enable_reconcrc_param;
+
+/**
+ * Holds the encoder frame Reconstructed CRC parameters.
+ *
+ * Must be used with #V4L2_CID_MPEG_VIDEOENC_INPUT_METADATA IOCTL.
+ */
+typedef struct v4l2_enc_frame_ReconCRC_params_
+{
+ /** Rectangle to specify the co-ordinates of the input frame
+ * used to calculate reconstructed picture CRC. */
+ struct v4l2_rect ReconCRCRect;
+}v4l2_enc_frame_ReconCRC_params;
+
+/**
+ * Holds the encoder frame input metadata parameters.
+ *
+ * Must be used with #V4L2_CID_MPEG_VIDEOENC_INPUT_METADATA IOCTL.
+ */
+typedef struct v4l2_ctrl_videoenc_input_metadata_
+{
+ /** Flag to indicate which inputbuffer metadata is valid. */
+ __u32 flag;
+ /** Pointer to the ROI params structure when ROI param is in metadata_flag. */
+ v4l2_enc_frame_ROI_params *VideoEncROIParams;
+ /** Pointer to the Reconstructed CRC parameter structure when ReconCRC param is in
+ * metadata flag. */
+ v4l2_enc_frame_ReconCRC_params *VideoReconCRCParams;
+ /** Pointer to the GDR params structure when GDR param is in metadata_flag. */
+ v4l2_enc_gdr_params *VideoEncGDRParams;
+ /** Pointer to the External RPL control parameter structure when RPS param is in
+ * metadata flag. */
+ v4l2_enc_frame_ext_rps_ctrl_params *VideoEncRPSParams;
+ /** Pointer to the External Rate control parameter structure when RC param is in
+ * metadata flag. */
+ v4l2_enc_frame_ext_rate_ctrl_params *VideoEncExtRCParams;
+ /** Config store integer to which these parameters are to be applied.
+ * This must be same as the value of config store of queued v4l2_buffer
+ * for which these parameters are valid. */
+ __u32 config_store;
+} v4l2_ctrl_videoenc_input_metadata;
+
+/**
+ * Setting Qp values in #v4l2_ctrl_video_qp_range to QP_RETAIN_VAL
+ * retains default or previously set QP values.
+ *
+ * NOTE(review): the QP fields below are __u32, so assigning -1 stores
+ * 0xFFFFFFFF; the driver presumably treats that value as "retain" — confirm.
+ */
+#define QP_RETAIN_VAL -1
+
+/**
+ * Holds the encoder frame min/max QP parameters.
+ *
+ * Must be used with #V4L2_CID_MPEG_VIDEOENC_QP_RANGE IOCTL.
+ * Any field may be set to #QP_RETAIN_VAL to retain the default or
+ * previously set QP value.
+ */
+typedef struct _v4l2_ctrl_video_qp_range
+{
+ /** Minimum QP value for I frame. */
+ __u32 MinQpI;
+ /** Maximum QP value for I frame. */
+ __u32 MaxQpI;
+ /** Minimum QP value for P frame. */
+ __u32 MinQpP;
+ /** Maximum QP value for P frame. */
+ __u32 MaxQpP;
+ /** Minimum QP value for B frame. */
+ __u32 MinQpB;
+ /** Maximum QP value for B frame. */
+ __u32 MaxQpB;
+} v4l2_ctrl_video_qp_range;
+
+/**
+ * Holds the encoder constant QP parameters, one value per frame type.
+ *
+ * NOTE(review): presumably used with a constant-QP rate-control mode;
+ * the associated control ID is not visible in this chunk — confirm.
+ */
+typedef struct _v4l2_ctrl_video_constqp
+{
+ /** Constant QP value for I frames. */
+ __u32 constQpI;
+ /** Constant QP value for P frames. */
+ __u32 constQpP;
+ /** Constant QP value for B frames. */
+ __u32 constQpB;
+} v4l2_ctrl_video_constqp;
+
+/**
+ * Holds a video frame rate expressed as a rational number.
+ */
+typedef struct _v4l2_ctrl_video_framerate
+{
+ /** Frame rate numerator. */
+ __u32 fps_n;
+ /** Frame rate denominator. */
+ __u32 fps_d;
+} v4l2_ctrl_video_framerate;
+
+/**
+ * Holds the encoder initial QP parameters for I, P and B frames.
+ *
+ * Must be used with #V4L2_CID_MPEG_VIDEOENC_INIT_FRAME_QP IOCTL.
+ */
+typedef struct _v4l2_ctrl_video_init_qp
+{
+ /** Initial QP value for I frame. */
+ __u32 IInitQP;
+ /** Initial QP value for P frame. */
+ __u32 PInitQP;
+ /** Initial QP value for B frame. */
+ __u32 BInitQP;
+} v4l2_ctrl_video_init_qp;
+
+/**
+ * Holds the params to configure tiles for AV1 encoding.
+ *
+ * Must be used with #V4L2_CID_MPEG_VIDEOENC_AV1_TILE_CONFIGURATION IOCTL.
+ */
+typedef struct v4l2_enc_av1_tile_config_
+{
+ /** Boolean value to enable multi-tile encoding. */
+ __u8 bEnableTile;
+ /** Number of tile rows, expressed as log2. */
+ __u32 nLog2RowTiles;
+ /** Number of tile columns, expressed as log2. */
+ __u32 nLog2ColTiles;
+}v4l2_enc_av1_tile_config;
+
+
+/** @} */
+
+/** @addtogroup V4L2Argus */
+/** @{ */
+
+/**
+ * Enum specifying types of denoise modes.
+ */
+enum v4l2_argus_denoise_mode {
+ V4L2_ARGUS_DENOISE_MODE_UNKNOWN = 0, /**< Mode unknown / not set. */
+ V4L2_ARGUS_DENOISE_MODE_OFF = 1, /**< Denoise disabled. */
+ V4L2_ARGUS_DENOISE_MODE_FAST = 2, /**< Fast denoise. */
+ V4L2_ARGUS_DENOISE_MODE_HIGH_QUALITY = 3, /**< High-quality denoise. */
+};
+
+/**
+ * Enum specifying types of edge enhancement modes.
+ */
+enum v4l2_argus_edge_enhance_mode {
+ V4L2_ARGUS_EDGE_ENHANCE_MODE_UNKNOWN = 0, /**< Mode unknown / not set. */
+ V4L2_ARGUS_EDGE_ENHANCE_MODE_OFF = 1, /**< Edge enhancement disabled. */
+ V4L2_ARGUS_EDGE_ENHANCE_MODE_FAST = 2, /**< Fast edge enhancement. */
+ V4L2_ARGUS_EDGE_ENHANCE_MODE_HIGH_QUALITY = 3, /**< High-quality edge enhancement. */
+};
+
+/**
+ * Enum specifying types of AE (Auto Exposure) antibanding modes.
+ */
+enum v4l2_argus_ac_ae_antibanding_mode {
+ V4L2_ARGUS_AE_ANTIBANDING_MODE_UNKNOWN = 0, /**< Mode unknown / not set. */
+ V4L2_ARGUS_AE_ANTIBANDING_MODE_OFF = 1, /**< Antibanding disabled. */
+ V4L2_ARGUS_AE_ANTIBANDING_MODE_AUTO = 2, /**< Automatic antibanding. */
+ V4L2_ARGUS_AE_ANTIBANDING_MODE_50HZ = 3, /**< Antibanding for 50 Hz mains flicker. */
+ V4L2_ARGUS_AE_ANTIBANDING_MODE_60HZ = 4, /**< Antibanding for 60 Hz mains flicker. */
+};
+
+/**
+ * Enum specifying types of AC AWB (Auto White Balance) modes.
+ *
+ * NOTE(review): unlike the sibling enums in this group there is no
+ * explicit UNKNOWN = 0 enumerator; the first valid value is 1 — confirm
+ * whether 0 is intentionally reserved/invalid for this control.
+ */
+enum v4l2_argus_ac_awb_mode {
+ V4L2_ARGUS_AWB_MODE_OFF = 1,
+ V4L2_ARGUS_AWB_MODE_AUTO = 2,
+ V4L2_ARGUS_AWB_MODE_INCANDESCENT = 3,
+ V4L2_ARGUS_AWB_MODE_FLUORESCENT = 4,
+ V4L2_ARGUS_AWB_MODE_WARM_FLUORESCENT = 5,
+ V4L2_ARGUS_AWB_MODE_DAYLIGHT = 6,
+ V4L2_ARGUS_AWB_MODE_CLOUDY_DAYLIGHT = 7,
+ V4L2_ARGUS_AWB_MODE_TWILIGHT = 8,
+ V4L2_ARGUS_AWB_MODE_SHADE = 9,
+ V4L2_ARGUS_AWB_MODE_MANUAL = 10,
+};
+
+/**
+ * Enum specifying types of AE (Auto Exposure) states.
+ *
+ * NOTE(review): V4L2_ARGUS_AeState_Unknown breaks the all-caps naming
+ * convention used by every other enumerator in this group; renaming it
+ * would break existing callers, so it is documented rather than changed.
+ */
+enum v4l2_argus_ae_state {
+ V4L2_ARGUS_AeState_Unknown = 0, /**< AE state unknown / not reported. */
+ V4L2_ARGUS_AE_STATE_INACTIVE = 1, /**< AE inactive. */
+ V4L2_ARGUS_AE_STATE_SEARCHING = 2, /**< AE searching. */
+ V4L2_ARGUS_AE_STATE_CONVERGED = 3, /**< AE converged. */
+ V4L2_ARGUS_AE_STATE_FLASH_REQUIRED = 4, /**< AE converged but flash required. */
+ V4L2_ARGUS_AE_STATE_TIMEOUT = 5, /**< AE timed out. */
+};
+
+/**
+ * Enum specifying types of AWB (Auto White Balance) states.
+ *
+ * NOTE(review): V4L2_ARGUS_AwbState_Unknown breaks the all-caps naming
+ * convention of the other enumerators (same issue as v4l2_argus_ae_state);
+ * renaming would break existing callers, so it is left as-is.
+ */
+enum v4l2_argus_awb_state {
+ V4L2_ARGUS_AwbState_Unknown = 0, /**< AWB state unknown / not reported. */
+ V4L2_ARGUS_AWB_STATE_INACTIVE = 1, /**< AWB inactive. */
+ V4L2_ARGUS_AWB_STATE_SEARCHING = 2, /**< AWB searching. */
+ V4L2_ARGUS_AWB_STATE_CONVERGED = 3, /**< AWB converged. */
+ V4L2_ARGUS_AWB_STATE_LOCKED = 4, /**< AWB locked. */
+};
+
+/**
+ * Holds the strength value for the denoise operation.
+ *
+ * Must be used with #V4L2_CID_ARGUS_DENOISE_STRENGTH ioctl.
+ */
+typedef struct _v4l2_argus_denoise_strength
+{
+ /** Denoise strength. Valid range: {-1.0f, 1.0f}. **/
+ float DenoiseStrength;
+}v4l2_argus_denoise_strength;
+
+/**
+ * Holds the strength value for the edge enhancement operation.
+ *
+ * Must be used with #V4L2_CID_ARGUS_EE_STRENGTH ioctl.
+ */
+typedef struct _v4l2_argus_edge_enhance_strength
+{
+ /** Edge enhancement strength. Valid range: {-1.0f, 1.0f}. **/
+ float EdgeEnhanceStrength;
+}v4l2_argus_edge_enhance_strength;
+
+/**
+ * Holds the value for exposure compensation.
+ *
+ * Must be used with #V4L2_CID_ARGUS_EXPOSURE_COMPENSATION ioctl.
+ */
+typedef struct _v4l2_argus_exposure_compensation
+{
+ /** Exposure compensation. Valid range: {-2.0f, 2.0f}. **/
+ float ExposureCompensation;
+}v4l2_argus_exposure_compensation;
+
+/**
+ * Holds the value for the ISP digital gain range.
+ *
+ * Must be used with #V4L2_CID_ARGUS_ISP_DIGITAL_GAIN_RANGE ioctl.
+ */
+typedef struct _v4l2_argus_ispdigital_gainrange
+{
+ /** Valid range for both limits: {1, 256}.
+ * NOTE(review): fields are float although the documented range looks
+ * integral — fractional gains are presumably accepted; confirm. **/
+ /** Digital gain range start limit. **/
+ float MinISPDigitalGainRange;
+ /** Digital gain range end limit. **/
+ float MaxISPDigitalGainRange;
+}v4l2_argus_ispdigital_gainrange;
+
+/**
+ * Holds the value for absolute color saturation.
+ *
+ * Must be used with #V4L2_CID_ARGUS_COLOR_SATURATION ioctl.
+ */
+typedef struct _v4l2_argus_color_saturation
+{
+ /** Boolean value indicating whether the user-specified absolute color
+ * saturation below is enabled. **/
+ __u8 EnableSaturation;
+ /** Specified absolute color saturation. **/
+ float ColorSaturation;
+}v4l2_argus_color_saturation;
+
+/**
+ * Holds the value for the sensor analog gain range.
+ *
+ * Must be used with #V4L2_CID_ARGUS_GAIN_RANGE ioctl.
+ */
+typedef struct _v4l2_argus_gainrange
+{
+ /** Analog gain range start limit. **/
+ float MinGainRange;
+ /** Analog gain range end limit. **/
+ float MaxGainRange;
+}v4l2_argus_gainrange;
+
+/**
+ * Holds the value for the exposure time range.
+ *
+ * Must be used with #V4L2_CID_ARGUS_EXPOSURE_TIME_RANGE ioctl.
+ */
+typedef struct _v4l2_argus_exposure_timerange
+{
+ /** Exposure time range start limit (presumably nanoseconds, matching the
+ * time fields in v4l2_argus_ctrl_metadata — TODO confirm units). **/
+ __u64 MinExposureTimeRange;
+ /** Exposure time range end limit. **/
+ __u64 MaxExposureTimeRange;
+}v4l2_argus_exposure_timerange;
+
+/**
+ * Holds the value for camera output metadata.
+ *
+ * Must be used with #V4L2_CID_ARGUS_METADATA ioctl.
+ */
+typedef struct _v4l2_argus_ctrl_metadata
+{
+ /** Boolean value to indicate if AE was locked for this capture. **/
+ __u8 AeLocked;
+ /** Boolean value to indicate if the metadata has valid contents. **/
+ __u8 ValidFrameStatus;
+ /** Index of the buffer captured. **/
+ __u32 BufferIndex;
+ /** Focuser position used for the capture. **/
+ __u32 FocuserPosition;
+ /** CCT value calculated by AWB. **/
+ __u32 AwbCCT;
+ /** ISO value used for the capture. **/
+ __u32 SensorSensitivity;
+ /** Time (nanoseconds) taken to integrate the capture. **/
+ __u64 FrameDuration;
+ /** Frame readout time for the capture. **/
+ __u64 FrameReadoutTime;
+ /** Sensor exposure time value for the capture. **/
+ __u64 SensorExposureTime;
+ /** ISP digital gain value for the capture. **/
+ float IspDigitalGain;
+ /** Estimated scene brightness for the capture. **/
+ float SceneLux;
+ /** Sensor analog gain for the capture. **/
+ float SensorAnalogGain;
+ /** AE state that ran for the capture. **/
+ enum v4l2_argus_ae_state AEState;
+ /** AWB state that ran for the capture. **/
+ enum v4l2_argus_awb_state AWBState;
+}v4l2_argus_ctrl_metadata;
+/** @} */
+
+/** @addtogroup V4L2Conv */
+/** @{ */
+
+/**
+ * Enum specifying types of buffer memory layouts.
+ */
+enum v4l2_nv_buffer_layout {
+ V4L2_NV_BUFFER_LAYOUT_PITCH = 0, /**< Pitch Linear Layout. */
+ V4L2_NV_BUFFER_LAYOUT_BLOCKLINEAR = 1, /**< Block Linear Layout. */
+};
+
+/**
+ * Specifies the types of rotation/flip algorithms.
+ */
+enum v4l2_flip_method {
+ V4L2_FLIP_METHOD_IDENTITY = 0, /**< Identity (no rotation or flip). */
+ V4L2_FLIP_METHOD_90L = 1, /**< Rotate counter-clockwise 90 degrees. */
+ V4L2_FLIP_METHOD_180 = 2, /**< Rotate 180 degrees. */
+ V4L2_FLIP_METHOD_90R = 3, /**< Rotate clockwise 90 degrees. */
+ V4L2_FLIP_METHOD_HORIZ = 4, /**< Flip horizontally. */
+ V4L2_FLIP_METHOD_INVTRANS = 5, /**< Flip across the upper-right/lower-left diagonal. */
+ V4L2_FLIP_METHOD_VERT = 6, /**< Flip vertically. */
+ V4L2_FLIP_METHOD_TRANS = 7, /**< Flip across the upper-left/lower-right diagonal. */
+};
+
+/**
+ * Specifies the types of interpolation methods.
+ *
+ * NOTE(review): values start at 1; 0 appears to be unused/invalid — confirm.
+ */
+enum v4l2_interpolation_method {
+ V4L2_INTERPOLATION_NEAREST = 1, /**< Nearest-neighbor interpolation method. */
+ V4L2_INTERPOLATION_BILINEAR = 2, /**< Bi-linear interpolation method. */
+ V4L2_INTERPOLATION_5_TAP = 3, /**< 5-tap filter interpolation method. */
+ V4L2_INTERPOLATION_10_TAP = 4, /**< 10-tap filter interpolation method. */
+ V4L2_INTERPOLATION_SMART = 5, /**< Smart interpolation method. */
+ V4L2_INTERPOLATION_NICEST = 6, /**< Nicest interpolation method. */
+};
+
+/**
+ * Specifies the types of TNR (Temporal Noise Reduction) algorithms.
+ */
+enum v4l2_tnr_algorithm {
+ V4L2_TNR_ALGO_ORIGINAL = 0, /**< Default TNR algorithm. */
+ V4L2_TNR_ALGO_OUTDOOR_LOW_LIGHT = 1, /**< Outdoor Low Light TNR algorithm. */
+ V4L2_TNR_ALGO_OUTDOOR_MEDIUM_LIGHT = 2, /**< Outdoor Medium Light TNR algorithm. */
+ V4L2_TNR_ALGO_OUTDOOR_HIGH_LIGHT = 3, /**< Outdoor High Light TNR algorithm. */
+ V4L2_TNR_ALGO_INDOOR_LOW_LIGHT = 4, /**< Indoor Low Light TNR algorithm. */
+ V4L2_TNR_ALGO_INDOOR_MEDIUM_LIGHT = 5, /**< Indoor Medium Light TNR algorithm. */
+ V4L2_TNR_ALGO_INDOOR_HIGH_LIGHT = 6, /**< Indoor High Light TNR algorithm. */
+};
+
+/**
+ * Specifies the types of YUV rescale methods.
+ */
+enum v4l2_yuv_rescale_method {
+ /** Disable rescaling. */
+ V4L2_YUV_RESCALE_NONE = 0,
+ /** Standard (limited range [16 235]) to extension (full range [0 255]). */
+ V4L2_YUV_RESCALE_STD_TO_EXT = 1,
+ /** Extension (full range [0 255]) to standard (limited range [16 235]). */
+ V4L2_YUV_RESCALE_EXT_TO_STD = 2,
+};
+
+/**
+ * Holds a flag indicating whether HDR mastering display data
+ * (see #v4l2_ctrl_video_hdrmasteringdisplaydata) is present.
+ */
+typedef struct v4l2_ctrl_video_displaydata_
+{
+ /** Non-zero when mastering display data is present. */
+ __u32 masteringdisplaydatapresent;
+}v4l2_ctrl_video_displaydata;
+
+/**
+ * HDR mastering display metadata.
+ */
+
+typedef struct _v4l2_ctrl_video_hdrmasteringdisplaydata
+{
+ // idx 0 : G, 1 : B, 2 : R
+ __u16 display_primaries_x[3]; // normalized x chromaticity coordinate. It shall be in the range of 0 to 50000
+ __u16 display_primaries_y[3]; // normalized y chromaticity coordinate. It shall be in the range of 0 to 50000
+ __u16 white_point_x; // normalized x chromaticity coordinate of the white point of the mastering display
+ __u16 white_point_y; // normalized y chromaticity coordinate of the white point of the mastering display
+ __u32 max_display_parameter_luminance; // nominal maximum display luminance in units of 0.0001 candelas per square metre
+ __u32 min_display_parameter_luminance; // nominal minimum display luminance in units of 0.0001 candelas per square metre
+} v4l2_ctrl_video_hdrmasteringdisplaydata;
+
+
+/**
+ * Holds the event masks for polling the device.
+ */
+typedef struct _v4l2_ctrl_video_device_poll
+{
+ __u16 req_events; // Requested events, a bitmask of POLLIN, POLLOUT, POLLERR, POLLPRI.
+ __u16 resp_events; // Returned events, a similar bitmask of the above events.
+} v4l2_ctrl_video_device_poll;
+
+/** @} */
+#endif /*__V4L2_NV_EXTENSIONS_H__*/