diff --git a/commitFile.txt b/commitFile.txt
new file mode 100644
index 0000000..1ad3af4
--- /dev/null
+++ b/commitFile.txt
@@ -0,0 +1,25 @@
+Updating prebuilts and/or headers
+
+a2b82d590a9e48bc2f838593042f9775ab723057 - nvbufsurface.h
+9a172f748a2b8f4d6d15648ea353989ccc7aeba6 - gst-plugins-nv-video-sinks/Makefile
+9825d8a113dbf7dd16f791ff1ca66f2de3047b22 - gst-plugins-nv-video-sinks/LICENSE.libgstnvvideosinks
+7ef56486c9e6b3e354473a2959d274517dd709da - gst-plugins-nv-video-sinks/gstnvvideosinks.c
+de3e1e648709f765fbe78b5c365017f573ca1988 - gst-plugins-nv-video-sinks/common/context.c
+b38008a552820bac7742888cd4bc2610bca722eb - gst-plugins-nv-video-sinks/common/display.c
+3b014cb69f5042fd9752fd4bf06a5873fe08b41c - gst-plugins-nv-video-sinks/common/renderer.c
+96b0b4d38692a0aecf70944749684ac938ff192f - gst-plugins-nv-video-sinks/common/display.h
+6e77d54ffc5d1a49d5bad768cdf5cfadf458f1f7 - gst-plugins-nv-video-sinks/common/window.h
+d48e1dae85e3c6a0ba7623be7ee306b8e1ef6695 - gst-plugins-nv-video-sinks/common/gstnvvideofwd.h
+835dc89a20f2a95bea9c4033e40bf6787148ab08 - gst-plugins-nv-video-sinks/common/window.c
+72f9a4b823c4162c9f22cedb7c1cb1764d06fcb6 - gst-plugins-nv-video-sinks/common/renderer.h
+5e13200e9cba5f45d74cf6899dd3356d5f5d1c8e - gst-plugins-nv-video-sinks/common/context.h
+638b0da4ea65d02818289e89bc1d635ddbcdaec5 - gst-plugins-nv-video-sinks/common/x11/window_x11.h
+b3f1b67cae0b4643f6a676b362ceaa61abc9c40f - gst-plugins-nv-video-sinks/common/x11/display_x11.c
+d692399c6d94dbc7814770b08baf9271ed97f8e0 - gst-plugins-nv-video-sinks/common/x11/display_x11.h
+c98945083e215dff26507c1e10b0ebf62a2c6fb7 - gst-plugins-nv-video-sinks/common/x11/window_x11.c
+536a072a8ef84b3c91307777f88121fb88df2c4f - gst-plugins-nv-video-sinks/common/egl/context_egl.h
+35b1e9d33b1f8bb8fc7065ab57696696128e042d - gst-plugins-nv-video-sinks/common/egl/context_egl.c
+707a36267f329bb22afdd19b947be5a99478ec7a - gst-plugins-nv-video-sinks/common/renderer/renderer_gl.c
+f528404a796de5a23dab281588feb72f42343e59 - gst-plugins-nv-video-sinks/common/renderer/renderer_gl.h
+9b7125a2d7ebe2ea647c43d2eb43e8d04cd16c47 - gst-plugins-nv-video-sinks/nv3dsink/gstnv3dsink.h
+a02ed68d624ec0fc13349cbf5c4e675dfdfec1b9 - gst-plugins-nv-video-sinks/nv3dsink/gstnv3dsink.c
diff --git a/gst-plugins-nv-video-sinks/LICENSE.libgstnvvideosinks b/gst-plugins-nv-video-sinks/LICENSE.libgstnvvideosinks
new file mode 100644
index 0000000..f6d6818
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/LICENSE.libgstnvvideosinks
@@ -0,0 +1,23 @@
+The software listed below is licensed under the terms of the LGPLv2.1
+(see below). To obtain source code, contact oss-requests@nvidia.com.
+
+libgstnvvideosinks (libgstnvvideosinks.so)
+
+------------------------------------
+
+/*
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
diff --git a/gst-plugins-nv-video-sinks/Makefile b/gst-plugins-nv-video-sinks/Makefile
new file mode 100644
index 0000000..32c9edd
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/Makefile
@@ -0,0 +1,78 @@
+###############################################################################
+#
+# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA Corporation and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA Corporation is strictly prohibited.
+#
+###############################################################################
+
+ifneq ($(MAKECMDGOALS),install)
+ifeq ($(CUDA_VER),)
+ $(error "CUDA_VER is not set. Set it by running - "export CUDA_VER="")
+endif
+endif
+
+SO_NAME := libgstnvvideosinks.so
+DEST_DIR ?= /usr/lib/aarch64-linux-gnu/gstreamer-1.0
+
+SRCS := common/context.c \
+ common/display.c \
+ common/egl/context_egl.c \
+ common/renderer.c \
+ common/renderer/renderer_gl.c \
+ common/window.c \
+ common/x11/display_x11.c \
+ common/x11/window_x11.c \
+ gstnvvideosinks.c \
+ nv3dsink/gstnv3dsink.c
+
+INCLUDES += -I./common \
+ -I./common/egl \
+ -I./common/renderer \
+ -I./common/x11 \
+ -I/usr/local/include/gstreamer-1.0 \
+ -I/usr/local/cuda-$(CUDA_VER)/targets/aarch64-linux/include/ \
+ -I../
+
+PKGS := glib-2.0 \
+ gstreamer-1.0 \
+ gstreamer-base-1.0 \
+ gstreamer-video-1.0
+
+OBJS := $(SRCS:.c=.o)
+
+CFLAGS := -fPIC \
+ -DNV_VIDEO_SINKS_HAS_EGL \
+ -DNV_VIDEO_SINKS_HAS_GL \
+ -DNV_VIDEO_SINKS_HAS_NV3DSINK \
+ -DNV_VIDEO_SINKS_HAS_X11
+
+CFLAGS += `pkg-config --cflags $(PKGS)`
+
+LDFLAGS = -Wl,--no-undefined -L/usr/lib/aarch64-linux-gnu/tegra -L/usr/local/cuda-$(CUDA_VER)/targets/aarch64-linux/lib/
+
+LIBS = -lnvbufsurface -lGLESv2 -lEGL -lX11 -lm -lcuda -lcudart
+
+LIBS += `pkg-config --libs $(PKGS)`
+
+all: $(SO_NAME)
+
+%.o: %.c
+ $(CC) -c $< $(CFLAGS) $(INCLUDES) -o $@
+
+$(SO_NAME): $(OBJS)
+ $(CC) -shared -o $(SO_NAME) $(OBJS) $(LIBS) $(LDFLAGS)
+
+.PHONY: install
+$(DEST_DIR):
+ mkdir -p $(DEST_DIR)
+install: $(SO_NAME) | $(DEST_DIR)
+ cp -vp $(SO_NAME) $(DEST_DIR)
+
+.PHONY: clean
+clean:
+ rm -rf $(OBJS) $(SO_NAME)
diff --git a/gst-plugins-nv-video-sinks/README.txt b/gst-plugins-nv-video-sinks/README.txt
new file mode 100644
index 0000000..f3fc16f
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/README.txt
@@ -0,0 +1,38 @@
+###############################################################################
+#
+# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA Corporation and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA Corporation is strictly prohibited.
+#
+###############################################################################
+
+Steps to compile the "gst-plugins-nv-video-sinks" sources:
+
+1) Install gstreamer related packages using the command:
+
+ sudo apt-get install gstreamer1.0-tools gstreamer1.0-alsa \
+ gstreamer1.0-plugins-base gstreamer1.0-plugins-good \
+ gstreamer1.0-plugins-bad gstreamer1.0-plugins-ugly \
+ gstreamer1.0-libav libgstreamer1.0-dev \
+ libgstreamer-plugins-base1.0-dev libegl1-mesa-dev
+
+2) Install CUDA Runtime 10.0+
+
+3) Extract the package "libgstnvvideosinks_src.tbz2" as follows:
+
+   tar xvjf libgstnvvideosinks_src.tbz2
+
+4) cd "gst-plugins-nv-video-sinks"
+
+5) Export the appropriate CUDA_VER using - "export CUDA_VER=<version>"
+
+6) run "make" to create "libgstnvvideosinks.so"
+
+7) run "sudo make install" to install "libgstnvvideosinks.so" in
+   "/usr/lib/aarch64-linux-gnu/gstreamer-1.0".
+
+8) run "make install DEST_DIR=<dir>" to install at a different <dir>.
diff --git a/gst-plugins-nv-video-sinks/common/context.c b/gst-plugins-nv-video-sinks/common/context.c
new file mode 100644
index 0000000..2c96008
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/context.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include
+
+#include "context.h"
+#include "window.h"
+
+#if NV_VIDEO_SINKS_HAS_EGL
+#include "context_egl.h"
+#endif
+
+#define GST_CAT_DEFAULT gst_debug_nv_video_context
+GST_DEBUG_CATEGORY (GST_CAT_DEFAULT);
+
+struct _GstNvVideoContextPrivate
+{
+ GstDataQueue *queue;
+
+ GThread *render_thread;
+ gboolean render_thread_active;
+ gboolean eos_handled;
+ GstFlowReturn last_ret;
+
+ GMutex render_lock;
+ GCond create_cond;
+ GCond quit_cond;
+ GCond eos_cond;
+};
+
+G_DEFINE_ABSTRACT_TYPE_WITH_CODE (GstNvVideoContext, gst_nv_video_context,
+ GST_TYPE_OBJECT, G_ADD_PRIVATE(GstNvVideoContext));
+
+GstNvVideoContextType
+gst_nv_video_context_get_handle_type (GstNvVideoContext * context)
+{
+ g_return_val_if_fail (GST_IS_NV_VIDEO_CONTEXT (context),
+ GST_NV_VIDEO_CONTEXT_TYPE_NONE);
+
+ return context->type;
+}
+
+static gpointer
+gst_nv_video_context_render_thread_func (GstNvVideoContext * context)
+{
+ GstNvVideoContextClass *context_class;
+ GstDataQueueItem *item = NULL;
+ GstFlowReturn ret = GST_FLOW_OK;
+
+ context_class = GST_NV_VIDEO_CONTEXT_GET_CLASS (context);
+
+ GST_DEBUG_OBJECT (context, "render thread started");
+
+ context_class->setup (context);
+
+ cudaError_t CUerr = cudaSuccess;
+ GST_LOG_OBJECT (context, "SETTING CUDA DEVICE = %d in func=%s\n", context->gpu_id, __func__);
+ CUerr = cudaSetDevice(context->gpu_id);
+ if (CUerr != cudaSuccess) {
+ GST_LOG_OBJECT (context,"\n *** Unable to set device in %s Line %d\n", __func__, __LINE__);
+ return NULL;
+ }
+
+ g_mutex_lock (&context->priv->render_lock);
+ context->priv->render_thread_active = TRUE;
+ context->priv->last_ret = ret;
+ g_cond_signal (&context->priv->create_cond);
+ g_mutex_unlock (&context->priv->render_lock);
+
+ while (gst_data_queue_pop (context->priv->queue, &item)) {
+ GstMiniObject *object = item->object;
+ GstBuffer *buf = NULL;
+
+ GST_TRACE_OBJECT (context,
+ "render thread: got data queue item %" GST_PTR_FORMAT, object);
+
+ ret = GST_FLOW_ERROR;
+
+ if (GST_IS_BUFFER (object)) {
+ buf = GST_BUFFER_CAST (item->object);
+
+ if (context_class->show_frame (context, buf)) {
+ ret = GST_FLOW_OK;
+ }
+ } else if (!object) {
+ GST_TRACE_OBJECT (context, "render thread: handle EOS");
+
+ context_class->handle_eos (context);
+
+ g_mutex_lock (&context->priv->render_lock);
+ g_cond_signal (&context->priv->eos_cond);
+ context->priv->eos_handled = TRUE;
+ g_mutex_unlock (&context->priv->render_lock);
+
+ GST_TRACE_OBJECT (context, "render thread: handled EOS");
+ } else {
+ g_assert_not_reached ();
+ }
+
+ item->destroy (item);
+
+ g_mutex_lock (&context->priv->render_lock);
+ context->priv->last_ret = ret;
+ g_mutex_unlock (&context->priv->render_lock);
+
+ if (ret != GST_FLOW_OK) {
+ break;
+ }
+
+ GST_TRACE_OBJECT (context, "render thread: handled");
+ }
+
+ GST_DEBUG_OBJECT (context, "tearing down render thread");
+
+ context_class->cleanup (context);
+
+ g_mutex_lock (&context->priv->render_lock);
+ g_cond_signal (&context->priv->quit_cond);
+ context->priv->render_thread_active = FALSE;
+ g_mutex_unlock (&context->priv->render_lock);
+
+ GST_DEBUG_OBJECT (context, "render thread exit");
+
+ return NULL;
+}
+
+static void
+gst_nv_video_context_queue_free_item (GstDataQueueItem * item)
+{
+ GstDataQueueItem *data = item;
+ if (data->object)
+ gst_mini_object_unref (data->object);
+ g_slice_free (GstDataQueueItem, data);
+}
+
+static gboolean
+gst_nv_video_context_render_thread_show_frame (GstNvVideoContext * context,
+ GstBuffer * buf)
+{
+  GstFlowReturn last_ret;
+ GstDataQueueItem *item;
+ GstMiniObject *obj = GST_MINI_OBJECT_CAST (buf);
+
+ g_assert (obj);
+
+ g_mutex_lock (&context->priv->render_lock);
+ last_ret = context->priv->last_ret;
+ g_mutex_unlock (&context->priv->render_lock);
+
+ if (last_ret != GST_FLOW_OK) {
+ return FALSE;
+ }
+
+ item = g_slice_new (GstDataQueueItem);
+ item->destroy = (GDestroyNotify) gst_nv_video_context_queue_free_item;
+ item->object = gst_mini_object_ref (obj);
+ item->size = 0;
+ item->duration = GST_CLOCK_TIME_NONE;
+ item->visible = TRUE;
+
+ if (!gst_data_queue_push (context->priv->queue, item)) {
+ item->destroy (item);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static gboolean
+gst_nv_video_context_queue_check_full (GstDataQueue * queue, guint visible,
+ guint bytes, guint64 time, gpointer checkdata)
+{
+ return FALSE;
+}
+
+static void
+gst_nv_video_context_finalize (GObject * object)
+{
+ GstNvVideoContext *context = GST_NV_VIDEO_CONTEXT (object);
+
+ GST_DEBUG_OBJECT (context, "finalize begin");
+
+ if (context->priv->queue) {
+ g_object_unref (context->priv->queue);
+ context->priv->queue = NULL;
+ }
+
+ if (context->priv->render_thread) {
+ g_thread_unref (context->priv->render_thread);
+ context->priv->render_thread = NULL;
+ }
+
+ if (context->window) {
+ gst_object_unref (context->window);
+ context->window = NULL;
+ }
+
+ if (context->display) {
+ gst_object_unref (context->display);
+ context->display = NULL;
+ }
+
+ g_mutex_clear (&context->priv->render_lock);
+ g_cond_clear (&context->priv->create_cond);
+ g_cond_clear (&context->priv->quit_cond);
+ g_cond_clear (&context->priv->eos_cond);
+
+ GST_DEBUG_OBJECT (context, "finalize done");
+
+ G_OBJECT_CLASS (gst_nv_video_context_parent_class)->finalize (object);
+}
+
+static void
+gst_nv_video_context_init (GstNvVideoContext * context)
+{
+ GstNvVideoContext *self = GST_NV_VIDEO_CONTEXT (context);
+ context->priv = (GstNvVideoContextPrivate *)gst_nv_video_context_get_instance_private (self);
+
+ g_mutex_init (&context->priv->render_lock);
+ g_cond_init (&context->priv->create_cond);
+ g_cond_init (&context->priv->quit_cond);
+ g_cond_init (&context->priv->eos_cond);
+
+ context->priv->queue = NULL;
+ context->priv->render_thread = NULL;
+ context->priv->render_thread_active = FALSE;
+ context->priv->eos_handled = FALSE;
+
+ context->using_NVMM = 0;
+ context->cuContext = NULL;
+ context->cuResource[0] = NULL;
+ context->cuResource[1] = NULL;
+ context->cuResource[2] = NULL;
+ context->gpu_id = 0;
+
+ GST_DEBUG_OBJECT (context, "init done");
+}
+
+static void
+gst_nv_video_context_class_init (GstNvVideoContextClass * klass)
+{
+ G_OBJECT_CLASS (klass)->finalize = gst_nv_video_context_finalize;
+}
+
+GstNvVideoContext *
+gst_nv_video_context_new (GstNvVideoDisplay * display)
+{
+ GstNvVideoContext *context = NULL;
+ static gsize debug_init = 0;
+ const gchar *context_name = NULL;
+
+ if (g_once_init_enter (&debug_init)) {
+ GST_DEBUG_CATEGORY_INIT (gst_debug_nv_video_context, "nvvideocontext", 0,
+ "nvvideocontext");
+ g_once_init_leave (&debug_init, 1);
+ }
+
+ context_name = g_getenv ("GST_NV_VIDEO_CONTEXT");
+
+#if NV_VIDEO_SINKS_HAS_EGL
+ if (!context && (!context_name || g_strstr_len (context_name, 3, "egl"))) {
+ context = GST_NV_VIDEO_CONTEXT (gst_nv_video_context_egl_new (display));
+ }
+#endif
+
+ if (!context) {
+ GST_ERROR ("couldn't create context. GST_NV_VIDEO_CONTEXT = %s",
+ context_name ? context_name : NULL);
+ return NULL;
+ }
+
+ context->display = gst_object_ref (display);
+
+ GST_DEBUG_OBJECT (context, "created context for display %" GST_PTR_FORMAT,
+ display);
+
+ return context;
+}
+
+gboolean
+gst_nv_video_context_show_frame (GstNvVideoContext * context, GstBuffer * buf)
+{
+ g_mutex_lock (&context->priv->render_lock);
+ if (context->priv->render_thread_active) {
+ g_mutex_unlock (&context->priv->render_lock);
+ return gst_nv_video_context_render_thread_show_frame (context, buf);
+ }
+ g_mutex_unlock (&context->priv->render_lock);
+
+ return FALSE;
+}
+
+void
+gst_nv_video_context_handle_tearing (GstNvVideoContext * context)
+{
+ GstNvVideoContextClass *context_class =
+ GST_NV_VIDEO_CONTEXT_GET_CLASS (context);
+ context_class->handle_tearing (context);
+ return;
+}
+
+void
+gst_nv_video_context_handle_drc (GstNvVideoContext * context)
+{
+ GstNvVideoContextClass *context_class =
+ GST_NV_VIDEO_CONTEXT_GET_CLASS (context);
+
+ context_class->handle_drc (context);
+ return;
+}
+
+void
+gst_nv_video_context_handle_eos (GstNvVideoContext * context)
+{
+ GstNvVideoContextClass *context_class =
+ GST_NV_VIDEO_CONTEXT_GET_CLASS (context);
+ GstDataQueueItem *item;
+
+ g_mutex_lock (&context->priv->render_lock);
+
+ if (!context->priv->render_thread_active) {
+ g_mutex_unlock (&context->priv->render_lock);
+ context_class->handle_eos (context);
+ return;
+ }
+  // Push a NULL object into the queue to indicate EOS and wait until it is handled.
+ item = g_slice_new (GstDataQueueItem);
+ item->destroy = (GDestroyNotify) gst_nv_video_context_queue_free_item;
+ item->object = NULL;
+ item->size = 0;
+ item->duration = GST_CLOCK_TIME_NONE;
+ item->visible = TRUE;
+
+ if (!gst_data_queue_push (context->priv->queue, item)) {
+ GST_ERROR_OBJECT (context, "faild to send EOS to render thread");
+ item->destroy (item);
+ g_mutex_unlock (&context->priv->render_lock);
+ return;
+ }
+ GST_TRACE_OBJECT (context, "wait for render thread to handle EOS");
+ while (context->priv->render_thread_active && !context->priv->eos_handled) {
+ gint64 end = g_get_monotonic_time () + G_TIME_SPAN_SECOND;
+ g_cond_wait_until (&context->priv->eos_cond, &context->priv->render_lock, end);
+ }
+ GST_TRACE_OBJECT (context, "wait for render thread to handle EOS is done");
+ context->priv->eos_handled = FALSE;
+ g_mutex_unlock (&context->priv->render_lock);
+}
+
+GstCaps *
+gst_nv_video_context_get_caps (GstNvVideoContext * context)
+{
+ GstNvVideoContextClass *context_class;
+
+ if (!context) {
+ return NULL;
+ }
+
+ context_class = GST_NV_VIDEO_CONTEXT_GET_CLASS (context);
+
+ return context_class->get_caps (context);
+}
+
+gboolean
+gst_nv_video_context_set_window (GstNvVideoContext * context,
+ GstNvVideoWindow * window)
+{
+ if (context->window) {
+ gst_object_unref (context->window);
+ }
+
+ if (window) {
+    // Before the object's GObjectClass.dispose method is called, every
+    // GWeakRef associated with it becomes empty.
+ g_weak_ref_set (&window->context, context);
+ }
+
+ context->window = window ? gst_object_ref (window) : NULL;
+
+ return TRUE;
+}
+
+void
+gst_nv_video_context_destroy_render_thread (GstNvVideoContext * context)
+{
+ if (context->priv->queue) {
+ gst_data_queue_set_flushing (context->priv->queue, TRUE);
+ gst_data_queue_flush (context->priv->queue);
+ }
+
+ g_mutex_lock (&context->priv->render_lock);
+
+ if (context->priv->render_thread_active) {
+ GST_DEBUG_OBJECT (context, "destroying render thread");
+ while (context->priv->render_thread_active) {
+ g_cond_wait (&context->priv->quit_cond, &context->priv->render_lock);
+ }
+    g_thread_join (context->priv->render_thread);
+    // g_thread_join() drops the reference returned by g_thread_new(), so
+    // clear the pointer to avoid a second unref in finalize().
+    context->priv->render_thread = NULL;
+ GST_DEBUG_OBJECT (context, "render thread destroyed");
+ }
+
+ g_mutex_unlock (&context->priv->render_lock);
+}
+
+gboolean
+gst_nv_video_context_create_render_thread (GstNvVideoContext * context)
+{
+ g_mutex_lock (&context->priv->render_lock);
+
+ if (!context->priv->render_thread) {
+ g_assert (context->priv->queue == NULL);
+
+ context->priv->queue =
+ gst_data_queue_new (gst_nv_video_context_queue_check_full, NULL, NULL,
+ NULL);
+
+ if (!context->priv->queue) {
+ g_mutex_unlock (&context->priv->render_lock);
+ return FALSE;
+ }
+
+ gst_data_queue_set_flushing (context->priv->queue, FALSE);
+ gst_data_queue_flush (context->priv->queue);
+
+ context->priv->render_thread =
+ g_thread_new ("NvVideoRenderThread",
+ (GThreadFunc) gst_nv_video_context_render_thread_func, context);
+
+ while (!context->priv->render_thread_active) {
+ g_cond_wait (&context->priv->create_cond, &context->priv->render_lock);
+ }
+
+ if (context->priv->last_ret != GST_FLOW_OK) {
+ g_object_unref (context->priv->queue);
+ context->priv->queue = NULL;
+ g_mutex_unlock (&context->priv->render_lock);
+ return FALSE;
+ }
+
+ GST_INFO_OBJECT (context, "render thread created");
+ }
+
+ g_mutex_unlock (&context->priv->render_lock);
+
+ return TRUE;
+}
+
+gboolean
+gst_nv_video_context_create (GstNvVideoContext * context)
+{
+ GstNvVideoContextClass *context_class;
+
+ context_class = GST_NV_VIDEO_CONTEXT_GET_CLASS (context);
+
+ return context_class->create (context);
+}
diff --git a/gst-plugins-nv-video-sinks/common/context.h b/gst-plugins-nv-video-sinks/common/context.h
new file mode 100644
index 0000000..f923d08
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/context.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __GST_NV_VIDEO_CONTEXT_H__
+#define __GST_NV_VIDEO_CONTEXT_H__
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include "gstnvvideofwd.h"
+
+#include
+#include
+#include
+
+G_BEGIN_DECLS
+
+#define GST_TYPE_NV_VIDEO_CONTEXT \
+ (gst_nv_video_context_get_type())
+#define GST_NV_VIDEO_CONTEXT(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV_VIDEO_CONTEXT, GstNvVideoContext))
+#define GST_NV_VIDEO_CONTEXT_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_CONTEXT, GstNvVideoContextClass))
+#define GST_IS_NV_VIDEO_CONTEXT(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_CONTEXT))
+#define GST_IS_NV_VIDEO_CONTEXT_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_CONTEXT))
+#define GST_NV_VIDEO_CONTEXT_GET_CLASS(o) \
+ (G_TYPE_INSTANCE_GET_CLASS((o), GST_TYPE_NV_VIDEO_CONTEXT, GstNvVideoContextClass))
+
+typedef enum
+{
+ GST_NV_VIDEO_CONTEXT_TYPE_NONE = 0,
+ GST_NV_VIDEO_CONTEXT_TYPE_EGL = (1 << 0),
+
+ GST_NV_VIDEO_CONTEXT_TYPE_ANY = G_MAXUINT32
+} GstNvVideoContextType;
+
+struct _GstNvVideoContextClass
+{
+ GstObjectClass parent_class;
+
+ gboolean (*create) (GstNvVideoContext * context);
+ gboolean (*setup) (GstNvVideoContext * context);
+ void (*cleanup) (GstNvVideoContext * context);
+ GstCaps *(*get_caps) (GstNvVideoContext * context);
+ gboolean (*show_frame) (GstNvVideoContext * context, GstBuffer * buf);
+ void (*handle_eos) (GstNvVideoContext * context);
+ void (*handle_drc) (GstNvVideoContext * context);
+ void (*handle_tearing) (GstNvVideoContext * context);
+};
+
+struct _GstNvVideoContext
+{
+ GstObject parent;
+
+ GstNvVideoDisplay *display;
+ GstNvVideoWindow *window;
+
+ GstNvVideoContextType type;
+
+ GstNvVideoContextPrivate *priv;
+
+ guint using_NVMM;
+ GstVideoInfo configured_info;
+
+ gboolean is_cuda_init;
+ CUcontext cuContext;
+ CUgraphicsResource cuResource[3];
+ unsigned int gpu_id;
+
+};
+
+GST_EXPORT
+GstNvVideoContext * gst_nv_video_context_new (GstNvVideoDisplay * display);
+
+GST_EXPORT
+gboolean gst_nv_video_context_create (GstNvVideoContext * context);
+GST_EXPORT
+GstCaps * gst_nv_video_context_get_caps (GstNvVideoContext * context);
+GST_EXPORT
+gboolean gst_nv_video_context_set_window (GstNvVideoContext * context, GstNvVideoWindow * window);
+GST_EXPORT
+gboolean gst_nv_video_context_show_frame (GstNvVideoContext * context, GstBuffer * buf);
+GST_EXPORT
+void gst_nv_video_context_handle_eos (GstNvVideoContext * context);
+GST_EXPORT
+void gst_nv_video_context_handle_drc (GstNvVideoContext * context);
+GST_EXPORT
+void gst_nv_video_context_handle_tearing (GstNvVideoContext * context);
+GST_EXPORT
+gboolean gst_nv_video_context_create_render_thread (GstNvVideoContext * context);
+GST_EXPORT
+void gst_nv_video_context_destroy_render_thread (GstNvVideoContext * context);
+GST_EXPORT
+GstNvVideoContextType gst_nv_video_context_get_handle_type (GstNvVideoContext * context);
+
+GType gst_nv_video_context_get_type (void);
+
+G_END_DECLS
+
+#endif /* __GST_NV_VIDEO_CONTEXT_H__ */
diff --git a/gst-plugins-nv-video-sinks/common/display.c b/gst-plugins-nv-video-sinks/common/display.c
new file mode 100644
index 0000000..07bf98a
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/display.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include "display.h"
+#include "context.h"
+#include "window.h"
+
+#if NV_VIDEO_SINKS_HAS_X11
+#include "display_x11.h"
+#endif
+
+#define GST_CAT_DEFAULT gst_debug_nv_video_display
+GST_DEBUG_CATEGORY (GST_CAT_DEFAULT);
+
+G_DEFINE_ABSTRACT_TYPE (GstNvVideoDisplay, gst_nv_video_display,
+ GST_TYPE_OBJECT);
+
+GstNvVideoDisplayType
+gst_nv_video_display_get_handle_type (GstNvVideoDisplay * display)
+{
+ g_return_val_if_fail (GST_IS_NV_VIDEO_DISPLAY (display),
+ GST_NV_VIDEO_DISPLAY_TYPE_NONE);
+
+ return display->type;
+}
+
+static void
+gst_nv_video_display_init (GstNvVideoDisplay * display)
+{
+
+}
+
+gboolean
+gst_nv_video_display_create_context (GstNvVideoDisplay * display,
+ GstNvVideoContext ** ptr_context)
+{
+ GstNvVideoContext *context = NULL;
+
+ g_return_val_if_fail (display != NULL, FALSE);
+ g_return_val_if_fail (ptr_context != NULL, FALSE);
+
+ context = gst_nv_video_context_new (display);
+ if (!context) {
+ GST_ERROR ("context creation failed");
+ return FALSE;
+ }
+
+ if (!gst_nv_video_context_create (context)) {
+ return FALSE;
+ }
+
+ *ptr_context = context;
+
+ GST_DEBUG_OBJECT (display, "created context %" GST_PTR_FORMAT, context);
+
+ return TRUE;
+}
+
+GstNvVideoWindow *
+gst_nv_video_display_create_window (GstNvVideoDisplay * display)
+{
+ return gst_nv_video_window_new (display);
+}
+
+static void
+gst_nv_video_display_class_init (GstNvVideoDisplayClass * klass)
+{
+}
+
+gboolean
+gst_nv_video_display_new (GstNvVideoDisplay ** display)
+{
+ static gsize debug_init = 0;
+ const gchar *winsys_name = NULL;
+
+ if (g_once_init_enter (&debug_init)) {
+ GST_DEBUG_CATEGORY_INIT (gst_debug_nv_video_display, "nvvideodisplay", 0,
+ "nvvideodisplay");
+ g_once_init_leave (&debug_init, 1);
+ }
+
+ winsys_name = g_getenv ("GST_NV_VIDEO_WINSYS");
+
+#if NV_VIDEO_SINKS_HAS_X11
+ if (!*display && (!winsys_name || g_strstr_len (winsys_name, 3, "x11"))) {
+ *display = GST_NV_VIDEO_DISPLAY (gst_nv_video_display_x11_new (NULL));
+ }
+#endif
+
+ if (!*display) {
+ GST_ERROR ("couldn't create display. GST_NV_VIDEO_WINSYS = %s",
+ winsys_name ? winsys_name : NULL);
+ return FALSE;
+ }
+
+ return TRUE;
+}
diff --git a/gst-plugins-nv-video-sinks/common/display.h b/gst-plugins-nv-video-sinks/common/display.h
new file mode 100644
index 0000000..8aad21c
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/display.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __GST_NV_VIDEO_DISPLAY_H__
+#define __GST_NV_VIDEO_DISPLAY_H__
+
+#include
+#include
+#include
+
+#include "gstnvvideofwd.h"
+
+G_BEGIN_DECLS
+
+#define GST_TYPE_NV_VIDEO_DISPLAY \
+ (gst_nv_video_display_get_type())
+#define GST_NV_VIDEO_DISPLAY(obj)\
+ (G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV_VIDEO_DISPLAY, GstNvVideoDisplay))
+#define GST_NV_VIDEO_DISPLAY_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_DISPLAY, GstNvVideoDisplayClass))
+#define GST_IS_NV_VIDEO_DISPLAY(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_DISPLAY))
+#define GST_IS_NV_VIDEO_DISPLAY_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_DISPLAY))
+#define GST_NV_VIDEO_DISPLAY_CAST(obj) \
+ ((GstNvVideoDisplay*)(obj))
+#define GST_NV_VIDEO_DISPLAY_GET_CLASS(o) \
+ (G_TYPE_INSTANCE_GET_CLASS((o), GST_TYPE_NV_VIDEO_DISPLAY, GstNvVideoDisplayClass))
+
+struct _GstNvVideoDisplayClass
+{
+ GstObjectClass parent_class;
+
+ guintptr (*get_handle) (GstNvVideoDisplay * display);
+};
+
+typedef enum
+{
+ GST_NV_VIDEO_DISPLAY_TYPE_NONE = 0,
+ GST_NV_VIDEO_DISPLAY_TYPE_X11 = (1 << 0),
+
+ GST_NV_VIDEO_DISPLAY_TYPE_ANY = G_MAXUINT32
+} GstNvVideoDisplayType;
+
+struct _GstNvVideoDisplay
+{
+ GstObject parent;
+
+ GstNvVideoDisplayType type;
+};
+
+GST_EXPORT
+gboolean gst_nv_video_display_new (GstNvVideoDisplay ** display);
+GST_EXPORT
+gboolean gst_nv_video_display_create_context (GstNvVideoDisplay * display, GstNvVideoContext ** ptr_context);
+GST_EXPORT
+GstNvVideoDisplayType gst_nv_video_display_get_handle_type (GstNvVideoDisplay * display);
+GST_EXPORT
+GstNvVideoWindow *gst_nv_video_display_create_window (GstNvVideoDisplay * display);
+
+GType gst_nv_video_display_get_type (void);
+
+G_END_DECLS
+
+#endif /* __GST_NV_VIDEO_DISPLAY_H__ */
diff --git a/gst-plugins-nv-video-sinks/common/egl/context_egl.c b/gst-plugins-nv-video-sinks/common/egl/context_egl.c
new file mode 100644
index 0000000..1d0889e
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/egl/context_egl.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include "context_egl.h"
+#include "display.h"
+#include "display_x11.h"
+#include "window.h"
+#include "nvbufsurface.h"
+
+#include
+
+G_GNUC_INTERNAL extern GstDebugCategory *gst_debug_nv_video_context;
+#define GST_CAT_DEFAULT gst_debug_nv_video_context
+
+G_DEFINE_TYPE (GstNvVideoContextEgl, gst_nv_video_context_egl,
+ GST_TYPE_NV_VIDEO_CONTEXT);
+
+static GstCaps *
+gst_nv_video_context_egl_new_template_caps (GstVideoFormat format)
+{
+ return gst_caps_new_simple ("video/x-raw",
+ "format", G_TYPE_STRING, gst_video_format_to_string (format),
+ "width", GST_TYPE_INT_RANGE, 1, G_MAXINT,
+ "height", GST_TYPE_INT_RANGE, 1, G_MAXINT,
+ "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
+}
+
+static void
+log_egl_error (GstNvVideoContext * context, const char *name)
+{
+ GST_ERROR_OBJECT (context, "egl error: %s returned %x", name, eglGetError ());
+}
+
+static gboolean
+gst_nv_video_context_egl_is_surface_changed (GstNvVideoContextEgl * context_egl)
+{
+ gint w, h;
+
+ eglQuerySurface (context_egl->display, context_egl->surface, EGL_WIDTH, &w);
+ eglQuerySurface (context_egl->display, context_egl->surface, EGL_HEIGHT, &h);
+
+ if (context_egl->surface_width != w || context_egl->surface_height != h) {
+ context_egl->surface_width = w;
+ context_egl->surface_height = h;
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+static gboolean
+gst_nv_video_context_egl_show_frame (GstNvVideoContext * context,
+ GstBuffer * buf)
+{
+ GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
+ EGLImageKHR image = EGL_NO_IMAGE_KHR;
+ GstMemory *mem;
+ NvBufSurface *in_surface = NULL;
+ gboolean is_cuda_mem = TRUE;
+
+ if (!context_egl->surface) {
+ guintptr handle = gst_nv_video_window_get_handle (context->window);
+
+ context_egl->surface =
+ eglCreateWindowSurface (context_egl->display, context_egl->config,
+ (EGLNativeWindowType) handle, NULL);
+ if (context_egl->surface == EGL_NO_SURFACE) {
+ log_egl_error (context, "eglCreateWindowSurface");
+ return FALSE;
+ }
+
+ if (!eglMakeCurrent (context_egl->display, context_egl->surface,
+ context_egl->surface, context_egl->context)) {
+ log_egl_error (context, "eglMakeCurrent");
+ return FALSE;
+ }
+
+ GST_DEBUG_OBJECT (context, "egl surface %p created", context_egl->surface);
+ }
+
+ if (!context_egl->renderer) {
+ context_egl->renderer = gst_nv_video_renderer_new (context, "gl");
+ if (!context_egl->renderer) {
+ GST_ERROR_OBJECT (context, "renderer creation failed");
+ return FALSE;
+ }
+ if (!gst_nv_video_renderer_setup (context_egl->renderer)) {
+ GST_ERROR_OBJECT (context, "renderer setup failed");
+ return FALSE;
+ }
+ }
+
+ if (context->using_NVMM) {
+ if (!context->is_cuda_init) {
+ if (!gst_nv_video_renderer_cuda_init (context, context_egl->renderer)) {
+ GST_ERROR_OBJECT (context, "cuda init failed");
+ return FALSE;
+ }
+ }
+ }
+
+ if (gst_nv_video_context_egl_is_surface_changed (context_egl)) {
+ GST_DEBUG_OBJECT (context, "surface dimensions changed to %dx%d",
+ context_egl->surface_width, context_egl->surface_height);
+
+ gst_nv_video_renderer_update_viewport (context_egl->renderer,
+ context_egl->surface_width, context_egl->surface_height);
+ }
+
+ if (gst_buffer_n_memory (buf) >= 1 && (mem = gst_buffer_peek_memory (buf, 0))) {
+ //Software buffer handling
+ if (!context->using_NVMM) {
+ if (!gst_nv_video_renderer_fill_texture(context, context_egl->renderer, buf)) {
+ GST_ERROR_OBJECT (context, "fill_texture failed");
+ return FALSE;
+ }
+ if (!gst_nv_video_renderer_draw_2D_Texture (context_egl->renderer)) {
+ GST_ERROR_OBJECT (context, "draw 2D Texture failed");
+ return FALSE;
+ }
+ }
+ else {
+ // NvBufSurface support (NVRM and CUDA)
+ GstMapInfo map = { NULL, (GstMapFlags) 0, NULL, 0, 0, };
+ mem = gst_buffer_peek_memory (buf, 0);
+ gst_memory_map (mem, &map, GST_MAP_READ);
+
+ /* Types of Buffers handled -
+ * NvBufSurface
+ * - NVMM buffer type
+ * - Cuda buffer type
+ */
+      /* NvBufSurface types are handled here */
+ in_surface = (NvBufSurface*) map.data;
+ NvBufSurfaceMemType memType = in_surface->memType;
+
+ if (memType == NVBUF_MEM_DEFAULT) {
+#ifdef IS_DESKTOP
+ memType = NVBUF_MEM_CUDA_DEVICE;
+#else
+ memType = NVBUF_MEM_SURFACE_ARRAY;
+#endif
+ }
+
+ if (memType == NVBUF_MEM_SURFACE_ARRAY || memType == NVBUF_MEM_HANDLE) {
+ is_cuda_mem = FALSE;
+ }
+
+ if (is_cuda_mem == FALSE) {
+        /* NvBufSurface - NVMM buffer type is handled here */
+ if (in_surface->batchSize != 1) {
+ GST_ERROR_OBJECT (context,"ERROR: Batch size not 1\n");
+ return FALSE;
+ }
+ if (NvBufSurfaceMapEglImage (in_surface, 0) !=0 ) {
+ GST_ERROR_OBJECT (context,"ERROR: NvBufSurfaceMapEglImage\n");
+ return FALSE;
+ }
+ image = in_surface->surfaceList[0].mappedAddr.eglImage;
+ gst_nv_video_renderer_draw_eglimage (context_egl->renderer, image);
+ }
+ else {
+        /* NvBufSurface - Cuda buffer type is handled here */
+ if (!gst_nv_video_renderer_cuda_buffer_copy (context, context_egl->renderer, buf))
+ {
+ GST_ERROR_OBJECT (context,"cuda buffer copy failed\n");
+ return FALSE;
+ }
+ if (!gst_nv_video_renderer_draw_2D_Texture (context_egl->renderer)) {
+ GST_ERROR_OBJECT (context,"draw 2D texture failed");
+ return FALSE;
+ }
+ }
+ gst_memory_unmap (mem, &map);
+ }
+ }
+
+
+ if (!eglSwapBuffers (context_egl->display, context_egl->surface)) {
+ log_egl_error (context, "eglSwapBuffers");
+ }
+
+ if (image != EGL_NO_IMAGE_KHR) {
+ NvBufSurfaceUnMapEglImage (in_surface, 0);
+ }
+
+ GST_TRACE_OBJECT (context, "release %p hold %p", context_egl->last_buf, buf);
+
+ // TODO: We hold buffer used in current drawing till next swap buffer
+ // is completed so that decoder won't write it till GL has finished using it.
+ // When Triple buffering in X is enabled, this can cause tearing as completion
+ // of next swap buffer won't guarantee GL has finished with the buffer used in
+  // current swap buffer. This issue will be addressed when we transfer SyncFds
+ // from decoder <-> sink.
+ if (!context_egl->is_drc_on) {
+ gst_buffer_replace (&context_egl->last_buf, buf);
+ }
+ return TRUE;
+}
+
+static void
+gst_nv_video_context_egl_handle_tearing (GstNvVideoContext * context)
+{
+ GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
+ context_egl->is_drc_on = 0;
+ return;
+}
+
+static void
+gst_nv_video_context_egl_handle_drc (GstNvVideoContext * context)
+{
+ GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
+
+ GST_TRACE_OBJECT (context, "release last frame when resolution changes %p", context_egl->last_buf);
+
+ if (context_egl->last_buf)
+ context_egl->is_drc_on = 1;
+ gst_buffer_replace (&context_egl->last_buf, NULL);
+}
+
+static void
+gst_nv_video_context_egl_handle_eos (GstNvVideoContext * context)
+{
+ GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
+
+ GST_TRACE_OBJECT (context, "release last frame %p", context_egl->last_buf);
+
+ gst_buffer_replace (&context_egl->last_buf, NULL);
+}
+
+static gboolean
+gst_nv_video_context_egl_setup (GstNvVideoContext * context)
+{
+ GstNvVideoDisplayX11 *display_x11 = (GstNvVideoDisplayX11 *) context->display;
+ GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
+ EGLint major, minor;
+ EGLint num_configs;
+ EGLint attr[] = {
+ EGL_BUFFER_SIZE, 24,
+ EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
+ EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+ EGL_NONE
+ };
+ EGLint attribs[] = { EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE };
+
+ GST_DEBUG_OBJECT (context, "EGL context setup");
+
+ context_egl->display =
+ eglGetDisplay ((EGLNativeDisplayType) display_x11->dpy);
+
+ if (!eglInitialize (context_egl->display, &major, &minor)) {
+ log_egl_error (context, "eglInitialize");
+ return FALSE;
+ }
+
+ GST_INFO_OBJECT (context, "egl version: %d.%d", major, minor);
+
+ eglBindAPI (EGL_OPENGL_ES_API);
+
+ if (!eglChooseConfig (context_egl->display, attr, &context_egl->config, 1,
+ &num_configs)) {
+ log_egl_error (context, "eglChooseConfig");
+ }
+
+ context_egl->context =
+ eglCreateContext (context_egl->display, context_egl->config,
+ EGL_NO_CONTEXT, attribs);
+ if (context_egl->context == EGL_NO_CONTEXT) {
+ log_egl_error (context, "eglChooseConfig");
+ return FALSE;
+ }
+
+ GST_DEBUG_OBJECT (context, "egl context %p created", context_egl->context);
+
+ return TRUE;
+}
+
+static void
+gst_nv_video_context_egl_cleanup (GstNvVideoContext * context)
+{
+ GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
+
+ GST_DEBUG_OBJECT (context, "egl cleanup display=%p surface=%p context=%p",
+ context_egl->display, context_egl->surface, context_egl->context);
+
+ if (context_egl->renderer) {
+ if (context->using_NVMM) {
+ gst_nv_video_renderer_cuda_cleanup (context, context_egl->renderer);
+ }
+ gst_nv_video_renderer_cleanup (context_egl->renderer);
+ gst_object_unref (context_egl->renderer);
+ context_egl->renderer = NULL;
+ }
+
+ if (!eglMakeCurrent (context_egl->display, EGL_NO_SURFACE, EGL_NO_SURFACE,
+ EGL_NO_CONTEXT)) {
+ log_egl_error (context, "eglMakeCurrent");
+ }
+
+ if (context_egl->surface) {
+ eglDestroySurface (context_egl->display, context_egl->surface);
+ context_egl->surface = NULL;
+ }
+
+ if (context_egl->context) {
+ eglDestroyContext (context_egl->display, context_egl->context);
+ context_egl->context = NULL;
+ }
+
+ eglTerminate (context_egl->display);
+ context_egl->display = NULL;
+
+ GST_DEBUG_OBJECT (context, "egl cleanup done");
+
+ return;
+}
+
+static GstCaps *
+gst_nv_video_context_egl_getcaps (GstNvVideoContext * context)
+{
+ GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
+
+ GST_LOG_OBJECT (context, "context add_caps %" GST_PTR_FORMAT,
+ context_egl->caps);
+
+ return gst_caps_copy (context_egl->caps);
+}
+
+static gboolean
+gst_nv_video_context_egl_create (GstNvVideoContext * context)
+{
+ return gst_nv_video_context_create_render_thread (context);
+}
+
+static void
+gst_nv_video_context_egl_finalize (GObject * object)
+{
+ GstNvVideoContext *context = GST_NV_VIDEO_CONTEXT (object);
+ GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
+
+ GST_DEBUG_OBJECT (context, "finalize begin");
+
+ gst_nv_video_context_destroy_render_thread (context);
+
+ if (context_egl->caps) {
+ gst_caps_unref (context_egl->caps);
+ }
+
+ G_OBJECT_CLASS (gst_nv_video_context_egl_parent_class)->finalize (object);
+
+ GST_DEBUG_OBJECT (context, "finalize end");
+}
+
+static void
+gst_nv_video_context_egl_class_init (GstNvVideoContextEglClass * klass)
+{
+ GstNvVideoContextClass *context_class = (GstNvVideoContextClass *) klass;
+
+ context_class->create = GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_create);
+ context_class->setup = GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_setup);
+ context_class->get_caps =
+ GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_getcaps);
+ context_class->show_frame =
+ GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_show_frame);
+ context_class->handle_eos =
+ GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_handle_eos);
+ context_class->handle_drc =
+ GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_handle_drc);
+ context_class->handle_tearing =
+ GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_handle_tearing);
+ context_class->cleanup = GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_cleanup);
+
+ G_OBJECT_CLASS (klass)->finalize = gst_nv_video_context_egl_finalize;
+}
+
+static void
+gst_nv_video_context_egl_init (GstNvVideoContextEgl * context_egl)
+{
+ GstNvVideoContext *context = (GstNvVideoContext *) context_egl;
+
+ context->type = GST_NV_VIDEO_CONTEXT_TYPE_EGL;
+
+ context_egl->context = NULL;
+ context_egl->display = NULL;
+ context_egl->surface = NULL;
+ context_egl->config = NULL;
+
+ context_egl->surface_width = 0;
+ context_egl->surface_height = 0;
+
+ context_egl->last_buf = NULL;
+
+ context_egl->is_drc_on = 0;
+}
+
+GstNvVideoContextEgl *
+gst_nv_video_context_egl_new (GstNvVideoDisplay * display)
+{
+ GstNvVideoContextEgl *ret;
+ GstCaps *caps = NULL;
+ guint i, n;
+
+  // For now we need an X11 display for the EGL context.
+ if ((gst_nv_video_display_get_handle_type (display) &
+ GST_NV_VIDEO_DISPLAY_TYPE_X11)
+ == 0) {
+ return NULL;
+ }
+
+ ret = g_object_new (GST_TYPE_NV_VIDEO_CONTEXT_EGL, NULL);
+ gst_object_ref_sink (ret);
+
+ // TODO: query from egl
+ caps = gst_caps_new_empty ();
+ // Software buffer caps
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_RGBA));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_BGRA));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_ARGB));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_ABGR));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_RGBx));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_BGRx));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_xRGB));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_xBGR));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_AYUV));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_Y444));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_RGB));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_BGR));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_I420));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_YV12));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_NV12));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_NV21));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_Y42B));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_Y41B));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_RGB16));
+
+ n = gst_caps_get_size(caps);
+ // NVMM buffer caps
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_NV12));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_I420));
+ gst_caps_append (caps,
+ gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_RGBA));
+ for (i = n; i < n + 3; i++) {
+ GstCapsFeatures *features = gst_caps_features_new ("memory:NVMM", NULL);
+ gst_caps_set_features (caps, i, features);
+ }
+ gst_caps_replace (&ret->caps, caps);
+ gst_caps_unref (caps);
+
+ return ret;
+}
diff --git a/gst-plugins-nv-video-sinks/common/egl/context_egl.h b/gst-plugins-nv-video-sinks/common/egl/context_egl.h
new file mode 100644
index 0000000..9b1e048
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/egl/context_egl.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __GST_NV_VIDEO_CONTEXT_EGL_H__
+#define __GST_NV_VIDEO_CONTEXT_EGL_H__
+
+#include "context.h"
+#include "renderer.h"
+
+G_BEGIN_DECLS
+
+#define GST_TYPE_NV_VIDEO_CONTEXT_EGL \
+ (gst_nv_video_context_egl_get_type())
+#define GST_NV_VIDEO_CONTEXT_EGL(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV_VIDEO_CONTEXT_EGL, GstNvVideoContextEgl))
+#define GST_NV_VIDEO_CONTEXT_EGL_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_CONTEXT_EGL, GstNvVideoContextEglClass))
+#define GST_IS_NV_VIDEO_CONTEXT_EGL(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_CONTEXT_EGL))
+#define GST_IS_NV_VIDEO_CONTEXT_EGL_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_CONTEXT_EGL))
+#define GST_NV_VIDEO_CONTEXT_EGL_CAST(obj) \
+ ((GstNvVideoContextEgl*)(obj))
+
+typedef struct _GstNvVideoContextEgl GstNvVideoContextEgl;
+typedef struct _GstNvVideoContextEglClass GstNvVideoContextEglClass;
+
+struct _GstNvVideoContextEgl
+{
+ GstNvVideoContext parent;
+
+ gpointer context;
+ gpointer display;
+ gpointer surface;
+ gpointer config;
+
+ gint surface_width;
+ gint surface_height;
+
+ GstNvVideoRenderer *renderer;
+
+ GstCaps *caps;
+
+ GstBuffer *last_buf;
+
+ gint is_drc_on;
+};
+
+struct _GstNvVideoContextEglClass
+{
+ GstNvVideoContextClass parent_class;
+};
+
+G_GNUC_INTERNAL
+GstNvVideoContextEgl * gst_nv_video_context_egl_new (GstNvVideoDisplay * display);
+
+GType gst_nv_video_context_egl_get_type (void);
+
+G_END_DECLS
+
+#endif /* __GST_NV_VIDEO_CONTEXT_EGL_H__ */
diff --git a/gst-plugins-nv-video-sinks/common/gstnvvideofwd.h b/gst-plugins-nv-video-sinks/common/gstnvvideofwd.h
new file mode 100644
index 0000000..f7d3837
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/gstnvvideofwd.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __GST_NV_VIDEO_FWD_H__
+#define __GST_NV_VIDEO_FWD_H__
+
+#include
+
+G_BEGIN_DECLS
+
+typedef struct _GstNvVideoDisplay GstNvVideoDisplay;
+typedef struct _GstNvVideoDisplayClass GstNvVideoDisplayClass;
+
+typedef struct _GstNvVideoWindow GstNvVideoWindow;
+typedef struct _GstNvVideoWindowClass GstNvVideoWindowClass;
+
+typedef struct _GstNvVideoContext GstNvVideoContext;
+typedef struct _GstNvVideoContextClass GstNvVideoContextClass;
+typedef struct _GstNvVideoContextPrivate GstNvVideoContextPrivate;
+
+typedef struct _GstNvVideoRenderer GstNvVideoRenderer;
+typedef struct _GstNvVideoRendererClass GstNvVideoRendererClass;
+
+G_END_DECLS
+
+#endif /* __GST_NV_VIDEO_FWD_H__ */
diff --git a/gst-plugins-nv-video-sinks/common/renderer.c b/gst-plugins-nv-video-sinks/common/renderer.c
new file mode 100644
index 0000000..d217763
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/renderer.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include
+
+#include "renderer.h"
+#include "context.h"
+
+#if NV_VIDEO_SINKS_HAS_GL
+#include "renderer_gl.h"
+#endif
+
+#define GST_CAT_DEFAULT gst_debug_nv_video_renderer
+GST_DEBUG_CATEGORY (GST_CAT_DEFAULT);
+
+G_DEFINE_ABSTRACT_TYPE (GstNvVideoRenderer, gst_nv_video_renderer,
+ GST_TYPE_OBJECT);
+
+static void
+gst_nv_video_renderer_init (GstNvVideoRenderer * renderer)
+{
+}
+
+static void
+gst_nv_video_renderer_class_init (GstNvVideoRendererClass * klass)
+{
+}
+
+GstNvVideoRenderer *
+gst_nv_video_renderer_new (GstNvVideoContext * context, const char *name)
+{
+ GstNvVideoRenderer *renderer = NULL;
+ static gsize debug_init = 0;
+
+ if (g_once_init_enter (&debug_init)) {
+ GST_DEBUG_CATEGORY_INIT (gst_debug_nv_video_renderer, "nvvideorenderer", 0,
+ "nvvideorenderer");
+ g_once_init_leave (&debug_init, 1);
+ }
+
+ if (!name) {
+ GST_ERROR ("renderer name not valid");
+ }
+
+#if NV_VIDEO_SINKS_HAS_GL
+ if (g_strstr_len (name, 2, "gl")) {
+ renderer = GST_NV_VIDEO_RENDERER (gst_nv_video_renderer_gl_new (context));
+ }
+#endif
+
+ if (!renderer) {
+ GST_ERROR ("couldn't create renderer name = %s", name);
+ return NULL;
+ }
+
+ renderer->format = context->configured_info.finfo->format;
+
+ GST_DEBUG_OBJECT (renderer, "created %s renderer for context %" GST_PTR_FORMAT, name, context);
+
+ return renderer;
+}
+
+gboolean
+gst_nv_video_renderer_cuda_init (GstNvVideoContext * context, GstNvVideoRenderer * renderer)
+{
+ GstNvVideoRendererClass *renderer_class;
+
+ renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
+
+ return renderer_class->cuda_init (context, renderer);
+}
+
+void
+gst_nv_video_renderer_cuda_cleanup (GstNvVideoContext * context, GstNvVideoRenderer * renderer)
+{
+ GstNvVideoRendererClass *renderer_class;
+
+ renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
+
+ renderer_class->cuda_cleanup (context, renderer);
+}
+
+void
+gst_nv_video_renderer_cleanup (GstNvVideoRenderer * renderer)
+{
+ GstNvVideoRendererClass *renderer_class;
+
+ renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
+
+ renderer_class->cleanup (renderer);
+}
+
+gboolean
+gst_nv_video_renderer_setup (GstNvVideoRenderer * renderer)
+{
+ GstNvVideoRendererClass *renderer_class;
+
+ renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
+
+ return renderer_class->setup (renderer);
+}
+
+void
+gst_nv_video_renderer_update_viewport (GstNvVideoRenderer * renderer, int width, int height)
+{
+ GstNvVideoRendererClass *renderer_class;
+
+ renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
+
+ renderer_class->update_viewport (renderer, width, height);
+}
+
+gboolean
+gst_nv_video_renderer_fill_texture (GstNvVideoContext *context, GstNvVideoRenderer * renderer, GstBuffer * buf)
+{
+ GstNvVideoRendererClass *renderer_class;
+
+ renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
+
+ return renderer_class->fill_texture (context, renderer, buf);
+}
+
+gboolean
+gst_nv_video_renderer_cuda_buffer_copy (GstNvVideoContext *context, GstNvVideoRenderer * renderer, GstBuffer * buf)
+{
+ GstNvVideoRendererClass *renderer_class;
+
+ renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
+
+ return renderer_class->cuda_buffer_copy (context, renderer, buf);
+}
+
+gboolean
+gst_nv_video_renderer_draw_2D_Texture (GstNvVideoRenderer * renderer)
+{
+ GstNvVideoRendererClass *renderer_class;
+
+ renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
+
+ return renderer_class->draw_2D_Texture (renderer);
+}
+
+gboolean
+gst_nv_video_renderer_draw_eglimage (GstNvVideoRenderer * renderer, void * image)
+{
+ GstNvVideoRendererClass *renderer_class;
+
+ renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
+
+ return renderer_class->draw_eglimage (renderer, image);
+}
diff --git a/gst-plugins-nv-video-sinks/common/renderer.h b/gst-plugins-nv-video-sinks/common/renderer.h
new file mode 100644
index 0000000..63835ab
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/renderer.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __GST_NV_VIDEO_RENDERER_H__
+#define __GST_NV_VIDEO_RENDERER_H__
+
+#include
+#include
+
+#include "gstnvvideofwd.h"
+
+G_BEGIN_DECLS
+
+#define GST_TYPE_NV_VIDEO_RENDERER \
+ (gst_nv_video_renderer_get_type())
+#define GST_NV_VIDEO_RENDERER(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV_VIDEO_RENDERER, GstNvVideoRenderer))
+#define GST_NV_VIDEO_RENDERER_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_RENDERER, GstNvVideoRendererClass))
+#define GST_IS_NV_VIDEO_RENDERER(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_RENDERER))
+#define GST_IS_NV_VIDEO_RENDERER_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_RENDERER))
+#define GST_NV_VIDEO_RENDERER_GET_CLASS(o) \
+ (G_TYPE_INSTANCE_GET_CLASS((o), GST_TYPE_NV_VIDEO_RENDERER, GstNvVideoRendererClass))
+
+struct _GstNvVideoRendererClass
+{
+ GstObjectClass parent_class;
+
+ gboolean (*cuda_init) (GstNvVideoContext *context, GstNvVideoRenderer * renderer);
+ void (*cuda_cleanup) (GstNvVideoContext *context, GstNvVideoRenderer * renderer);
+ gboolean (*setup) (GstNvVideoRenderer * renderer);
+ void (*cleanup) (GstNvVideoRenderer * renderer);
+ void (*update_viewport) (GstNvVideoRenderer * renderer, int width, int height);
+ gboolean (*fill_texture) (GstNvVideoContext *context, GstNvVideoRenderer * renderer, GstBuffer * buf);
+ gboolean (*cuda_buffer_copy) (GstNvVideoContext *context, GstNvVideoRenderer * renderer, GstBuffer * buf);
+ gboolean (*draw_2D_Texture) (GstNvVideoRenderer * renderer);
+ gboolean (*draw_eglimage) (GstNvVideoRenderer * renderer, void * image);
+};
+
+struct _GstNvVideoRenderer
+{
+ GstObject parent;
+
+ GstNvVideoContext * context;
+
+ GstVideoFormat format;
+};
+
+GST_EXPORT
+GstNvVideoRenderer * gst_nv_video_renderer_new (GstNvVideoContext * context, const char *name);
+
+GST_EXPORT
+gboolean gst_nv_video_renderer_cuda_init (GstNvVideoContext * context, GstNvVideoRenderer * renderer);
+
+GST_EXPORT
+void gst_nv_video_renderer_cuda_cleanup (GstNvVideoContext * context, GstNvVideoRenderer * renderer);
+
+GST_EXPORT
+gboolean gst_nv_video_renderer_setup (GstNvVideoRenderer * renderer);
+
+GST_EXPORT
+void gst_nv_video_renderer_cleanup (GstNvVideoRenderer * renderer);
+
+GST_EXPORT
+void gst_nv_video_renderer_update_viewport (GstNvVideoRenderer * renderer, int width, int height);
+
+GST_EXPORT
+gboolean gst_nv_video_renderer_fill_texture (GstNvVideoContext *context, GstNvVideoRenderer * renderer, GstBuffer * buf);
+
+GST_EXPORT
+gboolean gst_nv_video_renderer_cuda_buffer_copy (GstNvVideoContext *context, GstNvVideoRenderer * renderer, GstBuffer * buf);
+
+GST_EXPORT
+gboolean gst_nv_video_renderer_draw_2D_Texture (GstNvVideoRenderer * renderer);
+
+GST_EXPORT
+gboolean gst_nv_video_renderer_draw_eglimage (GstNvVideoRenderer * renderer, void * image);
+
+GType gst_nv_video_renderer_get_type (void);
+
+G_END_DECLS
+
+#endif /* __GST_NV_VIDEO_RENDERER_H__ */
diff --git a/gst-plugins-nv-video-sinks/common/renderer/renderer_gl.c b/gst-plugins-nv-video-sinks/common/renderer/renderer_gl.c
new file mode 100644
index 0000000..5282ecf
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/renderer/renderer_gl.c
@@ -0,0 +1,1565 @@
+/*
+ * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2012 Collabora Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * .
+ */
+
+#include <string.h>
+#include "renderer.h"
+#include "renderer_gl.h"
+
+#include "nvbufsurface.h"
+
+#include <stdint.h>
+#include <cuda.h>
+#include <cudaGL.h>
+
+#include <EGL/egl.h> // for eglGetProcAddress
+
+/* *INDENT-OFF* */
+//Vertex shader for 2D textures
+static const char *vert_COPY_prog = {
+ "attribute vec3 position;\n"
+ "attribute vec2 texpos;\n"
+ "varying vec2 opos;\n"
+ "void main(void)\n"
+ "{\n"
+ " opos = texpos;\n"
+ " gl_Position = vec4(position, 1.0);\n"
+ "}\n"
+};
+
+static const char *vert_COPY_prog_no_tex = {
+ "attribute vec3 position;\n"
+ "void main(void)\n"
+ "{\n"
+ " gl_Position = vec4(position, 1.0);\n"
+ "}\n"
+};
+
+static const char *vert_source = {
+ "attribute vec3 position;\n"
+ "attribute vec2 tcoord;\n"
+ "varying vec2 vtcoord;\n"
+ "void main(void)\n"
+ "{\n"
+ " vtcoord = tcoord;\n"
+ " gl_Position = vec4(position, 1.0);\n"
+ "}\n"
+};
+
+//Fragment shader for 2D textures
+static const char *frag_COPY_prog = {
+ "precision mediump float;\n"
+ "varying vec2 opos;\n"
+ "uniform sampler2D tex;\n"
+ "uniform vec2 tex_scale0;\n"
+ "uniform vec2 tex_scale1;\n"
+ "uniform vec2 tex_scale2;\n"
+ "void main(void)\n"
+ "{\n"
+ " vec4 t = texture2D(tex, opos/tex_scale0);\n"
+ " gl_FragColor = vec4(t.rgb, 1.0);\n"
+ "}\n"
+};
+
+/* Channel reordering for XYZ <-> ZYX conversion */
+static const char *frag_REORDER_prog = {
+ "precision mediump float;"
+ "varying vec2 opos;"
+ "uniform sampler2D tex;"
+ "uniform vec2 tex_scale0;"
+ "uniform vec2 tex_scale1;"
+ "uniform vec2 tex_scale2;"
+ "void main(void)"
+ "{"
+ " vec4 t = texture2D(tex, opos / tex_scale0);"
+ " gl_FragColor = vec4(t.%c, t.%c, t.%c, 1.0);"
+ "}"
+};
+
+/* Packed YUV converters */
+
+/** AYUV to RGB conversion */
+static const char *frag_AYUV_prog = {
+ "precision mediump float;"
+ "varying vec2 opos;"
+ "uniform sampler2D tex;"
+ "uniform vec2 tex_scale0;"
+ "uniform vec2 tex_scale1;"
+ "uniform vec2 tex_scale2;"
+ "const vec3 offset = vec3(-0.0625, -0.5, -0.5);"
+ "const vec3 rcoeff = vec3(1.164, 0.000, 1.596);"
+ "const vec3 gcoeff = vec3(1.164,-0.391,-0.813);"
+ "const vec3 bcoeff = vec3(1.164, 2.018, 0.000);"
+ "void main(void) {"
+ " float r,g,b;"
+ " vec3 yuv;"
+ " yuv = texture2D(tex,opos / tex_scale0).gba;"
+ " yuv += offset;"
+ " r = dot(yuv, rcoeff);"
+ " g = dot(yuv, gcoeff);"
+ " b = dot(yuv, bcoeff);"
+ " gl_FragColor=vec4(r,g,b,1.0);"
+ "}"
+};
+
+/* Planar YUV converters */
+
+/** YUV to RGB conversion */
+static const char *frag_PLANAR_YUV_prog = {
+ "precision mediump float;"
+ "varying vec2 opos;"
+ "uniform sampler2D Ytex,Utex,Vtex;"
+ "uniform vec2 tex_scale0;"
+ "uniform vec2 tex_scale1;"
+ "uniform vec2 tex_scale2;"
+ "const vec3 offset = vec3(-0.0625, -0.5, -0.5);"
+ "const vec3 rcoeff = vec3(1.164, 0.000, 1.596);"
+ "const vec3 gcoeff = vec3(1.164,-0.391,-0.813);"
+ "const vec3 bcoeff = vec3(1.164, 2.018, 0.000);"
+ "void main(void) {"
+ " float r,g,b;"
+ " vec3 yuv;"
+ " yuv.x=texture2D(Ytex,opos / tex_scale0).r;"
+ " yuv.y=texture2D(Utex,opos / tex_scale1).r;"
+ " yuv.z=texture2D(Vtex,opos / tex_scale2).r;"
+ " yuv += offset;"
+ " r = dot(yuv, rcoeff);"
+ " g = dot(yuv, gcoeff);"
+ " b = dot(yuv, bcoeff);"
+ " gl_FragColor=vec4(r,g,b,1.0);"
+ "}"
+};
+
+/** NV12/NV21 to RGB conversion */
+static const char *frag_NV12_NV21_prog = {
+ "precision mediump float;"
+ "varying vec2 opos;"
+ "uniform sampler2D Ytex,UVtex;"
+ "uniform vec2 tex_scale0;"
+ "uniform vec2 tex_scale1;"
+ "uniform vec2 tex_scale2;"
+ "const vec3 offset = vec3(-0.0625, -0.5, -0.5);"
+ "const vec3 rcoeff = vec3(1.164, 0.000, 1.596);"
+ "const vec3 gcoeff = vec3(1.164,-0.391,-0.813);"
+ "const vec3 bcoeff = vec3(1.164, 2.018, 0.000);"
+ "void main(void) {"
+ " float r,g,b;"
+ " vec3 yuv;"
+ " yuv.x=texture2D(Ytex,opos / tex_scale0).r;"
+ " yuv.yz=texture2D(UVtex,opos / tex_scale1).%c%c;"
+ " yuv += offset;"
+ " r = dot(yuv, rcoeff);"
+ " g = dot(yuv, gcoeff);"
+ " b = dot(yuv, bcoeff);"
+ " gl_FragColor=vec4(r,g,b,1.0);"
+ "}"
+};
+
+/* Paint all black */
+static const char *frag_BLACK_prog = {
+ "precision mediump float;\n"
+ "void main(void)\n"
+ "{\n"
+ " gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);\n"
+ "}\n"
+};
+
+static const char *frag_source = {
+ "#extension GL_OES_EGL_image_external : require\n"
+ "precision mediump float;\n"
+ "varying vec2 vtcoord;\n"
+ "uniform samplerExternalOES tex;\n"
+ "void main(void)\n"
+ "{\n"
+ " gl_FragColor = texture2D(tex, vtcoord);\n"
+ "}\n"
+};
+
+/* *INDENT-ON* */
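+/* Interleaved vertex data for the 2D texture path: each vertex is three
+ * position floats (x, y, z) followed by two texture coordinates (u, v),
+ * i.e. a stride of five floats. The later quads in this buffer are the ones
+ * referenced when drawing the black border regions. */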
+static const GLfloat vertices_2d[] = {
+ 1.0f, 1.0f, 0.0f, 1.0f, 0.0f,
+ 1.0f, -1.0f, 0.0f, 1.0f, 1.0f,
+ -1.0f, 1.0f, 0.0f, 0.0f, 0.0f,
+ -1.0f, -1.0f, 0.0f, 0.0f, 1.0f,
+
+ 1.0f, 1.0f, 0.0f, 1.0f, 1.0f,
+ 1.0f, -1.0f, 0.0f, 1.0f, 0.0f,
+ -1.0f, 1.0f, 0.0f, 0.0f, 1.0f,
+ -1.0f, -1.0f, 0.0f, 0.0f, 0.0f,
+
+ 1.0f, 1.0f, 0.0f, 0.0f, 0.0f,
+ 1.0f, 1.0f, 0.0f, 0.0f, 0.0f,
+ -1.0f, 1.0f, 0.0f, 0.0f, 0.0f,
+ -1.0f, 1.0f, 0.0f, 0.0f, 0.0f,
+
+ 1.0f, -1.0f, 0.0f, 0.0f, 0.0f,
+ 1.0f, -1.0f, 0.0f, 0.0f, 0.0f,
+ -1.0f, -1.0f, 0.0f, 0.0f, 0.0f,
+ -1.0f, -1.0f, 0.0f, 0.0f, 0.0f
+};
+
+static const GLushort indices_2d[] = {0,1,2,3};
+static const GLfloat vertices[] = {
+ 1.0f, 1.0f, 0.0f, 1.0f, 0.0f,
+ -1.0f, 1.0f, 0.0f, 0.0f, 0.0f,
+ -1.0f, -1.0f, 0.0f, 0.0f, 1.0f,
+ 1.0f, -1.0f, 0.0f, 1.0f, 1.0f
+};
+
+static const GLushort indices[] = { 0, 1, 2, 0, 2, 3 };
+
+G_GNUC_INTERNAL extern GstDebugCategory *gst_debug_nv_video_renderer;
+#define GST_CAT_DEFAULT gst_debug_nv_video_renderer
+
+G_DEFINE_TYPE (GstNvVideoRendererGl, gst_nv_video_renderer_gl,
+ GST_TYPE_NV_VIDEO_RENDERER);
+
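+/* Logs and returns TRUE if the preceding GL call(s) raised an error,
+ * FALSE otherwise. */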
+static gboolean
+check_gl_error (GstNvVideoRenderer * renderer, const char *func)
+{
+ GLuint error = GL_NO_ERROR;
+
+ if ((error = glGetError ()) != GL_NO_ERROR) {
+ GST_ERROR_OBJECT (renderer, "%s returned GL error 0x%x", func, error);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+static GLuint
+gst_nv_video_renderer_gl_compile_shader (GLenum shader_type, const char *source)
+{
+ GLuint obj;
+ GLint status;
+
+ obj = glCreateShader (shader_type);
+ glShaderSource (obj, 1, &source, NULL);
+ glCompileShader (obj);
+ glGetShaderiv (obj, GL_COMPILE_STATUS, &status);
+ if (status != GL_TRUE) {
+ glDeleteShader (obj);
+ return 0;
+ }
+
+ return obj;
+}
+
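+/* Compiles the given vertex and fragment shader sources and links them into
+ * a program object. On failure, every partially created GL object is
+ * released and *prog, *vert and *frag are reset to 0. */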
+static gboolean
+create_shader_program (GstNvVideoRenderer * renderer, GLint *prog, GLint *vert, GLint *frag, const gchar * vert_shader, const gchar * frag_shader) {
+ GLint status;
+
+ *vert = gst_nv_video_renderer_gl_compile_shader (GL_VERTEX_SHADER, vert_shader);
+ if (!*vert) {
+ GST_DEBUG_OBJECT (renderer, "failed to compile vertex shader");
+ goto fail;
+ }
+
+ *frag =
+ gst_nv_video_renderer_gl_compile_shader (GL_FRAGMENT_SHADER, frag_shader);
+ if (!*frag) {
+ GST_DEBUG_OBJECT (renderer, "failed to compile fragment shader");
+ goto fail;
+ }
+
+ *prog = glCreateProgram ();
+ if (!*prog) {
+ GST_ERROR_OBJECT (renderer, "failed to create GL program object");
+ goto fail;
+ }
+
+ glAttachShader (*prog, *vert);
+ glAttachShader (*prog, *frag);
+ glLinkProgram (*prog);
+ glGetProgramiv (*prog, GL_LINK_STATUS, &status);
+ if (status != GL_TRUE) {
+ GST_ERROR_OBJECT (renderer, "failed to link GL program");
+ goto fail;
+ }
+ return TRUE;
+
+fail:
+ {
+ if (*frag && *prog)
+ glDetachShader (*prog, *frag);
+ if (*vert && *prog)
+ glDetachShader (*prog, *vert);
+ if (*prog)
+ glDeleteProgram (*prog);
+ if (*frag)
+ glDeleteShader (*frag);
+ if (*vert)
+ glDeleteShader (*vert);
+ *prog = 0;
+ *frag = 0;
+ *vert = 0;
+ return FALSE;
+ }
+}
+
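+/* Selects the fragment shader, the number of 2D textures and the sampler
+ * uniform names for the negotiated video format. For the reorder and
+ * NV12/NV21 shaders the channel swizzle is patched in via g_strdup_printf. */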
+void
+gst_nv_video_renderer_gl_process_shaders (GstNvVideoRenderer * renderer, gchar ** frag_prog, const gchar *texnames[], GstVideoFormat format)
+{
+ GstNvVideoRendererGl *renderer_gl = GST_NV_VIDEO_RENDERER_GL (renderer);
+
+ switch (format) {
+ case GST_VIDEO_FORMAT_AYUV:
+ *frag_prog = (gchar *) frag_AYUV_prog;
+ renderer_gl->num_textures_2d = 1;
+ texnames[0] = "tex";
+ break;
+ case GST_VIDEO_FORMAT_Y444:
+ case GST_VIDEO_FORMAT_I420:
+ case GST_VIDEO_FORMAT_YV12:
+ case GST_VIDEO_FORMAT_Y42B:
+ case GST_VIDEO_FORMAT_Y41B:
+ *frag_prog = (gchar *) frag_PLANAR_YUV_prog;
+ renderer_gl->num_textures_2d = 3;
+ texnames[0] = "Ytex";
+ texnames[1] = "Utex";
+ texnames[2] = "Vtex";
+ break;
+ case GST_VIDEO_FORMAT_NV12:
+ *frag_prog = g_strdup_printf (frag_NV12_NV21_prog, 'r', 'a');
+ renderer_gl->num_textures_2d = 2;
+ texnames[0] = "Ytex";
+ texnames[1] = "UVtex";
+ break;
+ case GST_VIDEO_FORMAT_NV21:
+ *frag_prog = g_strdup_printf (frag_NV12_NV21_prog, 'a', 'r');
+ renderer_gl->num_textures_2d = 2;
+ texnames[0] = "Ytex";
+ texnames[1] = "UVtex";
+ break;
+ case GST_VIDEO_FORMAT_BGR:
+ case GST_VIDEO_FORMAT_BGRx:
+ case GST_VIDEO_FORMAT_BGRA:
+ *frag_prog = g_strdup_printf (frag_REORDER_prog, 'b', 'g', 'r');
+ renderer_gl->num_textures_2d = 1;
+ texnames[0] = "tex";
+ break;
+ case GST_VIDEO_FORMAT_xRGB:
+ case GST_VIDEO_FORMAT_ARGB:
+ *frag_prog = g_strdup_printf (frag_REORDER_prog, 'g', 'b', 'a');
+ renderer_gl->num_textures_2d = 1;
+ texnames[0] = "tex";
+ break;
+ case GST_VIDEO_FORMAT_xBGR:
+ case GST_VIDEO_FORMAT_ABGR:
+ *frag_prog = g_strdup_printf (frag_REORDER_prog, 'a', 'b', 'g');
+ renderer_gl->num_textures_2d = 1;
+ texnames[0] = "tex";
+ break;
+ case GST_VIDEO_FORMAT_RGB:
+ case GST_VIDEO_FORMAT_RGBx:
+ case GST_VIDEO_FORMAT_RGBA:
+ case GST_VIDEO_FORMAT_RGB16:
+ *frag_prog = (gchar *) frag_COPY_prog;
+ renderer_gl->num_textures_2d = 1;
+ texnames[0] = "tex";
+ break;
+ default:
+ g_assert_not_reached ();
+ break;
+ }
+ return;
+}
+
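+/* Builds the three shader programs used by the renderer: [1] the 2D texture
+ * upload/conversion program for the negotiated format, [2] the untextured
+ * program used to paint the black borders, and [0] the external-OES program
+ * used to draw EGLImages. Also generates the textures and the vertex/index
+ * buffers for both draw paths. */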
+static gboolean
+gst_nv_video_renderer_gl_setup (GstNvVideoRenderer * renderer)
+{
+ GstNvVideoRendererGl *renderer_gl = GST_NV_VIDEO_RENDERER_GL (renderer);
+ GLint prog_obj = 0;
+ GLint vert_obj = 0;
+ GLint frag_obj = 0;
+
+ /* Setup of 2D textures */
+ g_assert (!renderer_gl->prog_obj[1]);
+ g_assert (!renderer_gl->vert_obj[1]);
+ g_assert (!renderer_gl->frag_obj[1]);
+ const gchar *texnames[3] = { NULL, };
+ gchar *frag_prog = NULL;
+
+ gst_nv_video_renderer_gl_process_shaders (renderer, &frag_prog, texnames, renderer->format);
+
+ if (!create_shader_program (renderer, &prog_obj, &vert_obj, &frag_obj, vert_COPY_prog, frag_prog))
+ {
+ GST_DEBUG_OBJECT (renderer, "failed to compile shaders");
+ goto fail;
+ }
+ renderer_gl->prog_obj[1] = prog_obj;
+ renderer_gl->vert_obj[1] = vert_obj;
+ renderer_gl->frag_obj[1] = frag_obj;
+
+ renderer_gl->position_loc[0] = glGetAttribLocation (renderer_gl->prog_obj[1], "position");
+ renderer_gl->texpos_loc[0] = glGetAttribLocation (renderer_gl->prog_obj[1], "texpos");
+ renderer_gl->tex_scale_loc[0][0] = glGetUniformLocation (renderer_gl->prog_obj[1], "tex_scale0");
+ renderer_gl->tex_scale_loc[0][1] = glGetUniformLocation (renderer_gl->prog_obj[1], "tex_scale1");
+ renderer_gl->tex_scale_loc[0][2] = glGetUniformLocation (renderer_gl->prog_obj[1], "tex_scale2");
+
+ for (int i=0; i < renderer_gl->num_textures_2d; i++) {
+ renderer_gl->tex_loc[0][i] = glGetUniformLocation (renderer_gl->prog_obj[1], texnames[i]);
+ }
+
+ // Build shader for black borders
+ prog_obj = 0;
+ vert_obj = 0;
+ frag_obj = 0;
+ g_assert (!renderer_gl->prog_obj[2]);
+ g_assert (!renderer_gl->vert_obj[2]);
+ g_assert (!renderer_gl->frag_obj[2]);
+
+ if (!create_shader_program (renderer, &prog_obj, &vert_obj, &frag_obj, vert_COPY_prog_no_tex, frag_BLACK_prog))
+ {
+ GST_DEBUG_OBJECT (renderer, "failed to compile shaders");
+ goto fail;
+ }
+
+ renderer_gl->prog_obj[2] = prog_obj;
+ renderer_gl->vert_obj[2] = vert_obj;
+ renderer_gl->frag_obj[2] = frag_obj;
+
+ renderer_gl->position_loc[1] = glGetAttribLocation (renderer_gl->prog_obj[2], "position");
+
+ //Generate Textures
+ glGenTextures (renderer_gl->num_textures_2d, renderer_gl->textures_2d);
+ if (check_gl_error (renderer, "glGenTextures2d")) {
+ renderer_gl->num_textures_2d = 0;
+ goto fail;
+ }
+
+ for (int i=0; i < renderer_gl->num_textures_2d; i++) {
+ glBindTexture (GL_TEXTURE_2D, renderer_gl->textures_2d[i]);
+ if (check_gl_error (renderer, "glBindTextures")) {
+ goto fail;
+ }
+
+ /* Set 2D resizing params */
+ glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ if (check_gl_error (renderer, "glTexParameteri")) {
+ goto fail;
+ }
+ }
+
+ glGenBuffers (1, &renderer_gl->vertex_buffer_2d);
+ glBindBuffer (GL_ARRAY_BUFFER, renderer_gl->vertex_buffer_2d);
+ glBufferData (GL_ARRAY_BUFFER, sizeof (vertices_2d), vertices_2d,
+ GL_STATIC_DRAW);
+
+ glGenBuffers (1, &renderer_gl->index_buffer_2d);
+ glBindBuffer (GL_ELEMENT_ARRAY_BUFFER, renderer_gl->index_buffer_2d);
+ glBufferData (GL_ELEMENT_ARRAY_BUFFER, sizeof (indices_2d), indices_2d,
+ GL_STATIC_DRAW);
+
+ glUseProgram(0);
+
+ /* Setup for GL_OES Texture */
+ prog_obj = 0;
+ vert_obj = 0;
+ frag_obj = 0;
+ int i;
+
+ g_assert (!renderer_gl->prog_obj[0]);
+ g_assert (!renderer_gl->vert_obj[0]);
+ g_assert (!renderer_gl->frag_obj[0]);
+
+ if (!create_shader_program (renderer, &prog_obj, &vert_obj, &frag_obj, vert_source, frag_source))
+ {
+ GST_DEBUG_OBJECT (renderer, "failed to compile shaders");
+ goto fail;
+ }
+
+ renderer_gl->prog_obj[0] = prog_obj;
+ renderer_gl->vert_obj[0] = vert_obj;
+ renderer_gl->frag_obj[0] = frag_obj;
+
+ renderer_gl->pos = glGetAttribLocation (renderer_gl->prog_obj[0], "position");
+ renderer_gl->tex_pos = glGetAttribLocation (renderer_gl->prog_obj[0], "tcoord");
+ renderer_gl->tex_sampler = glGetUniformLocation (renderer_gl->prog_obj[0], "tex");
+ if (check_gl_error (renderer, "glGetUniformLocation")) {
+ goto fail;
+ }
+
+ renderer_gl->num_textures = RENDERER_NUM_GL_TEXTURES;
+ glGenTextures (renderer_gl->num_textures, renderer_gl->textures);
+ if (check_gl_error (renderer, "glGenTextures")) {
+ renderer_gl->num_textures = 0;
+ goto fail;
+ }
+
+ for (i = 0; i < renderer_gl->num_textures; i++) {
+ glBindTexture (GL_TEXTURE_EXTERNAL_OES, renderer_gl->textures[i]);
+ if (check_gl_error (renderer, "glBindTexture")) {
+ goto fail;
+ }
+
+ glTexParameteri (GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri (GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ if (check_gl_error (renderer, "glTexParameteri")) {
+ goto fail;
+ }
+ }
+
+ glUseProgram (renderer_gl->prog_obj[0]);
+ if (check_gl_error (renderer, "glUseProgram")) {
+ goto fail;
+ }
+
+ glUniform1i (renderer_gl->tex_sampler, 0);
+
+ glGenBuffers (1, &renderer_gl->vertex_buffer);
+ glBindBuffer (GL_ARRAY_BUFFER, renderer_gl->vertex_buffer);
+ glBufferData (GL_ARRAY_BUFFER, 4 * 5 * sizeof (GLfloat), vertices,
+ GL_STATIC_DRAW);
+
+ glGenBuffers (1, &renderer_gl->index_buffer);
+ glBindBuffer (GL_ELEMENT_ARRAY_BUFFER, renderer_gl->index_buffer);
+ glBufferData (GL_ELEMENT_ARRAY_BUFFER, sizeof (indices), indices,
+ GL_STATIC_DRAW);
+
+ renderer_gl->glEGLImageTargetTexture2DOES =
+ (PFNGLEGLIMAGETARGETTEXTURE2DOESPROC)
+ eglGetProcAddress ("glEGLImageTargetTexture2DOES");
+
+ glUseProgram(0);
+
+ return TRUE;
+
+fail:
+ GST_ERROR_OBJECT (renderer, "GL renderer setup failed");
+
+ //Failed in 2D texture part
+ glDeleteTextures (renderer_gl->num_textures_2d, renderer_gl->textures_2d);
+
+ renderer_gl->num_textures_2d = 0;
+
+ if (prog_obj) {
+ glDetachShader (prog_obj, vert_obj);
+ glDetachShader (prog_obj, frag_obj);
+ glDeleteProgram (prog_obj);
+ }
+
+ if (vert_obj) {
+ glDeleteShader (vert_obj);
+ }
+
+ if (frag_obj) {
+ glDeleteShader (frag_obj);
+ }
+
+ //Failed in EGL OES Texture part
+ glDeleteTextures (renderer_gl->num_textures, renderer_gl->textures);
+
+ renderer_gl->num_textures = 0;
+
+ return FALSE;
+}
+
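+/* Releases all GL objects created in setup: programs, shaders, vertex and
+ * index buffers, and both texture sets. */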
+static void
+gst_nv_video_renderer_gl_cleanup (GstNvVideoRenderer * renderer)
+{
+ GstNvVideoRendererGl *renderer_gl = GST_NV_VIDEO_RENDERER_GL (renderer);
+ int i;
+
+ for (i = 0; i < 3; i++)
+ {
+ if (renderer_gl->prog_obj[i] && renderer_gl->vert_obj[i]) {
+ glDetachShader (renderer_gl->prog_obj[i], renderer_gl->vert_obj[i]);
+ }
+ if (renderer_gl->prog_obj[i] && renderer_gl->frag_obj[i]) {
+ glDetachShader (renderer_gl->prog_obj[i], renderer_gl->frag_obj[i]);
+ }
+ if (renderer_gl->prog_obj[i]) {
+ glDeleteProgram (renderer_gl->prog_obj[i]);
+ renderer_gl->prog_obj[i] = 0;
+ }
+ if (renderer_gl->vert_obj[i]) {
+ glDeleteShader (renderer_gl->vert_obj[i]);
+ renderer_gl->vert_obj[i] = 0;
+ }
+ if (renderer_gl->frag_obj[i]) {
+ glDeleteShader (renderer_gl->frag_obj[i]);
+ renderer_gl->frag_obj[i] = 0;
+ }
+ }
+
+ if (renderer_gl->vertex_buffer) {
+ glDeleteBuffers (1, &renderer_gl->vertex_buffer);
+ renderer_gl->vertex_buffer = 0;
+ }
+ if (renderer_gl->vertex_buffer_2d) {
+ glDeleteBuffers (1, &renderer_gl->vertex_buffer_2d);
+ renderer_gl->vertex_buffer_2d = 0;
+ }
+
+ if (renderer_gl->index_buffer) {
+ glDeleteBuffers (1, &renderer_gl->index_buffer);
+ renderer_gl->index_buffer = 0;
+ }
+ if (renderer_gl->index_buffer_2d) {
+ glDeleteBuffers (1, &renderer_gl->index_buffer_2d);
+ renderer_gl->index_buffer_2d = 0;
+ }
+
+ glDeleteTextures (renderer_gl->num_textures, renderer_gl->textures);
+ glDeleteTextures (renderer_gl->num_textures_2d, renderer_gl->textures_2d);
+
+ renderer_gl->num_textures = 0;
+ renderer_gl->num_textures_2d = 0;
+
+ GST_DEBUG_OBJECT (renderer, "GL renderer cleanup done");
+
+ return;
+}
+
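+/* Creates a CUDA context and, for the negotiated format, allocates GL texture
+ * storage per plane and registers each texture with CUDA through
+ * cuGraphicsGLRegisterImage, so that cuda_buffer_copy can later write decoded
+ * NvBufSurface planes into them directly. */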
+gboolean
+gst_nv_video_renderer_gl_cuda_init (GstNvVideoContext * context, GstNvVideoRenderer * renderer)
+{
+ GstNvVideoRendererGl *renderer_gl = GST_NV_VIDEO_RENDERER_GL (renderer);
+ CUcontext pctx;
+ CUresult result;
+ GLenum error;
+ int i;
+ guint width, height, pstride;
+ GstVideoFormat videoFormat;
+
+ cuInit(0);
+ result = cuCtxCreate(&pctx, 0, 0);
+ if (result != CUDA_SUCCESS) {
+ g_print ("cuCtxCreate failed with error(%d) %s\n", result, __func__);
+ return FALSE;
+ }
+
+ context->cuContext = pctx;
+
+ width = context->configured_info.width;
+ height = context->configured_info.height;
+
+ videoFormat = context->configured_info.finfo->format;
+
+ switch (videoFormat) {
+ case GST_VIDEO_FORMAT_RGBA:
+ case GST_VIDEO_FORMAT_BGRx:
+ case GST_VIDEO_FORMAT_BGR:
+ case GST_VIDEO_FORMAT_RGB: {
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, renderer_gl->textures_2d[0]);
+ if (videoFormat == GST_VIDEO_FORMAT_RGB ||
+ videoFormat == GST_VIDEO_FORMAT_BGR) {
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
+ } else {
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
+ }
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ error = glGetError();
+ if (error != GL_NO_ERROR) {
+ g_print("glerror %x error %d\n", error, __LINE__);
+ return FALSE;
+ }
+ result = cuGraphicsGLRegisterImage(&(context->cuResource[0]), renderer_gl->textures_2d[0], GL_TEXTURE_2D, 0);
+ if (result != CUDA_SUCCESS) {
+ g_print ("cuGraphicsGLRegisterBuffer failed with error(%d) %s texture = %x\n", result, __func__, renderer_gl->textures_2d[0]);
+ return FALSE;
+ }
+ }
+ break;
+ case GST_VIDEO_FORMAT_I420: {
+ for (i = 0; i < 3; i++) {
+ if (i == 0)
+ glActiveTexture (GL_TEXTURE0);
+ else if (i == 1)
+ glActiveTexture (GL_TEXTURE1);
+ else if (i == 2)
+ glActiveTexture (GL_TEXTURE2);
+
+ width = GST_VIDEO_INFO_COMP_WIDTH(&(context->configured_info), i);
+ height = GST_VIDEO_INFO_COMP_HEIGHT(&(context->configured_info), i);
+
+ glBindTexture(GL_TEXTURE_2D, renderer_gl->textures_2d[i]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width, height, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ error = glGetError();
+ if (error != GL_NO_ERROR) {
+ g_print("glerror %x error %d\n", error, __LINE__);
+ return FALSE;
+ }
+ result = cuGraphicsGLRegisterImage(&(context->cuResource[i]), renderer_gl->textures_2d[i], GL_TEXTURE_2D, 0);
+ if (result != CUDA_SUCCESS) {
+ g_print ("cuGraphicsGLRegisterBuffer failed with error(%d) %s texture = %x\n", result, __func__, renderer_gl->textures_2d[i]);
+ return FALSE;
+ }
+ }
+ }
+ break;
+ case GST_VIDEO_FORMAT_NV12: {
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ glActiveTexture (GL_TEXTURE0);
+ else if (i == 1)
+ glActiveTexture (GL_TEXTURE1);
+ glBindTexture(GL_TEXTURE_2D, renderer_gl->textures_2d[i]);
+
+ width = GST_VIDEO_INFO_COMP_WIDTH(&(context->configured_info), i);
+ height = GST_VIDEO_INFO_COMP_HEIGHT(&(context->configured_info), i);
+ pstride = GST_VIDEO_INFO_COMP_PSTRIDE(&(context->configured_info), i);
+
+ if (i == 0)
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width*pstride, height, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
+ else if ( i == 1)
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE_ALPHA, width*pstride, height, 0, GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, NULL);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ error = glGetError();
+ if (error != GL_NO_ERROR) {
+ g_print("glerror %x error %d\n", error, __LINE__);
+ return FALSE;
+ }
+ result = cuGraphicsGLRegisterImage(&(context->cuResource[i]), renderer_gl->textures_2d[i], GL_TEXTURE_2D, 0);
+ if (result != CUDA_SUCCESS) {
+ g_print ("cuGraphicsGLRegisterBuffer failed with error(%d) %s texture = %x\n", result, __func__, renderer_gl->textures_2d[i]);
+ return FALSE;
+ }
+ }
+ }
+ break;
+ default:
+ g_print("buffer format not supported\n");
+ return FALSE;
+ }
+ context->is_cuda_init = TRUE;
+ return TRUE;
+}
+
+static void
+gst_nv_video_renderer_gl_cuda_cleanup (GstNvVideoContext * context, GstNvVideoRenderer * renderer)
+{
+ CUresult result;
+ guint i;
+
+ for (i = 0; i < 3; i++) {
+ if (context->cuResource[i])
+ cuGraphicsUnregisterResource (context->cuResource[i]);
+ }
+
+ if (context->cuContext) {
+ result = cuCtxDestroy(context->cuContext);
+ if (result != CUDA_SUCCESS) {
+ g_print ("cuCtxDestroy failed with error(%d) %s\n", result, __func__);
+ }
+ }
+}
+
+static void
+gst_nv_video_renderer_gl_update_viewport (GstNvVideoRenderer * renderer,
+ int width, int height)
+{
+ glClearColor (0.0f, 0.0f, 0.0f, 0.0f);
+ glClear (GL_COLOR_BUFFER_BIT);
+ glViewport (0, 0, width, height);
+}
+
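+/* Software upload path: maps the GstBuffer as a raw video frame and uploads
+ * each plane with glTexImage2D. GL_UNPACK_ALIGNMENT is chosen from the plane
+ * stride, and the stride/width ratio is recorded so the tex_scale uniforms
+ * can compensate for any row padding when sampling. */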
+static gboolean
+gst_nv_video_renderer_gl_fill_texture (GstNvVideoContext *context, GstNvVideoRenderer * renderer, GstBuffer * buf)
+{
+ GstNvVideoRendererGl *renderer_gl = GST_NV_VIDEO_RENDERER_GL (renderer);
+ GstVideoFrame vframe;
+ gint w, h;
+
+ memset (&vframe, 0, sizeof (vframe));
+
+ if (!gst_video_frame_map (&vframe, &context->configured_info, buf,
+ GST_MAP_READ)) {
+ GST_ERROR_OBJECT (context, "Couldn't map frame");
+ goto HANDLE_ERROR;
+ }
+
+ w = GST_VIDEO_FRAME_WIDTH (&vframe);
+ h = GST_VIDEO_FRAME_HEIGHT (&vframe);
+
+ GST_DEBUG_OBJECT (context,
+ "Got buffer %p: %dx%d size %" G_GSIZE_FORMAT, buf, w, h,
+ gst_buffer_get_size (buf));
+
+ gint stride;
+ gint stride_width;
+ gint c_w;
+
+ switch (context->configured_info.finfo->format) {
+ case GST_VIDEO_FORMAT_BGR:
+ case GST_VIDEO_FORMAT_RGB:{
+ stride = GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, 0);
+ stride_width = c_w = GST_VIDEO_FRAME_WIDTH (&vframe);
+
+ glActiveTexture (GL_TEXTURE0);
+
+ if (GST_ROUND_UP_8 (c_w * 3) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (GST_ROUND_UP_4 (c_w * 3) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else if (GST_ROUND_UP_2 (c_w * 3) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 2);
+ } else if (c_w * 3 == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 1);
+ } else {
+ stride_width = stride;
+
+ if (GST_ROUND_UP_8 (stride_width * 3) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (GST_ROUND_UP_4 (stride_width * 3) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else if (GST_ROUND_UP_2 (stride_width * 3) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 2);
+ } else if (stride_width * 3 == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 1);
+ } else {
+ GST_ERROR_OBJECT (context, "Unsupported stride %d", stride);
+ goto HANDLE_ERROR;
+ }
+ }
+ if (check_gl_error (renderer,"glPixelStorei"))
+ goto HANDLE_ERROR;
+
+ renderer_gl->stride[0] = ((gdouble) stride_width) / ((gdouble) c_w);
+
+ glBindTexture (GL_TEXTURE_2D, renderer_gl->textures_2d[0]);
+ glTexImage2D (GL_TEXTURE_2D, 0, GL_RGB, stride_width, h, 0, GL_RGB,
+ GL_UNSIGNED_BYTE, GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0));
+ break;
+ }
+ case GST_VIDEO_FORMAT_RGB16:{
+ stride = GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, 0);
+ stride_width = c_w = GST_VIDEO_FRAME_WIDTH (&vframe);
+
+ glActiveTexture (GL_TEXTURE0);
+
+ if (GST_ROUND_UP_8 (c_w * 2) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (GST_ROUND_UP_4 (c_w * 2) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else if (c_w * 2 == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 2);
+ } else {
+ stride_width = stride;
+
+ if (GST_ROUND_UP_8 (stride_width * 4) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (GST_ROUND_UP_4 (stride_width * 2) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else if (stride_width * 2 == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 2);
+ } else {
+ GST_ERROR_OBJECT (context, "Unsupported stride %d", stride);
+ goto HANDLE_ERROR;
+ }
+ }
+ if (check_gl_error (renderer,"glPixelStorei"))
+ goto HANDLE_ERROR;
+
+ renderer_gl->stride[0] = ((gdouble) stride_width) / ((gdouble) c_w);
+
+ glBindTexture (GL_TEXTURE_2D, renderer_gl->textures_2d[0]);
+ glTexImage2D (GL_TEXTURE_2D, 0, GL_RGB, stride_width, h, 0, GL_RGB,
+ GL_UNSIGNED_SHORT_5_6_5, GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0));
+ break;
+ }
+ case GST_VIDEO_FORMAT_RGBA:
+ case GST_VIDEO_FORMAT_BGRA:
+ case GST_VIDEO_FORMAT_ARGB:
+ case GST_VIDEO_FORMAT_ABGR:
+ case GST_VIDEO_FORMAT_RGBx:
+ case GST_VIDEO_FORMAT_BGRx:
+ case GST_VIDEO_FORMAT_xRGB:
+ case GST_VIDEO_FORMAT_xBGR:{
+ stride = GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, 0);
+ stride_width = c_w = GST_VIDEO_FRAME_WIDTH (&vframe);
+ glActiveTexture (GL_TEXTURE0);
+
+ if (GST_ROUND_UP_8 (c_w * 4) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (c_w * 4 == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else {
+ stride_width = stride;
+ if (GST_ROUND_UP_8 (stride_width * 4) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (stride_width * 4 == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else {
+ GST_ERROR_OBJECT (context, "Unsupported stride %d", stride);
+ goto HANDLE_ERROR;
+ }
+ }
+ if (check_gl_error (renderer,"glPixelStorei"))
+ goto HANDLE_ERROR;
+
+ renderer_gl->stride[0] = ((gdouble) stride_width) / ((gdouble) c_w);
+
+ glBindTexture (GL_TEXTURE_2D, renderer_gl->textures_2d[0]);
+ glTexImage2D (GL_TEXTURE_2D, 0, GL_RGBA, stride_width, h, 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0));
+
+ break;
+ }
+ case GST_VIDEO_FORMAT_AYUV:{
+ stride = GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, 0);
+ stride_width = c_w = GST_VIDEO_FRAME_WIDTH (&vframe);
+
+ glActiveTexture (GL_TEXTURE0);
+
+ if (GST_ROUND_UP_8 (c_w * 4) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (c_w * 4 == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else {
+ stride_width = stride;
+
+ if (GST_ROUND_UP_8 (stride_width * 4) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (stride_width * 4 == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else {
+ GST_ERROR_OBJECT (context, "Unsupported stride %d", stride);
+ goto HANDLE_ERROR;
+ }
+ }
+ if (check_gl_error (renderer,"glPixelStorei"))
+ goto HANDLE_ERROR;
+
+ renderer_gl->stride[0] = ((gdouble) stride_width) / ((gdouble) c_w);
+
+ glBindTexture (GL_TEXTURE_2D, renderer_gl->textures_2d[0]);
+ glTexImage2D (GL_TEXTURE_2D, 0, GL_RGBA, stride_width, h, 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0));
+ break;
+ }
+ case GST_VIDEO_FORMAT_Y444:
+ case GST_VIDEO_FORMAT_I420:
+ case GST_VIDEO_FORMAT_YV12:
+ case GST_VIDEO_FORMAT_Y42B:
+ case GST_VIDEO_FORMAT_Y41B:{
+ stride = GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, 0);
+ stride_width = c_w = GST_VIDEO_FRAME_COMP_WIDTH (&vframe, 0);
+
+ glActiveTexture (GL_TEXTURE0);
+
+ if (GST_ROUND_UP_8 (c_w) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (GST_ROUND_UP_4 (c_w) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else if (GST_ROUND_UP_2 (c_w) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 2);
+ } else if (c_w == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 1);
+ } else {
+ stride_width = stride;
+
+ if (GST_ROUND_UP_8 (stride_width) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (GST_ROUND_UP_4 (stride_width) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else if (GST_ROUND_UP_2 (stride_width) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 2);
+ } else if (stride_width == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 1);
+ } else {
+ GST_ERROR_OBJECT (context, "Unsupported stride %d", stride);
+ goto HANDLE_ERROR;
+ }
+ }
+ if (check_gl_error (renderer,"glPixelStorei"))
+ goto HANDLE_ERROR;
+
+ renderer_gl->stride[0] = ((gdouble) stride_width) / ((gdouble) c_w);
+
+ glBindTexture (GL_TEXTURE_2D, renderer_gl->textures_2d[0]);
+ glTexImage2D (GL_TEXTURE_2D, 0, GL_LUMINANCE,
+ stride_width,
+ GST_VIDEO_FRAME_COMP_HEIGHT (&vframe, 0),
+ 0, GL_LUMINANCE, GL_UNSIGNED_BYTE,
+ GST_VIDEO_FRAME_COMP_DATA (&vframe, 0));
+
+
+ stride = GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, 1);
+ stride_width = c_w = GST_VIDEO_FRAME_COMP_WIDTH (&vframe, 1);
+
+ glActiveTexture (GL_TEXTURE1);
+
+ if (GST_ROUND_UP_8 (c_w) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (GST_ROUND_UP_4 (c_w) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else if (GST_ROUND_UP_2 (c_w) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 2);
+ } else if (c_w == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 1);
+ } else {
+ stride_width = stride;
+
+ if (GST_ROUND_UP_8 (stride_width) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (GST_ROUND_UP_4 (stride_width) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else if (GST_ROUND_UP_2 (stride_width) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 2);
+ } else if (stride_width == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 1);
+ } else {
+ GST_ERROR_OBJECT (context, "Unsupported stride %d", stride);
+ goto HANDLE_ERROR;
+ }
+ }
+ if (check_gl_error (renderer,"glPixelStorei"))
+ goto HANDLE_ERROR;
+
+ renderer_gl->stride[1] = ((gdouble) stride_width) / ((gdouble) c_w);
+
+ glBindTexture (GL_TEXTURE_2D, renderer_gl->textures_2d[1]);
+ glTexImage2D (GL_TEXTURE_2D, 0, GL_LUMINANCE,
+ stride_width,
+ GST_VIDEO_FRAME_COMP_HEIGHT (&vframe, 1),
+ 0, GL_LUMINANCE, GL_UNSIGNED_BYTE,
+ GST_VIDEO_FRAME_COMP_DATA (&vframe, 1));
+
+
+ stride = GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, 2);
+ stride_width = c_w = GST_VIDEO_FRAME_COMP_WIDTH (&vframe, 2);
+
+ glActiveTexture (GL_TEXTURE2);
+
+ if (GST_ROUND_UP_8 (c_w) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (GST_ROUND_UP_4 (c_w) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else if (GST_ROUND_UP_2 (c_w) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 2);
+ } else if (c_w == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 1);
+ } else {
+ stride_width = stride;
+
+ if (GST_ROUND_UP_8 (stride_width) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (GST_ROUND_UP_4 (stride_width) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else if (GST_ROUND_UP_2 (stride_width) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 2);
+ } else if (stride_width == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 1);
+ } else {
+ GST_ERROR_OBJECT (context, "Unsupported stride %d", stride);
+ goto HANDLE_ERROR;
+ }
+ }
+ if (check_gl_error (renderer,"glPixelStorei"))
+ goto HANDLE_ERROR;
+
+ renderer_gl->stride[2] = ((gdouble) stride_width) / ((gdouble) c_w);
+
+ glBindTexture (GL_TEXTURE_2D, renderer_gl->textures_2d[2]);
+ glTexImage2D (GL_TEXTURE_2D, 0, GL_LUMINANCE,
+ stride_width,
+ GST_VIDEO_FRAME_COMP_HEIGHT (&vframe, 2),
+ 0, GL_LUMINANCE, GL_UNSIGNED_BYTE,
+ GST_VIDEO_FRAME_COMP_DATA (&vframe, 2));
+ break;
+ }
+ case GST_VIDEO_FORMAT_NV12:
+ case GST_VIDEO_FORMAT_NV21:{
+ stride = GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, 0);
+ stride_width = c_w = GST_VIDEO_FRAME_COMP_WIDTH (&vframe, 0);
+
+ glActiveTexture (GL_TEXTURE0);
+
+ if (GST_ROUND_UP_8 (c_w) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (GST_ROUND_UP_4 (c_w) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else if (GST_ROUND_UP_2 (c_w) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 2);
+ } else if (c_w == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 1);
+ } else {
+ stride_width = stride;
+
+ if (GST_ROUND_UP_8 (stride_width) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (GST_ROUND_UP_4 (stride_width) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else if (GST_ROUND_UP_2 (stride_width) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 2);
+ } else if (stride_width == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 1);
+ } else {
+ GST_ERROR_OBJECT (context, "Unsupported stride %d", stride);
+ goto HANDLE_ERROR;
+ }
+ }
+ if (check_gl_error (renderer,"glPixelStorei"))
+ goto HANDLE_ERROR;
+
+ renderer_gl->stride[0] = ((gdouble) stride_width) / ((gdouble) c_w);
+
+ glBindTexture (GL_TEXTURE_2D, renderer_gl->textures_2d[0]);
+ glTexImage2D (GL_TEXTURE_2D, 0, GL_LUMINANCE,
+ stride_width,
+ GST_VIDEO_FRAME_COMP_HEIGHT (&vframe, 0),
+ 0, GL_LUMINANCE, GL_UNSIGNED_BYTE,
+ GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0));
+
+ stride = GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, 1);
+ stride_width = c_w = GST_VIDEO_FRAME_COMP_WIDTH (&vframe, 1);
+
+ glActiveTexture (GL_TEXTURE1);
+
+ if (GST_ROUND_UP_8 (c_w * 2) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (GST_ROUND_UP_4 (c_w * 2) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else if (c_w * 2 == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 2);
+ } else {
+ stride_width = stride / 2;
+
+ if (GST_ROUND_UP_8 (stride_width * 2) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 8);
+ } else if (GST_ROUND_UP_4 (stride_width * 2) == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 4);
+ } else if (stride_width * 2 == stride) {
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 2);
+ } else {
+ GST_ERROR_OBJECT (context, "Unsupported stride %d", stride);
+ goto HANDLE_ERROR;
+ }
+ }
+ if (check_gl_error (renderer,"glPixelStorei"))
+ goto HANDLE_ERROR;
+
+ renderer_gl->stride[1] = ((gdouble) stride_width) / ((gdouble) c_w);
+
+ glBindTexture (GL_TEXTURE_2D, renderer_gl->textures_2d[1]);
+ glTexImage2D (GL_TEXTURE_2D, 0, GL_LUMINANCE_ALPHA,
+ stride_width,
+ GST_VIDEO_FRAME_COMP_HEIGHT (&vframe, 1),
+ 0, GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE,
+ GST_VIDEO_FRAME_PLANE_DATA (&vframe, 1));
+ break;
+ }
+ default:
+ g_assert_not_reached ();
+ break;
+ }
+
+ if (check_gl_error (renderer,"glTexImage2D"))
+ goto HANDLE_ERROR;
+
+
+ gst_video_frame_unmap (&vframe);
+
+ return TRUE;
+
+ HANDLE_ERROR:
+ {
+ if (vframe.buffer)
+ gst_video_frame_unmap (&vframe);
+
+ return FALSE;
+ }
+}
+
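+/* CUDA upload path: interprets the buffer contents as an NvBufSurface and
+ * copies each plane into the CUDA-registered GL texture with cuMemcpy2D,
+ * handling both device and pinned host source memory. Only batch size 1 is
+ * supported. */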
+static gboolean
+gst_nv_video_renderer_gl_cuda_buffer_copy (GstNvVideoContext *context, GstNvVideoRenderer * renderer, GstBuffer * buf)
+{
+ GstNvVideoRendererGl *renderer_gl = GST_NV_VIDEO_RENDERER_GL (renderer);
+ CUarray dpArray;
+ CUresult result;
+ guint width, height;
+ GstMapInfo info = GST_MAP_INFO_INIT;
+ GstVideoFormat videoFormat;
+ NvBufSurface *in_surface = NULL;
+
+ width = context->configured_info.width;
+ height = context->configured_info.height;
+
+ result = cuCtxSetCurrent(context->cuContext);
+ if (result != CUDA_SUCCESS) {
+ g_print ("cuCtxSetCurrent failed with error(%d) %s\n", result, __func__);
+ return FALSE;
+ }
+ if (!gst_buffer_map (buf, &info, GST_MAP_READ)) {
+ GST_ERROR_OBJECT (context, "failed to map buffer");
+ return FALSE;
+ }
+ in_surface = (NvBufSurface*) info.data;
+ gst_buffer_unmap (buf, &info);
+
+ if (in_surface->batchSize != 1) {
+ GST_ERROR_OBJECT (context,"ERROR: Batch size not 1\n");
+ return FALSE;
+ }
+
+ NvBufSurfaceMemType memType = in_surface->memType;
+ gboolean is_device_memory = FALSE;
+ gboolean is_host_memory = FALSE;
+
+ if (memType == NVBUF_MEM_DEFAULT || memType == NVBUF_MEM_CUDA_DEVICE || memType == NVBUF_MEM_CUDA_UNIFIED) {
+ is_device_memory = TRUE;
+ }
+ else if (memType == NVBUF_MEM_CUDA_PINNED) {
+ is_host_memory = TRUE;
+ }
+
+ CUDA_MEMCPY2D m = { 0 };
+
+ videoFormat = context->configured_info.finfo->format;
+ switch (videoFormat) {
+ case GST_VIDEO_FORMAT_RGBA:
+ case GST_VIDEO_FORMAT_BGRx:
+ case GST_VIDEO_FORMAT_BGR:
+ case GST_VIDEO_FORMAT_RGB: {
+ gint bytesPerPix = 4;
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, renderer_gl->textures_2d[0]);
+
+ result = cuGraphicsMapResources(1, &(context->cuResource[0]), 0);
+ if (result != CUDA_SUCCESS) {
+ g_print ("cuGraphicsMapResources failed with error(%d) %s\n", result, __func__);
+ return FALSE;
+ }
+ result = cuGraphicsSubResourceGetMappedArray(&dpArray, context->cuResource[0], 0, 0);
+ if (result != CUDA_SUCCESS) {
+ g_print ("cuGraphicsResourceGetMappedPointer failed with error(%d) %s\n", result, __func__);
+ goto HANDLE_ERROR;
+ }
+
+ if (is_device_memory) {
+ m.srcDevice = (CUdeviceptr) in_surface->surfaceList[0].dataPtr;
+ m.srcMemoryType = CU_MEMORYTYPE_DEVICE;
+ }
+ else if (is_host_memory) {
+ m.srcHost = (void *)in_surface->surfaceList[0].dataPtr;
+ m.srcMemoryType = CU_MEMORYTYPE_HOST;
+ }
+
+ if (videoFormat == GST_VIDEO_FORMAT_BGR ||
+ videoFormat == GST_VIDEO_FORMAT_RGB) {
+ bytesPerPix = 3;
+ }
+
+ m.srcPitch = in_surface->surfaceList[0].planeParams.pitch[0];
+
+ m.dstPitch = width * bytesPerPix;
+ m.WidthInBytes = width * bytesPerPix;
+
+ m.dstMemoryType = CU_MEMORYTYPE_ARRAY;
+ m.dstArray = dpArray;
+ m.Height = height;
+
+ result = cuMemcpy2D(&m);
+ if (result != CUDA_SUCCESS) {
+ g_print ("cuMemcpy2D failed with error(%d) %s\n", result, __func__);
+ goto HANDLE_ERROR;
+ }
+
+ result = cuGraphicsUnmapResources(1, &(context->cuResource[0]), 0);
+ if (result != CUDA_SUCCESS) {
+ g_print ("cuGraphicsUnmapResources failed with error(%d) %s\n", result, __func__);
+ goto HANDLE_ERROR;
+ }
+
+ renderer_gl->stride[0] = 1;
+ renderer_gl->stride[1] = 1;
+ renderer_gl->stride[2] = 1;
+ } // case RGBA
+ break;
+ case GST_VIDEO_FORMAT_I420:
+ case GST_VIDEO_FORMAT_NV12: {
+ uint8_t *ptr;
+ int i, pstride;
+ int num_planes = (int)in_surface->surfaceList[0].planeParams.num_planes;
+
+ for ( i = 0; i < num_planes; i ++) {
+ if (i == 0)
+ glActiveTexture (GL_TEXTURE0);
+ else if (i == 1)
+ glActiveTexture (GL_TEXTURE1);
+ else if (i == 2)
+ glActiveTexture (GL_TEXTURE2);
+ glBindTexture(GL_TEXTURE_2D, renderer_gl->textures_2d[i]);
+
+ result = cuGraphicsMapResources(1, &(context->cuResource[i]), 0);
+ if (result != CUDA_SUCCESS) {
+ g_print ("cuGraphicsMapResources failed with error(%d) %s\n", result, __func__);
+ return FALSE;
+ }
+ result = cuGraphicsSubResourceGetMappedArray(&dpArray, context->cuResource[i], 0, 0);
+ if (result != CUDA_SUCCESS) {
+ g_print ("cuGraphicsResourceGetMappedPointer failed with error(%d) %s\n", result, __func__);
+ goto HANDLE_ERROR;
+ }
+
+ ptr = (uint8_t *)in_surface->surfaceList[0].dataPtr + in_surface->surfaceList[0].planeParams.offset[i];
+ if (is_device_memory) {
+ m.srcDevice = (CUdeviceptr) ptr;
+ m.srcMemoryType = CU_MEMORYTYPE_DEVICE;
+ }
+ else if (is_host_memory) {
+ m.srcHost = (void *)ptr;
+ m.srcMemoryType = CU_MEMORYTYPE_HOST;
+ }
+
+ width = GST_VIDEO_INFO_COMP_WIDTH(&(context->configured_info), i);
+ height = GST_VIDEO_INFO_COMP_HEIGHT(&(context->configured_info), i);
+ pstride = GST_VIDEO_INFO_COMP_PSTRIDE(&(context->configured_info), i);
+ m.srcPitch = in_surface->surfaceList[0].planeParams.pitch[i];
+
+ m.dstMemoryType = CU_MEMORYTYPE_ARRAY;
+ m.dstArray = dpArray;
+ m.WidthInBytes = width*pstride;
+ m.Height = height;
+
+ result = cuMemcpy2D(&m);
+ if (result != CUDA_SUCCESS) {
+ g_print ("cuMemcpy2D failed with error(%d) %s %d\n", result, __func__, __LINE__);
+ goto HANDLE_ERROR;
+ }
+
+ result = cuGraphicsUnmapResources(1, &(context->cuResource[i]), 0);
+ if (result != CUDA_SUCCESS) {
+ g_print ("cuGraphicsUnmapResources failed with error(%d) %s\n", result, __func__);
+ goto HANDLE_ERROR;
+ }
+
+ renderer_gl->stride[i] = pstride;
+ }
+ }// case I420 or NV12
+ break;
+ default:
+ g_print("buffer format not supported\n");
+ return FALSE;
+ break;
+ } //switch
+ return TRUE;
+
+HANDLE_ERROR:
+ if (context->cuResource[0])
+ cuGraphicsUnmapResources(1, &(context->cuResource[0]), 0);
+ if (context->cuResource[1])
+ cuGraphicsUnmapResources(1, &(context->cuResource[1]), 0);
+ if (context->cuResource[2])
+ cuGraphicsUnmapResources(1, &(context->cuResource[2]), 0);
+ return FALSE;
+}
+
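+/* Draws the two black border quads with the untextured program, then the
+ * video quad with the format conversion program. The tex_scale uniforms carry
+ * the per-plane stride ratios recorded during upload. */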
+static gboolean
+gst_nv_video_renderer_gl_draw_2D_Texture (GstNvVideoRenderer * renderer)
+{
+ GstNvVideoRendererGl *renderer_gl = GST_NV_VIDEO_RENDERER_GL (renderer);
+
+ glBindBuffer (GL_ARRAY_BUFFER, renderer_gl->vertex_buffer_2d);
+ glBindBuffer (GL_ELEMENT_ARRAY_BUFFER, renderer_gl->index_buffer_2d);
+
+ //Draw black border 1
+ glUseProgram (renderer_gl->prog_obj[2]);
+ glEnableVertexAttribArray (renderer_gl->position_loc[1]);
+ if (check_gl_error (renderer, "glEnableVertexAttribArray")) {
+ goto HANDLE_ERROR;
+ }
+ glVertexAttribPointer (renderer_gl->position_loc[1], 3,
+ GL_FLOAT, GL_FALSE, 5 * sizeof (GLfloat), (gpointer) (8 * sizeof (GLfloat)));
+ if (check_gl_error (renderer, "glVertexAttribPointer")) {
+ goto HANDLE_ERROR;
+ }
+ glDrawElements (GL_TRIANGLE_STRIP, 4, GL_UNSIGNED_SHORT, 0);
+ if (check_gl_error (renderer, "glDrawElements")) {
+ goto HANDLE_ERROR;
+ }
+
+ //Draw black border 2
+ glVertexAttribPointer (renderer_gl->position_loc[1], 3,
+ GL_FLOAT, GL_FALSE, 5 * sizeof (GLfloat), (gpointer) (12 * sizeof (GLfloat)));
+ if (check_gl_error (renderer, "glVertexAttribPointer")) {
+ goto HANDLE_ERROR;
+ }
+ glDrawElements (GL_TRIANGLE_STRIP, 4, GL_UNSIGNED_SHORT, 0);
+ if (check_gl_error (renderer, "glDrawElements")) {
+ goto HANDLE_ERROR;
+ }
+
+ glDisableVertexAttribArray (renderer_gl->position_loc[1]);
+
+ //Draw Video frame
+ glUseProgram (renderer_gl->prog_obj[1]);
+
+ glUniform2f (renderer_gl->tex_scale_loc[0][0], renderer_gl->stride[0], 1);
+ glUniform2f (renderer_gl->tex_scale_loc[0][1], renderer_gl->stride[1], 1);
+ glUniform2f (renderer_gl->tex_scale_loc[0][2], renderer_gl->stride[2], 1);
+
+ for (int i=0; i < renderer_gl->num_textures_2d; i++)
+ {
+ glUniform1i (renderer_gl->tex_loc[0][i], i);
+ if (check_gl_error (renderer, "glUniform1i")) {
+ goto HANDLE_ERROR;
+ }
+ }
+
+ glEnableVertexAttribArray (renderer_gl->position_loc[0]);
+ if (check_gl_error (renderer, "glEnableVertexAttribArray")) {
+ goto HANDLE_ERROR;
+ }
+ glEnableVertexAttribArray (renderer_gl->texpos_loc[0]);
+ if (check_gl_error (renderer, "glEnableVertexAttribArray")) {
+ goto HANDLE_ERROR;
+ }
+
+ // TODO: Orientation needs to be taken care of.
+ glVertexAttribPointer (renderer_gl->position_loc[0], 3,
+ GL_FLOAT, GL_FALSE, 5 * sizeof (GLfloat), (gpointer) (0 * sizeof (GLfloat)));
+ if (check_gl_error (renderer, "glVertexAttribPointer")) {
+ goto HANDLE_ERROR;
+ }
+ glVertexAttribPointer (renderer_gl->texpos_loc[0], 2,
+ GL_FLOAT, GL_FALSE, 5 * sizeof (GLfloat), (gpointer) (3 * sizeof (GLfloat)));
+ if (check_gl_error (renderer, "glVertexAttribPointer")) {
+ goto HANDLE_ERROR;
+ }
+
+ glDrawElements (GL_TRIANGLE_STRIP, 4, GL_UNSIGNED_SHORT, 0);
+ if (check_gl_error (renderer, "glDrawElements")) {
+ goto HANDLE_ERROR;
+ }
+
+ glBindBuffer (GL_ARRAY_BUFFER, 0);
+ glBindBuffer (GL_ELEMENT_ARRAY_BUFFER, 0);
+ glDisableVertexAttribArray (renderer_gl->position_loc[0]);
+ glDisableVertexAttribArray (renderer_gl->texpos_loc[0]);
+
+ glUseProgram (0);
+
+ return TRUE;
+
+HANDLE_ERROR:
+ glDisableVertexAttribArray (renderer_gl->position_loc[0]);
+ glDisableVertexAttribArray (renderer_gl->texpos_loc[0]);
+ glDisableVertexAttribArray (renderer_gl->position_loc[1]);
+
+ return FALSE;
+}
+
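+/* Binds the EGLImage to the external-OES texture via
+ * glEGLImageTargetTexture2DOES and draws it as a full-window quad. */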
+static gboolean
+gst_nv_video_renderer_gl_draw_eglimage (GstNvVideoRenderer * renderer,
+ void *image)
+{
+ GstNvVideoRendererGl *renderer_gl = GST_NV_VIDEO_RENDERER_GL (renderer);
+
+ glActiveTexture (GL_TEXTURE0);
+ glBindTexture (GL_TEXTURE_EXTERNAL_OES, renderer_gl->textures[0]);
+
+ renderer_gl->glEGLImageTargetTexture2DOES (GL_TEXTURE_EXTERNAL_OES,
+ (GLeglImageOES) image);
+
+ glBindBuffer (GL_ARRAY_BUFFER, renderer_gl->vertex_buffer);
+ glBindBuffer (GL_ELEMENT_ARRAY_BUFFER, renderer_gl->index_buffer);
+ glUseProgram (renderer_gl->prog_obj[0]);
+ glVertexAttribPointer (renderer_gl->pos, 3, GL_FLOAT, GL_FALSE,
+ 5 * sizeof (GLfloat), (void *) 0);
+ glVertexAttribPointer (renderer_gl->tex_pos, 2, GL_FLOAT, GL_FALSE,
+ 5 * sizeof (GLfloat), (void *) (3 * sizeof (GLfloat)));
+ glEnableVertexAttribArray (renderer_gl->pos);
+ glEnableVertexAttribArray (renderer_gl->tex_pos);
+
+ glDrawElements (GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, 0);
+
+ glBindBuffer (GL_ARRAY_BUFFER, 0);
+ glBindBuffer (GL_ELEMENT_ARRAY_BUFFER, 0);
+ glDisableVertexAttribArray (renderer_gl->pos);
+ glDisableVertexAttribArray (renderer_gl->tex_pos);
+
+ glUseProgram (0);
+
+ return TRUE;
+}
+
+static void
+gst_nv_video_renderer_gl_class_init (GstNvVideoRendererGlClass * klass)
+{
+ GstNvVideoRendererClass *renderer_class = (GstNvVideoRendererClass *) klass;
+
+ renderer_class->cuda_init =
+ GST_DEBUG_FUNCPTR (gst_nv_video_renderer_gl_cuda_init);
+ renderer_class->cuda_cleanup =
+ GST_DEBUG_FUNCPTR (gst_nv_video_renderer_gl_cuda_cleanup);
+ renderer_class->setup = GST_DEBUG_FUNCPTR (gst_nv_video_renderer_gl_setup);
+ renderer_class->cleanup =
+ GST_DEBUG_FUNCPTR (gst_nv_video_renderer_gl_cleanup);
+ renderer_class->update_viewport =
+ GST_DEBUG_FUNCPTR (gst_nv_video_renderer_gl_update_viewport);
+ renderer_class->fill_texture =
+ GST_DEBUG_FUNCPTR (gst_nv_video_renderer_gl_fill_texture);
+ renderer_class->cuda_buffer_copy =
+ GST_DEBUG_FUNCPTR (gst_nv_video_renderer_gl_cuda_buffer_copy);
+ renderer_class->draw_2D_Texture =
+ GST_DEBUG_FUNCPTR (gst_nv_video_renderer_gl_draw_2D_Texture);
+ renderer_class->draw_eglimage =
+ GST_DEBUG_FUNCPTR (gst_nv_video_renderer_gl_draw_eglimage);
+}
+
+static void
+gst_nv_video_renderer_gl_init (GstNvVideoRendererGl * renderer_gl)
+{
+ for (int i = 0; i < 3; i++) {
+ renderer_gl->prog_obj[i] = 0;
+ renderer_gl->vert_obj[i] = 0;
+ renderer_gl->frag_obj[i] = 0;
+ }
+ renderer_gl->num_textures = 0;
+ renderer_gl->num_textures_2d = 0;
+ renderer_gl->vertex_buffer = 0;
+ renderer_gl->vertex_buffer_2d = 0;
+ renderer_gl->index_buffer = 0;
+ renderer_gl->index_buffer_2d = 0;
+}
+
+GstNvVideoRendererGl *
+gst_nv_video_renderer_gl_new (GstNvVideoContext * context)
+{
+ GstNvVideoRendererGl *ret;
+
+ // We need EGL context for GL renderer
+ if ((gst_nv_video_context_get_handle_type (context) &
+ GST_NV_VIDEO_CONTEXT_TYPE_EGL)
+ == 0) {
+ return NULL;
+ }
+
+ ret = g_object_new (GST_TYPE_NV_VIDEO_RENDERER_GL, NULL);
+ gst_object_ref_sink (ret);
+
+ return ret;
+}
diff --git a/gst-plugins-nv-video-sinks/common/renderer/renderer_gl.h b/gst-plugins-nv-video-sinks/common/renderer/renderer_gl.h
new file mode 100644
index 0000000..e42b707
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/renderer/renderer_gl.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * .
+ */
+
+#ifndef __GST_NV_VIDEO_RENDERER_GL_H__
+#define __GST_NV_VIDEO_RENDERER_GL_H__
+
+#include "context.h"
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+G_BEGIN_DECLS
+
+#define GST_TYPE_NV_VIDEO_RENDERER_GL \
+ (gst_nv_video_renderer_gl_get_type())
+#define GST_NV_VIDEO_RENDERER_GL(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV_VIDEO_RENDERER_GL, GstNvVideoRendererGl))
+#define GST_NV_VIDEO_RENDERER_GL_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_RENDERER_GL, GstNvVideoRendererGlClass))
+#define GST_IS_NV_VIDEO_RENDERER_GL(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_RENDERER_GL))
+#define GST_IS_NV_VIDEO_RENDERER_GL_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_RENDERER_GL))
+#define GST_NV_VIDEO_RENDERER_GL_CAST(obj) \
+ ((GstNvVideoRendererGl*)(obj))
+
+typedef struct _GstNvVideoRendererGl GstNvVideoRendererGl;
+typedef struct _GstNvVideoRendererGlClass GstNvVideoRendererGlClass;
+
+#define RENDERER_NUM_GL_TEXTURES 1
+
+struct _GstNvVideoRendererGl
+{
+ GstNvVideoRenderer parent;
+
+ GLuint vert_obj[3]; /* EGL frame, 2D frame, 2D frame border*/
+ GLuint frag_obj[3]; /* EGL frame, 2D frame, 2D frame border*/
+ GLuint prog_obj[3]; /* EGL frame, 2D frame, 2D frame border*/
+
+ GLint pos;
+ GLint tex_pos;
+ GLint tex_sampler;
+ GLsizei num_textures;
+ GLuint textures[RENDERER_NUM_GL_TEXTURES];
+ unsigned int vertex_buffer;
+ unsigned int index_buffer;
+
+ //Defining different attribs and uniforms for 2D textures
+ GLuint position_loc[2]; /* Frame and Border */
+ GLuint texpos_loc[1]; /* Frame */
+ GLuint tex_scale_loc[1][3]; /* [frame] RGB/Y, U/UV, V */
+ GLuint tex_loc[1][3]; /* [frame] RGB/Y, U/UV, V */
+ unsigned int vertex_buffer_2d;
+ unsigned int index_buffer_2d;
+ gint num_textures_2d;
+ GLuint textures_2d[3];
+ gdouble stride[3]; /* per-plane stride/width ratio fed to the tex_scale uniforms */
+
+ PFNGLEGLIMAGETARGETTEXTURE2DOESPROC glEGLImageTargetTexture2DOES;
+};
+
+struct _GstNvVideoRendererGlClass
+{
+ GstNvVideoRendererClass parent_class;
+};
+
+G_GNUC_INTERNAL
+GstNvVideoRendererGl * gst_nv_video_renderer_gl_new (GstNvVideoContext * context);
+void
+gst_nv_video_renderer_gl_process_shaders (GstNvVideoRenderer * renderer, gchar ** frag_prog, const gchar *texnames[], GstVideoFormat format);
+
+gboolean
+gst_nv_video_renderer_gl_cuda_init (GstNvVideoContext * context, GstNvVideoRenderer * renderer);
+
+GType gst_nv_video_renderer_gl_get_type (void);
+
+G_END_DECLS
+
+#endif /* __GST_NV_VIDEO_RENDERER_GL_H__ */
diff --git a/gst-plugins-nv-video-sinks/common/window.c b/gst-plugins-nv-video-sinks/common/window.c
new file mode 100644
index 0000000..202dc82
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/window.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * .
+ */
+
+#include "window.h"
+
+#if NV_VIDEO_SINKS_HAS_X11
+#include "window_x11.h"
+#endif
+
+#define GST_CAT_DEFAULT gst_debug_nv_video_window
+GST_DEBUG_CATEGORY (GST_CAT_DEFAULT);
+
+#define gst_nv_video_window_parent_class parent_class
+G_DEFINE_ABSTRACT_TYPE (GstNvVideoWindow, gst_nv_video_window, GST_TYPE_OBJECT);
+
+static void
+gst_nv_video_window_finalize (GObject * object)
+{
+ GstNvVideoWindow *window = GST_NV_VIDEO_WINDOW (object);
+
+ g_weak_ref_clear (&window->context);
+ gst_object_unref (window->display);
+
+ G_OBJECT_CLASS (gst_nv_video_window_parent_class)->finalize (object);
+}
+
+static void
+gst_nv_video_window_init (GstNvVideoWindow * window)
+{
+ g_weak_ref_init (&window->context, NULL);
+}
+
+static void
+gst_nv_video_window_class_init (GstNvVideoWindowClass * klass)
+{
+ G_OBJECT_CLASS (klass)->finalize = gst_nv_video_window_finalize;
+}
+
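+/* Creates a window object for the given display. The backend is chosen from
+ * the GST_NV_VIDEO_WINSYS environment variable; X11 is the only winsys wired
+ * up in this file. */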
+GstNvVideoWindow *
+gst_nv_video_window_new (GstNvVideoDisplay * display)
+{
+ GstNvVideoWindow *window = NULL;
+ static gsize debug_init = 0;
+ const gchar *winsys_name = NULL;
+
+ if (g_once_init_enter (&debug_init)) {
+ GST_DEBUG_CATEGORY_INIT (gst_debug_nv_video_window, "nvvideowindow", 0,
+ "nvvideowindow");
+ g_once_init_leave (&debug_init, 1);
+ }
+
+ winsys_name = g_getenv ("GST_NV_VIDEO_WINSYS");
+
+#if NV_VIDEO_SINKS_HAS_X11
+ if (!window && (!winsys_name || g_strstr_len (winsys_name, 3, "x11"))) {
+ window = GST_NV_VIDEO_WINDOW (gst_nv_video_window_x11_new (NULL));
+ }
+#endif
+
+ if (!window) {
+ GST_ERROR ("couldn't create window. GST_NV_VIDEO_WINSYS = %s",
+ winsys_name ? winsys_name : "NULL");
+ return NULL;
+ }
+
+ window->display = gst_object_ref (display);
+
+ GST_DEBUG_OBJECT (window, "created window for display %" GST_PTR_FORMAT,
+ display);
+
+ return window;
+}
+
+/* Create a new window handle after destroying the existing one. */
+gboolean
+gst_nv_video_window_create_window (GstNvVideoWindow * window, gint x,
+ gint y, gint width, gint height)
+{
+ GstNvVideoWindowClass *window_class;
+
+ window_class = GST_NV_VIDEO_WINDOW_GET_CLASS (window);
+
+ return window_class->create_window (window, x, y, width, height);
+}
+
+gboolean
+gst_nv_video_window_set_handle (GstNvVideoWindow * window, guintptr id)
+{
+ GstNvVideoWindowClass *window_class;
+
+ window_class = GST_NV_VIDEO_WINDOW_GET_CLASS (window);
+
+ return window_class->set_handle (window, id);
+}
+
+guintptr
+gst_nv_video_window_get_handle (GstNvVideoWindow * window)
+{
+ GstNvVideoWindowClass *window_class;
+ window_class = GST_NV_VIDEO_WINDOW_GET_CLASS (window);
+
+ return window_class->get_handle (window);
+}
+
+GstNvVideoContext *
+gst_nv_video_window_get_context (GstNvVideoWindow * window)
+{
+ g_return_val_if_fail (GST_IS_NV_VIDEO_WINDOW (window), NULL);
+
+ return (GstNvVideoContext *) g_weak_ref_get (&window->context);
+}
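gst_nv_video_window_new() above picks the window-system backend at run time: the GST_NV_VIDEO_WINSYS environment variable is consulted, and the X11 backend is used when the variable is unset or set to "x11". The sketch below only illustrates that API; the helper name and the pre-existing display object are assumptions of the sketch, and within this patch the sink actually obtains its window through gst_nv_video_display_create_window() (see gstnv3dsink.c).

/* Hypothetical helper, not part of this patch. Assumes a valid
 * GstNvVideoDisplay named "display" already exists. */
#include "window.h"

static gboolean
open_backend_window (GstNvVideoDisplay * display)
{
  GstNvVideoWindow *window;

  /* Backend selection honors GST_NV_VIDEO_WINSYS (e.g. "x11"). */
  window = gst_nv_video_window_new (display);
  if (!window)
    return FALSE;

  /* Ask the selected backend for an internal 640x480 window at (0, 0). */
  if (!gst_nv_video_window_create_window (window, 0, 0, 640, 480)) {
    gst_object_unref (window);
    return FALSE;
  }

  gst_object_unref (window);
  return TRUE;
}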
diff --git a/gst-plugins-nv-video-sinks/common/window.h b/gst-plugins-nv-video-sinks/common/window.h
new file mode 100644
index 0000000..d551c35
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/window.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __GST_NV_VIDEO_WINDOW_H__
+#define __GST_NV_VIDEO_WINDOW_H__
+
+#include <gst/gst.h>
+
+#include "gstnvvideofwd.h"
+
+G_BEGIN_DECLS
+
+#define GST_TYPE_NV_VIDEO_WINDOW \
+ (gst_nv_video_window_get_type())
+#define GST_NV_VIDEO_WINDOW(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_NV_VIDEO_WINDOW, GstNvVideoWindow))
+#define GST_NV_VIDEO_WINDOW_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_WINDOW, GstNvVideoWindowClass))
+#define GST_IS_NV_VIDEO_WINDOW(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_WINDOW))
+#define GST_IS_NV_VIDEO_WINDOW_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_WINDOW))
+#define GST_NV_VIDEO_WINDOW_CAST(obj) \
+ ((GstNvVideoWindow*)(obj))
+#define GST_NV_VIDEO_WINDOW_GET_CLASS(o) \
+ (G_TYPE_INSTANCE_GET_CLASS((o), GST_TYPE_NV_VIDEO_WINDOW, GstNvVideoWindowClass))
+
+struct _GstNvVideoWindowClass
+{
+ GstObjectClass parent_class;
+
+ guintptr (*get_handle) (GstNvVideoWindow * window);
+ gboolean (*set_handle) (GstNvVideoWindow * window, guintptr id);
+ gboolean (*create_window) (GstNvVideoWindow * window, gint x, gint y, gint width, gint height);
+ gboolean (*draw) (GstNvVideoWindow * window, GstBuffer * buf);
+};
+
+struct _GstNvVideoWindow
+{
+ GstObject parent;
+
+ GstNvVideoDisplay *display;
+
+ GWeakRef context;
+};
+
+GST_EXPORT
+GstNvVideoWindow *gst_nv_video_window_new (GstNvVideoDisplay * display);
+GST_EXPORT
+gboolean gst_nv_video_window_create_window (GstNvVideoWindow * window, gint x, gint y, gint width, gint height);
+GST_EXPORT
+gboolean gst_nv_video_window_set_handle (GstNvVideoWindow * window, guintptr id);
+GST_EXPORT
+guintptr gst_nv_video_window_get_handle (GstNvVideoWindow * window);
+GST_EXPORT
+GstNvVideoContext *gst_nv_video_window_get_context (GstNvVideoWindow * window);
+
+GType gst_nv_video_window_get_type (void);
+
+G_END_DECLS
+
+#endif /* __GST_NV_VIDEO_WINDOW_H__ */
diff --git a/gst-plugins-nv-video-sinks/common/x11/display_x11.c b/gst-plugins-nv-video-sinks/common/x11/display_x11.c
new file mode 100644
index 0000000..bdbbbf4
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/x11/display_x11.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include "display_x11.h"
+
+G_GNUC_INTERNAL extern GstDebugCategory *gst_debug_nv_video_display;
+#define GST_CAT_DEFAULT gst_debug_nv_video_display
+
+G_DEFINE_TYPE (GstNvVideoDisplayX11, gst_nv_video_display_x11,
+ GST_TYPE_NV_VIDEO_DISPLAY);
+
+static void
+gst_nv_video_display_x11_finalize (GObject * object)
+{
+ GstNvVideoDisplayX11 *display_x11 = GST_NV_VIDEO_DISPLAY_X11 (object);
+
+ GST_DEBUG ("closing X11 display connection, handle=%p", display_x11->dpy);
+
+ if (display_x11->dpy) {
+ XCloseDisplay (display_x11->dpy);
+ }
+
+ GST_DEBUG ("closed X11 display connection");
+
+ G_OBJECT_CLASS (gst_nv_video_display_x11_parent_class)->finalize (object);
+}
+
+static guintptr
+gst_nv_video_display_x11_get_handle (GstNvVideoDisplay * display)
+{
+ return (guintptr) GST_NV_VIDEO_DISPLAY_X11 (display)->dpy;
+}
+
+static void
+gst_nv_video_display_x11_class_init (GstNvVideoDisplayX11Class * klass)
+{
+ GST_NV_VIDEO_DISPLAY_CLASS (klass)->get_handle =
+ GST_DEBUG_FUNCPTR (gst_nv_video_display_x11_get_handle);
+ G_OBJECT_CLASS (klass)->finalize = gst_nv_video_display_x11_finalize;
+}
+
+static void
+gst_nv_video_display_x11_init (GstNvVideoDisplayX11 * display_x11)
+{
+ GstNvVideoDisplay *display = (GstNvVideoDisplay *) display_x11;
+
+ display->type = GST_NV_VIDEO_DISPLAY_TYPE_X11;
+
+ GST_DEBUG_OBJECT (display, "init done");
+}
+
+GstNvVideoDisplayX11 *
+gst_nv_video_display_x11_new (const gchar * name)
+{
+ GstNvVideoDisplayX11 *ret;
+
+ ret = g_object_new (GST_TYPE_NV_VIDEO_DISPLAY_X11, NULL);
+ gst_object_ref_sink (ret);
+
+ ret->dpy = XOpenDisplay (NULL);
+
+ if (!ret->dpy) {
+ GST_ERROR ("failed to open X11 display connection");
+ gst_object_unref (ret);
+ return NULL;
+ }
+
+ GST_DEBUG ("opened X11 display connection handle=%p", ret->dpy);
+
+ return ret;
+}
diff --git a/gst-plugins-nv-video-sinks/common/x11/display_x11.h b/gst-plugins-nv-video-sinks/common/x11/display_x11.h
new file mode 100644
index 0000000..26db8dc
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/x11/display_x11.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __GST_NV_VIDEO_DISPLAY_X11_H__
+#define __GST_NV_VIDEO_DISPLAY_X11_H__
+
+#include <X11/Xlib.h>
+
+#include "display.h"
+
+G_BEGIN_DECLS
+
+#define GST_TYPE_NV_VIDEO_DISPLAY_X11 \
+ (gst_nv_video_display_x11_get_type())
+#define GST_NV_VIDEO_DISPLAY_X11(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV_VIDEO_DISPLAY_X11, GstNvVideoDisplayX11))
+#define GST_NV_VIDEO_DISPLAY_X11_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_DISPLAY_X11, GstNvVideoDisplayX11Class))
+#define GST_IS_NV_VIDEO_DISPLAY_X11(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_DISPLAY_X11))
+#define GST_IS_NV_VIDEO_DISPLAY_X11_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_DISPLAY_X11))
+#define GST_NV_VIDEO_DISPLAY_X11_CAST(obj) \
+ ((GstNvVideoDisplayX11*)(obj))
+
+typedef struct _GstNvVideoDisplayX11 GstNvVideoDisplayX11;
+typedef struct _GstNvVideoDisplayX11Class GstNvVideoDisplayX11Class;
+
+struct _GstNvVideoDisplayX11
+{
+ GstNvVideoDisplay parent;
+
+ Display *dpy;
+};
+
+struct _GstNvVideoDisplayX11Class
+{
+ GstNvVideoDisplayClass parent_class;
+};
+
+GST_EXPORT
+GstNvVideoDisplayX11 * gst_nv_video_display_x11_new (const gchar * name);
+
+GType gst_nv_video_display_x11_get_type (void);
+
+G_END_DECLS
+
+#endif /* __GST_NV_VIDEO_DISPLAY_X11_H__ */
diff --git a/gst-plugins-nv-video-sinks/common/x11/window_x11.c b/gst-plugins-nv-video-sinks/common/x11/window_x11.c
new file mode 100644
index 0000000..dfb40cd
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/x11/window_x11.c
@@ -0,0 +1,157 @@
+/**
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA Corporation and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA Corporation is strictly prohibited.
+ */
+
+#include "context.h"
+#include "display_x11.h"
+#include "window_x11.h"
+#include <X11/Xutil.h>
+
+G_GNUC_INTERNAL extern GstDebugCategory *gst_debug_nv_video_window;
+#define GST_CAT_DEFAULT gst_debug_nv_video_window
+
+#define gst_nv_video_window_x11_parent_class parent_class
+G_DEFINE_TYPE (GstNvVideoWindowX11, gst_nv_video_window_x11,
+ GST_TYPE_NV_VIDEO_WINDOW);
+
+static void
+gst_nv_video_window_x11_destroy (GstNvVideoWindow * window)
+{
+ GstNvVideoWindowX11 *window_x11 = GST_NV_VIDEO_WINDOW_X11 (window);
+ GstNvVideoDisplayX11 *display_x11 = (GstNvVideoDisplayX11 *) window->display;
+
+ if (window_x11->internal_window) {
+ GST_DEBUG_OBJECT (window, "destroy internal window %" G_GUINTPTR_FORMAT,
+ window_x11->handle);
+
+ XUnmapWindow (display_x11->dpy, window_x11->handle);
+ XDestroyWindow (display_x11->dpy, window_x11->handle);
+ XSync (display_x11->dpy, FALSE);
+ window_x11->internal_window = FALSE;
+ window_x11->handle = 0;
+ } else {
+ GST_DEBUG_OBJECT (window, "unset foreign window handle %" G_GUINTPTR_FORMAT,
+ window_x11->handle);
+ window_x11->handle = 0;
+ }
+}
+
+static void
+gst_nv_video_window_x11_finalize (GObject * object)
+{
+ GstNvVideoWindow *window = GST_NV_VIDEO_WINDOW (object);
+
+ GST_DEBUG_OBJECT (window, "finalize begin");
+
+ gst_nv_video_window_x11_destroy (window);
+
+ G_OBJECT_CLASS (gst_nv_video_window_x11_parent_class)->finalize (object);
+
+ GST_DEBUG_OBJECT (window, "finalize end");
+}
+
+static guintptr
+gst_nv_video_window_x11_get_handle (GstNvVideoWindow * window)
+{
+ GstNvVideoWindowX11 *window_x11 = GST_NV_VIDEO_WINDOW_X11 (window);
+
+ return window_x11->handle;
+}
+
+static gboolean
+gst_nv_video_window_x11_set_handle (GstNvVideoWindow * window, guintptr id)
+{
+ GstNvVideoWindowX11 *window_x11 = GST_NV_VIDEO_WINDOW_X11 (window);
+
+ gst_nv_video_window_x11_destroy (window);
+ window_x11->handle = id;
+
+ GST_DEBUG_OBJECT (window, "set window handle to %" G_GUINTPTR_FORMAT, id);
+
+ return TRUE;
+}
+
+static gboolean
+gst_nv_video_window_x11_create (GstNvVideoWindow * window, gint x,
+ gint y, gint width, gint height)
+{
+ GstNvVideoWindowX11 *window_x11 = GST_NV_VIDEO_WINDOW_X11 (window);
+ GstNvVideoDisplayX11 *display_x11 = (GstNvVideoDisplayX11 *) window->display;
+ Display *dpy = display_x11->dpy;
+ int screen = DefaultScreen (dpy);
+
+ XSizeHints hints = {0};
+ hints.flags = PPosition ;
+ hints.x = x;
+ hints.y = y;
+ // GstNvVideoWindow doesn't have a destroy_window method (unlike create_window)
+ // and a GstNvVideoWindow object can't own multiple X windows. So if the
+ // upper layer has an existing window (foreign or internal), unset/destroy it.
+ //
+ // TODO: In case of an existing internal window, we might be able to re-use
+ // it with XResizeWindow.
+ gst_nv_video_window_x11_destroy (window);
+
+ window_x11->handle = XCreateSimpleWindow (dpy, RootWindow (dpy, screen),
+ hints.x, hints.y, width, height, 1,
+ BlackPixel (dpy, screen), WhitePixel (dpy, screen));
+
+ if (!window_x11->handle) {
+ GST_ERROR_OBJECT (window, "failed to create internal window\n");
+ return FALSE;
+ }
+
+ window_x11->internal_window = TRUE;
+
+ XSetWindowBackgroundPixmap (dpy, window_x11->handle, None);
+ XSetNormalHints(dpy, window_x11->handle, &hints);
+ XMapRaised (dpy, window_x11->handle);
+ XSync (dpy, FALSE);
+
+ GST_DEBUG_OBJECT (window,
+ "created internal window %dx%d, handle=%" G_GUINTPTR_FORMAT, width,
+ height, window_x11->handle);
+
+ return TRUE;
+}
+
+static void
+gst_nv_video_window_x11_class_init (GstNvVideoWindowX11Class * klass)
+{
+ GstNvVideoWindowClass *window_class = (GstNvVideoWindowClass *) klass;
+
+ window_class->create_window =
+ GST_DEBUG_FUNCPTR (gst_nv_video_window_x11_create);
+ window_class->get_handle =
+ GST_DEBUG_FUNCPTR (gst_nv_video_window_x11_get_handle);
+ window_class->set_handle =
+ GST_DEBUG_FUNCPTR (gst_nv_video_window_x11_set_handle);
+
+ G_OBJECT_CLASS (klass)->finalize = gst_nv_video_window_x11_finalize;
+}
+
+static void
+gst_nv_video_window_x11_init (GstNvVideoWindowX11 * window)
+{
+ window->handle = 0;
+ window->internal_window = FALSE;
+
+ GST_DEBUG_OBJECT (window, "init done");
+}
+
+GstNvVideoWindowX11 *
+gst_nv_video_window_x11_new (const gchar * name)
+{
+ GstNvVideoWindowX11 *ret;
+
+ ret = g_object_new (GST_TYPE_NV_VIDEO_WINDOW_X11, NULL);
+ gst_object_ref_sink (ret);
+
+ return ret;
+}
diff --git a/gst-plugins-nv-video-sinks/common/x11/window_x11.h b/gst-plugins-nv-video-sinks/common/x11/window_x11.h
new file mode 100644
index 0000000..b9b7feb
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/common/x11/window_x11.h
@@ -0,0 +1,54 @@
+/**
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA Corporation and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA Corporation is strictly prohibited.
+ */
+
+#ifndef __GST_NV_VIDEO_WINDOW_X11_H__
+#define __GST_NV_VIDEO_WINDOW_X11_H__
+
+#include "window.h"
+
+G_BEGIN_DECLS
+
+#define GST_TYPE_NV_VIDEO_WINDOW_X11 \
+ (gst_nv_video_window_x11_get_type())
+#define GST_NV_VIDEO_WINDOW_X11(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV_VIDEO_WINDOW_X11, GstNvVideoWindowX11))
+#define GST_NV_VIDEO_WINDOW_X11_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_WINDOW_X11, GstNvVideoWindowX11Class))
+#define GST_IS_NV_VIDEO_WINDOW_X11(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_WINDOW_X11))
+#define GST_IS_NV_VIDEO_WINDOW_X11_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_WINDOW_X11))
+#define GST_NV_VIDEO_WINDOW_X11_CAST(obj) \
+ ((GstNvVideoWindowX11*)(obj))
+
+typedef struct _GstNvVideoWindowX11 GstNvVideoWindowX11;
+typedef struct _GstNvVideoWindowX11Class GstNvVideoWindowX11Class;
+
+struct _GstNvVideoWindowX11
+{
+ GstNvVideoWindow parent;
+
+ guintptr handle;
+ gboolean internal_window;
+};
+
+struct _GstNvVideoWindowX11Class
+{
+ GstNvVideoWindowClass parent_class;
+};
+
+GST_EXPORT
+GstNvVideoWindowX11 *gst_nv_video_window_x11_new (const gchar * name);
+
+GType gst_nv_video_window_x11_get_type (void);
+
+G_END_DECLS
+
+#endif /* __GST_NV_VIDEO_WINDOW_X11_H__ */
diff --git a/gst-plugins-nv-video-sinks/gstnvvideosinks.c b/gst-plugins-nv-video-sinks/gstnvvideosinks.c
new file mode 100644
index 0000000..94492bb
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/gstnvvideosinks.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include <gst/gst.h>
+
+#if NV_VIDEO_SINKS_HAS_NV3DSINK
+#include "nv3dsink/gstnv3dsink.h"
+#endif
+
+#if NV_VIDEO_SINKS_HAS_X11
+#include <X11/Xlib.h>
+#endif
+
+GST_DEBUG_CATEGORY_STATIC (gst_nvvideosinks_debug);
+#define GST_CAT_DEFAULT gst_nvvideosinks_debug
+
+static gboolean
+plugin_init (GstPlugin * plugin)
+{
+#if NV_VIDEO_SINKS_HAS_X11
+ XInitThreads ();
+#endif
+
+ /* debug category for filtering log messages */
+ GST_DEBUG_CATEGORY_INIT (gst_nvvideosinks_debug, "nvvideosinks", 0,
+ "Nvidia video sinks");
+
+#if NV_VIDEO_SINKS_HAS_NV3DSINK
+ if (!gst_element_register (plugin, "nv3dsink", GST_RANK_SECONDARY,
+ GST_TYPE_NV3DSINK)) {
+ return FALSE;
+ }
+#endif
+
+ return TRUE;
+}
+
+/* PACKAGE is usually set by autotools but we are not using autotools
+ * to compile this code, so set it ourselves. GST_PLUGIN_DEFINE needs
+ * PACKAGE to be defined.
+ */
+#ifndef PACKAGE
+#define PACKAGE "gst-plugins-nv-video-sinks"
+#endif
+
+/* gstreamer looks for this structure to register plugins */
+GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
+ GST_VERSION_MINOR,
+ nvvideosinks,
+ "Nvidia Video Sink Plugins",
+ plugin_init, "0.0.1", "Proprietary", "Nvidia Video Sink Plugins",
+ "http://nvidia.com/")
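Once the plugin is installed, the nv3dsink element registered above behaves like any other GStreamer video sink. A minimal sketch of driving it from an application; the pipeline description, the NV12 format choice and the three-second run time are arbitrary example values.

#include <gst/gst.h>

int
main (int argc, char *argv[])
{
  GstElement *pipeline;

  gst_init (&argc, &argv);

  /* NV12 is one of the formats advertised by nv3dsink's sink pad. */
  pipeline = gst_parse_launch ("videotestsrc ! video/x-raw,format=NV12 ! "
      "nv3dsink window-width=640 window-height=480", NULL);
  if (!pipeline)
    return -1;

  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  g_usleep (3 * G_USEC_PER_SEC);        /* render for a few seconds */
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);

  return 0;
}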
diff --git a/gst-plugins-nv-video-sinks/nv3dsink/gstnv3dsink.c b/gst-plugins-nv-video-sinks/nv3dsink/gstnv3dsink.c
new file mode 100644
index 0000000..c0a6adb
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/nv3dsink/gstnv3dsink.c
@@ -0,0 +1,577 @@
+/*
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include "gstnv3dsink.h"
+#include "display.h"
+#include "context.h"
+#include "window.h"
+
+GST_DEBUG_CATEGORY (gst_debug_nv3dsink);
+#define GST_CAT_DEFAULT gst_debug_nv3dsink
+
+GST_DEBUG_CATEGORY_EXTERN (GST_CAT_PERFORMANCE);
+
+#define GST_CAPS_FEATURE_MEMORY_NVMM "memory:NVMM"
+
+static void gst_nv3dsink_videooverlay_init (GstVideoOverlayInterface * iface);
+static void gst_nv3dsink_set_window_handle (GstVideoOverlay * overlay,
+ guintptr id);
+static void gst_nv3dsink_expose (GstVideoOverlay * overlay);
+static void gst_nv3dsink_handle_events (GstVideoOverlay * overlay,
+ gboolean handle_events);
+static void gst_nv3dsink_set_render_rectangle (GstVideoOverlay * overlay,
+ gint x, gint y, gint width, gint height);
+
+/* Input capabilities. */
+static GstStaticPadTemplate gst_nv3dsink_sink_template_factory =
+ GST_STATIC_PAD_TEMPLATE ("sink",
+ GST_PAD_SINK,
+ GST_PAD_ALWAYS,
+ GST_STATIC_CAPS (
+ //Supported Software buffer caps
+ GST_VIDEO_CAPS_MAKE ("{ "
+ "RGBA, BGRA, ARGB, ABGR, " "RGBx, BGRx, xRGB, xBGR, "
+ "AYUV, Y444, I420, YV12, " "NV12, NV21, Y42B, Y41B, "
+ "RGB, BGR, RGB16 }")
+ ";"
+ GST_VIDEO_CAPS_MAKE_WITH_FEATURES (
+ GST_CAPS_FEATURE_MEMORY_NVMM,
+ "{ RGBA, I420, NV12 }")
+ ));
+
+#define parent_class gst_nv3dsink_parent_class
+G_DEFINE_TYPE_WITH_CODE (GstNv3dSink, gst_nv3dsink, GST_TYPE_VIDEO_SINK,
+ G_IMPLEMENT_INTERFACE (GST_TYPE_VIDEO_OVERLAY,
+ gst_nv3dsink_videooverlay_init);
+ GST_DEBUG_CATEGORY_INIT (gst_debug_nv3dsink, "nv3dsink", 0,
+ "Nvidia 3D sink"));
+
+enum
+{
+ PROP_0,
+ PROP_WINDOW_X,
+ PROP_WINDOW_Y,
+ PROP_WINDOW_WIDTH,
+ PROP_WINDOW_HEIGHT
+};
+
+/* GObject vmethod implementations */
+
+static void
+gst_nv3dsink_videooverlay_init (GstVideoOverlayInterface * iface)
+{
+ iface->set_window_handle = gst_nv3dsink_set_window_handle;
+ iface->expose = gst_nv3dsink_expose;
+ iface->handle_events = gst_nv3dsink_handle_events;
+ iface->set_render_rectangle = gst_nv3dsink_set_render_rectangle;
+}
+
+static void
+gst_nv3dsink_set_window_handle (GstVideoOverlay * overlay, guintptr id)
+{
+ GstNv3dSink *nv3dsink = GST_NV3DSINK (overlay);
+ gint width = 0;
+ gint height = 0;
+
+ g_return_if_fail (GST_IS_NV3DSINK (nv3dsink));
+
+ g_mutex_lock (&nv3dsink->win_handle_lock);
+
+ GST_DEBUG_OBJECT (nv3dsink, "set_window_handle %" G_GUINTPTR_FORMAT, id);
+
+ if (gst_nv_video_window_get_handle (nv3dsink->window) == id) {
+ g_mutex_unlock (&nv3dsink->win_handle_lock);
+ return;
+ }
+
+ if (id) {
+ gst_nv_video_window_set_handle (nv3dsink->window, id);
+ g_mutex_unlock (&nv3dsink->win_handle_lock);
+ return;
+ }
+
+ if (!GST_VIDEO_SINK_WIDTH (nv3dsink) || !GST_VIDEO_SINK_HEIGHT (nv3dsink)) {
+ // window will be created during caps negotiation
+ g_mutex_unlock (&nv3dsink->win_handle_lock);
+ return;
+ }
+ // create internal window
+ if (nv3dsink->window_width != 0 && nv3dsink->window_height != 0) {
+ width = nv3dsink->window_width;
+ height = nv3dsink->window_height;
+ } else {
+ width = GST_VIDEO_SINK_WIDTH (nv3dsink);
+ height = GST_VIDEO_SINK_HEIGHT (nv3dsink);
+ }
+ if (!gst_nv_video_window_create_window (nv3dsink->window,
+ nv3dsink->window_x, nv3dsink->window_y, width, height)) {
+ g_mutex_unlock (&nv3dsink->win_handle_lock);
+ return;
+ }
+
+ g_mutex_unlock (&nv3dsink->win_handle_lock);
+}
+
+static void
+gst_nv3dsink_expose (GstVideoOverlay * overlay)
+{
+ GstNv3dSink *nv3dsink = GST_NV3DSINK (overlay);
+
+ GST_DEBUG_OBJECT (nv3dsink, "expose unimplemented");
+}
+
+static void
+gst_nv3dsink_handle_events (GstVideoOverlay * overlay, gboolean handle_events)
+{
+ GstNv3dSink *nv3dsink = GST_NV3DSINK (overlay);
+
+ GST_DEBUG_OBJECT (nv3dsink, "handle_events unimplemented");
+}
+
+static void
+gst_nv3dsink_set_render_rectangle (GstVideoOverlay * overlay, gint x, gint y,
+ gint width, gint height)
+{
+ GstNv3dSink *nv3dsink = GST_NV3DSINK (overlay);
+
+ g_return_if_fail (GST_IS_NV3DSINK (nv3dsink));
+
+ GST_DEBUG_OBJECT (nv3dsink, "set_render_rectangle unimplemented");
+
+ return;
+}
+
+static void
+gst_nv3dsink_set_property (GObject * object, guint prop_id,
+ const GValue * value, GParamSpec * pspec)
+{
+ GstNv3dSink *nv3dsink;
+
+ g_return_if_fail (GST_IS_NV3DSINK (object));
+
+ nv3dsink = GST_NV3DSINK (object);
+
+ switch (prop_id) {
+ case PROP_WINDOW_X:
+ nv3dsink->window_x = g_value_get_uint (value);
+ break;
+ case PROP_WINDOW_Y:
+ nv3dsink->window_y = g_value_get_uint (value);
+ break;
+ case PROP_WINDOW_WIDTH:
+ nv3dsink->window_width = g_value_get_uint (value);
+ break;
+ case PROP_WINDOW_HEIGHT:
+ nv3dsink->window_height = g_value_get_uint (value);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
+static void
+gst_nv3dsink_get_property (GObject * object, guint prop_id,
+ GValue * value, GParamSpec * pspec)
+{
+ GstNv3dSink *nv3dsink;
+
+ g_return_if_fail (GST_IS_NV3DSINK (object));
+
+ nv3dsink = GST_NV3DSINK (object);
+
+ switch (prop_id) {
+ case PROP_WINDOW_X:
+ g_value_set_uint (value, nv3dsink->window_x);
+ break;
+ case PROP_WINDOW_Y:
+ g_value_set_uint (value, nv3dsink->window_y);
+ break;
+ case PROP_WINDOW_WIDTH:
+ g_value_set_uint (value, nv3dsink->window_width);
+ break;
+ case PROP_WINDOW_HEIGHT:
+ g_value_set_uint (value, nv3dsink->window_height);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
+static void
+gst_nv3dsink_finalize (GObject * object)
+{
+ GstNv3dSink *nv3dsink;
+
+ g_return_if_fail (GST_IS_NV3DSINK (object));
+
+ nv3dsink = GST_NV3DSINK (object);
+
+ GST_TRACE_OBJECT (nv3dsink, "finalize");
+
+ g_mutex_clear (&nv3dsink->win_handle_lock);
+
+ G_OBJECT_CLASS (parent_class)->finalize (object);
+}
+
+static gboolean
+gst_nv3dsink_start (GstBaseSink * bsink)
+{
+ GstNv3dSink *nv3dsink = GST_NV3DSINK (bsink);
+ GstNvVideoWindow *window;
+
+ GST_TRACE_OBJECT (nv3dsink, "start");
+
+ // TODO: Query display from application/upstream elements if there
+ // is such use case.
+
+ if (!nv3dsink->display) {
+ if (!gst_nv_video_display_new (&nv3dsink->display)) {
+ GST_ERROR_OBJECT (nv3dsink, "failed to create new display");
+ return FALSE;
+ }
+ } else {
+ GST_DEBUG_OBJECT (nv3dsink, "using existing display (%p)",
+ nv3dsink->display);
+ }
+
+ if (!nv3dsink->context) {
+ if (!gst_nv_video_display_create_context (nv3dsink->display,
+ &nv3dsink->context)) {
+ GST_ERROR_OBJECT (nv3dsink, "failed to create new context");
+ return FALSE;
+ }
+ } else {
+ GST_DEBUG_OBJECT (nv3dsink, "using existing context (%p)",
+ nv3dsink->context);
+ }
+
+ if (!nv3dsink->window) {
+ window = gst_nv_video_display_create_window (nv3dsink->display);
+ if (window == NULL) {
+ GST_ERROR_OBJECT (nv3dsink, "failed to create new window");
+ return FALSE;
+ }
+ nv3dsink->window = gst_object_ref (window);
+ gst_object_unref (window);
+ gst_nv_video_context_set_window (nv3dsink->context, nv3dsink->window);
+ } else {
+ GST_DEBUG_OBJECT (nv3dsink, "using existing window (%p)", nv3dsink->window);
+ }
+
+ return TRUE;
+}
+
+static gboolean
+gst_nv3dsink_stop (GstBaseSink * bsink)
+{
+ GstNv3dSink *nv3dsink = GST_NV3DSINK (bsink);
+
+ GST_TRACE_OBJECT (nv3dsink, "stop");
+
+ if (nv3dsink->configured_caps) {
+ gst_caps_unref (nv3dsink->configured_caps);
+ nv3dsink->configured_caps = NULL;
+ }
+
+ if (nv3dsink->context) {
+ gst_object_unref (nv3dsink->context);
+ nv3dsink->context = NULL;
+ }
+
+ if (nv3dsink->window) {
+ g_object_unref (nv3dsink->window);
+ nv3dsink->window = NULL;
+ }
+
+ if (nv3dsink->display) {
+ g_object_unref (nv3dsink->display);
+ nv3dsink->display = NULL;
+ }
+
+ return TRUE;
+}
+
+static GstCaps *
+gst_nv3dsink_get_caps (GstBaseSink * bsink, GstCaps * filter)
+{
+ GstNv3dSink *nv3dsink;
+ GstCaps *tmp = NULL;
+ GstCaps *result = NULL;
+ GstCaps *caps = NULL;
+
+ nv3dsink = GST_NV3DSINK (bsink);
+
+ tmp = gst_pad_get_pad_template_caps (GST_BASE_SINK_PAD (bsink));
+
+ if (filter) {
+ GST_DEBUG_OBJECT (bsink, "intersecting with filter caps %" GST_PTR_FORMAT,
+ filter);
+
+ result = gst_caps_intersect_full (filter, tmp, GST_CAPS_INTERSECT_FIRST);
+ gst_caps_unref (tmp);
+ } else {
+ result = tmp;
+ }
+
+ caps = gst_nv_video_context_get_caps (nv3dsink->context);
+ if (caps) {
+ result = gst_caps_intersect (result, caps);
+ gst_caps_unref (caps);
+ }
+
+ GST_DEBUG_OBJECT (bsink, "returning caps: %" GST_PTR_FORMAT, result);
+
+ return result;
+}
+
+static gboolean
+gst_nv3dsink_set_caps (GstBaseSink * bsink, GstCaps * caps)
+{
+ GstNv3dSink *nv3dsink;
+ GstVideoInfo info;
+ GstCapsFeatures *features;
+ gint width = 0;
+ gint height = 0;
+
+ nv3dsink = GST_NV3DSINK (bsink);
+
+ if (!nv3dsink->context || !nv3dsink->display) {
+ return FALSE;
+ }
+
+ GST_DEBUG_OBJECT (bsink, "set caps with %" GST_PTR_FORMAT, caps);
+
+ if (nv3dsink->configured_caps) {
+ if (gst_caps_can_intersect (caps, nv3dsink->configured_caps)) {
+ return TRUE;
+ }
+ }
+
+ features = gst_caps_get_features (caps, 0);
+ if (gst_caps_features_contains (features, GST_CAPS_FEATURE_MEMORY_NVMM)) {
+ nv3dsink->context->using_NVMM = 1;
+ }
+
+ if (!gst_video_info_from_caps (&info, caps)) {
+ GST_ERROR_OBJECT (nv3dsink, "Invalid caps %" GST_PTR_FORMAT, caps);
+ return FALSE;
+ }
+
+ nv3dsink->context->configured_info = info;
+
+ gboolean is_res_changed = FALSE;
+
+ if ((GST_VIDEO_SINK_WIDTH (nv3dsink) != 0 && GST_VIDEO_SINK_HEIGHT (nv3dsink) != 0) &&
+ (GST_VIDEO_SINK_WIDTH (nv3dsink) != info.width ||
+ GST_VIDEO_SINK_HEIGHT (nv3dsink) != info.height)) {
+ is_res_changed = TRUE;
+ }
+
+ if (is_res_changed) {
+ gst_nv_video_context_handle_tearing (nv3dsink->context);
+ }
+
+ GST_VIDEO_SINK_WIDTH (nv3dsink) = info.width;
+ GST_VIDEO_SINK_HEIGHT (nv3dsink) = info.height;
+
+ g_mutex_lock (&nv3dsink->win_handle_lock);
+ if (!gst_nv_video_window_get_handle (nv3dsink->window)) {
+ g_mutex_unlock (&nv3dsink->win_handle_lock);
+ gst_video_overlay_prepare_window_handle (GST_VIDEO_OVERLAY (nv3dsink));
+ } else {
+ g_mutex_unlock (&nv3dsink->win_handle_lock);
+ }
+
+ if (GST_VIDEO_SINK_WIDTH (nv3dsink) <= 0
+ || GST_VIDEO_SINK_HEIGHT (nv3dsink) <= 0) {
+ GST_ERROR_OBJECT (nv3dsink, "invalid size");
+ return FALSE;
+ }
+
+ g_mutex_lock (&nv3dsink->win_handle_lock);
+ if (!gst_nv_video_window_get_handle (nv3dsink->window)) {
+ if (nv3dsink->window_width != 0 && nv3dsink->window_height != 0) {
+ width = nv3dsink->window_width;
+ height = nv3dsink->window_height;
+ } else {
+ width = GST_VIDEO_SINK_WIDTH (nv3dsink);
+ height = GST_VIDEO_SINK_HEIGHT (nv3dsink);
+ }
+ if (!gst_nv_video_window_create_window (nv3dsink->window,
+ nv3dsink->window_x, nv3dsink->window_y, width, height)) {
+ g_mutex_unlock (&nv3dsink->win_handle_lock);
+ return FALSE;
+ }
+ }
+ g_mutex_unlock (&nv3dsink->win_handle_lock);
+
+ gst_caps_replace (&nv3dsink->configured_caps, caps);
+
+ return TRUE;
+}
+
+static gboolean
+gst_nv3dsink_propose_allocation (GstBaseSink * bsink, GstQuery * query)
+{
+ GstNv3dSink *nv3dsink = GST_NV3DSINK (bsink);
+
+ gst_nv_video_context_handle_drc (nv3dsink->context);
+ gst_query_add_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL);
+ gst_query_add_allocation_meta (query, GST_VIDEO_CROP_META_API_TYPE, NULL);
+
+ return TRUE;
+}
+
+static GstFlowReturn
+gst_nv3dsink_show_frame (GstVideoSink * vsink, GstBuffer * buf)
+{
+ GstNv3dSink *nv3dsink;
+
+ nv3dsink = GST_NV3DSINK (vsink);
+
+ GST_TRACE_OBJECT (nv3dsink, "show buffer %p, window size:%ux%u", buf,
+ GST_VIDEO_SINK_WIDTH (nv3dsink), GST_VIDEO_SINK_HEIGHT (nv3dsink));
+
+ if (!gst_nv_video_context_show_frame (nv3dsink->context, buf)) {
+ return GST_FLOW_FLUSHING;
+ }
+
+ return GST_FLOW_OK;
+}
+
+static gboolean
+gst_nv3dsink_event (GstBaseSink * bsink, GstEvent * event)
+{
+ GstNv3dSink *nv3dsink = GST_NV3DSINK (bsink);
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_EOS:
+ gst_nv_video_context_handle_eos (nv3dsink->context);
+ break;
+
+ default:
+ break;
+ }
+
+ if (GST_BASE_SINK_CLASS (parent_class)->event)
+ return GST_BASE_SINK_CLASS (parent_class)->event (bsink, event);
+ else
+ gst_event_unref (event);
+
+ return TRUE;
+}
+
+static GstStateChangeReturn
+gst_nv3dsink_change_state (GstElement * element, GstStateChange transition)
+{
+ GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS;
+ GstNv3dSink *nv3dsink = GST_NV3DSINK (element);
+
+ switch (transition) {
+ case GST_STATE_CHANGE_PAUSED_TO_READY:
+ /* call handle_eos to unref last buffer */
+ gst_nv_video_context_handle_eos (nv3dsink->context);
+ break;
+ default:
+ break;
+ }
+
+ ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
+
+ return ret;
+}
+
+/* initialize the plugin's class */
+static void
+gst_nv3dsink_class_init (GstNv3dSinkClass * klass)
+{
+ GObjectClass *gobject_class;
+ GstElementClass *gstelement_class;
+ GstBaseSinkClass *gstbasesink_class;
+ GstVideoSinkClass *gstvideosink_class;
+
+ gobject_class = (GObjectClass *) klass;
+ gstelement_class = (GstElementClass *) klass;
+ gstbasesink_class = (GstBaseSinkClass *) klass;
+ gstvideosink_class = (GstVideoSinkClass *) klass;
+
+ gobject_class->set_property = gst_nv3dsink_set_property;
+ gobject_class->get_property = gst_nv3dsink_get_property;
+
+ gst_element_class_set_static_metadata (gstelement_class, "Nvidia 3D sink",
+ "Sink/Video", "A videosink based on 3D graphics rendering API",
+ "Yogish Kulkarni ");
+
+ gobject_class->finalize = gst_nv3dsink_finalize;
+
+ gst_element_class_add_static_pad_template (gstelement_class,
+ &gst_nv3dsink_sink_template_factory);
+
+ gstbasesink_class->start = GST_DEBUG_FUNCPTR (gst_nv3dsink_start);
+ gstbasesink_class->stop = GST_DEBUG_FUNCPTR (gst_nv3dsink_stop);
+ gstbasesink_class->set_caps = GST_DEBUG_FUNCPTR (gst_nv3dsink_set_caps);
+ gstbasesink_class->get_caps = GST_DEBUG_FUNCPTR (gst_nv3dsink_get_caps);
+ gstbasesink_class->propose_allocation =
+ GST_DEBUG_FUNCPTR (gst_nv3dsink_propose_allocation);
+
+ gstbasesink_class->event = gst_nv3dsink_event;
+
+ gstvideosink_class->show_frame = GST_DEBUG_FUNCPTR (gst_nv3dsink_show_frame);
+ gstelement_class->change_state = gst_nv3dsink_change_state;
+
+ g_object_class_install_property (gobject_class, PROP_WINDOW_X,
+ g_param_spec_uint ("window-x",
+ "Window x coordinate",
+ "X coordinate of window", 0, G_MAXINT, 10,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+ g_object_class_install_property (gobject_class, PROP_WINDOW_Y,
+ g_param_spec_uint ("window-y",
+ "Window y coordinate",
+ "Y coordinate of window", 0, G_MAXINT, 10,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+ g_object_class_install_property (gobject_class, PROP_WINDOW_WIDTH,
+ g_param_spec_uint ("window-width",
+ "Window width",
+ "Width of window", 0, G_MAXINT, 0,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+ g_object_class_install_property (gobject_class, PROP_WINDOW_HEIGHT,
+ g_param_spec_uint ("window-height",
+ "Window height",
+ "Height of window", 0, G_MAXINT, 0,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+}
+
+/* initialize the new element */
+static void
+gst_nv3dsink_init (GstNv3dSink * nv3dsink)
+{
+ GST_TRACE_OBJECT (nv3dsink, "init");
+
+ nv3dsink->display = NULL;
+ nv3dsink->context = NULL;
+ nv3dsink->window = NULL;
+ nv3dsink->window_x = 0;
+ nv3dsink->window_y = 0;
+ nv3dsink->window_width = 0;
+ nv3dsink->window_height = 0;
+
+ nv3dsink->configured_caps = NULL;
+
+ /* mutex to serialize create, set and get window handle calls */
+ g_mutex_init (&nv3dsink->win_handle_lock);
+}
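Because the sink implements the GstVideoOverlay interface, an application can render into its own X window instead of letting nv3dsink create an internal one; when no handle is supplied, set_caps() falls back to creating a window sized from the negotiated caps or the window-width/window-height properties. A minimal sketch, where the element pointer and the XID are assumed to be supplied by the application:

#include <gst/gst.h>
#include <gst/video/videooverlay.h>

static void
embed_in_app_window (GstElement * sink, guintptr xid)
{
  /* Routed to gst_nv3dsink_set_window_handle() above; "sink" is an
   * nv3dsink element and "xid" an application-owned X window id. */
  gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (sink), xid);
}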
diff --git a/gst-plugins-nv-video-sinks/nv3dsink/gstnv3dsink.h b/gst-plugins-nv-video-sinks/nv3dsink/gstnv3dsink.h
new file mode 100644
index 0000000..b7564f7
--- /dev/null
+++ b/gst-plugins-nv-video-sinks/nv3dsink/gstnv3dsink.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License, version 2.1, as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __GST_NV3DSINK_H__
+#define __GST_NV3DSINK_H__
+
+#include <gst/gst.h>
+#include <gst/video/video.h>
+#include <gst/video/gstvideosink.h>
+
+#include "gstnvvideofwd.h"
+
+G_BEGIN_DECLS
+
+GST_DEBUG_CATEGORY_EXTERN (gst_debug_nv3dsink);
+
+#define GST_TYPE_NV3DSINK \
+ (gst_nv3dsink_get_type())
+#define GST_NV3DSINK(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV3DSINK, GstNv3dSink))
+#define GST_NV3DSINK_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV3DSINK, GstNv3dSinkClass))
+#define GST_IS_NV3DSINK(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV3DSINK))
+#define GST_IS_NV3DSINK_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV3DSINK))
+
+typedef struct _GstNv3dSink GstNv3dSink;
+typedef struct _GstNv3dSinkClass GstNv3dSinkClass;
+
+struct _GstNv3dSink
+{
+ GstVideoSink parent;
+
+ GstNvVideoDisplay *display;
+ GstNvVideoContext *context;
+ GstNvVideoWindow *window;
+ gint window_x;
+ gint window_y;
+ gint window_width;
+ gint window_height;
+
+ GMutex win_handle_lock;
+
+ GstCaps *configured_caps;
+};
+
+struct _GstNv3dSinkClass
+{
+ GstVideoSinkClass parent_class;
+};
+
+GType gst_nv3dsink_get_type (void);
+
+G_END_DECLS
+
+#endif /* __GST_NV3DSINK_H__ */
diff --git a/nvbufsurface.h b/nvbufsurface.h
new file mode 100644
index 0000000..8fb2621
--- /dev/null
+++ b/nvbufsurface.h
@@ -0,0 +1,866 @@
+/*
+ * Copyright (c) 2019-2024, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA Corporation and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA Corporation is strictly prohibited.
+ */
+
+/**
+ * @file nvbufsurface.h
+ * NvBufSurface Interface
+ *
+ * This file specifies the NvBufSurface management API.
+ *
+ * The NvBufSurface API provides methods to allocate / deallocate, map / unmap
+ * and copy batched buffers.
+ */
+ /**
+ * @defgroup ds_nvbuf_api Buffer Management API module
+ *
+ * This section describes types and functions of NvBufSurface application
+ * programming interface.
+ *
+ */
+
+#ifndef NVBUFSURFACE_H_
+#define NVBUFSURFACE_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/** @defgroup ds_aaa NvBufSurface Types and Functions
+ * Defines types and functions of \ref NvBufSurface application
+ * programming interface.
+ * @ingroup ds_nvbuf_api
+ * @{ */
+
+/** Defines the default padding length for reserved fields of structures. */
+#define STRUCTURE_PADDING 4
+
+/** Defines the maximum number of planes. */
+#define NVBUF_MAX_PLANES 4
+
+/**
+ * Defines the default values for chroma subsampling.
+ * The default value matches JPEG/MPEG use cases.
+ */
+#define NVBUFSURFACE_CHROMA_SUBSAMPLING_HORIZ_DEFAULT 0
+#define NVBUFSURFACE_CHROMA_SUBSAMPLING_VERT_DEFAULT 1
+
+#define NVBUFSURFACE_CHROMA_SUBSAMPLING_PARAMS_DEFAULT \
+ { \
+ NVBUFSURFACE_CHROMA_SUBSAMPLING_HORIZ_DEFAULT, \
+ NVBUFSURFACE_CHROMA_SUBSAMPLING_VERT_DEFAULT \
+ }
+
+/**
+ * Defines mapping types of NvBufSurface.
+ */
+typedef enum
+{
+ NVBUF_MAP_READ, /**< Specifies \ref NvBufSurface mapping type "read." */
+ NVBUF_MAP_WRITE, /**< Specifies \ref NvBufSurface mapping type
+ "write." */
+ NVBUF_MAP_READ_WRITE, /**< Specifies \ref NvBufSurface mapping type
+ "read/write." */
+} NvBufSurfaceMemMapFlags;
+
+/**
+ * Defines tags that identify the components requesting a memory allocation.
+ * The tags can be used later to identify the total memory allocated to
+ * particular types of components.
+ * TODO: Check if DeepStream requires more tags to be defined.
+ */
+typedef enum
+{
+ /** tag None. */
+ NvBufSurfaceTag_NONE = 0x0,
+ /** tag for Camera. */
+ NvBufSurfaceTag_CAMERA = 0x200,
+ /** tag for Jpeg Encoder/Decoder. */
+ NvBufSurfaceTag_JPEG = 0x1500,
+ /** tag for VPR Buffers. */
+ NvBufSurfaceTag_PROTECTED = 0x1504,
+ /** tag for H264/H265 Video Encoder. */
+ NvBufSurfaceTag_VIDEO_ENC = 0x1200,
+ /** tag for H264/H265/VP9 Video Decoder. */
+ NvBufSurfaceTag_VIDEO_DEC = 0x1400,
+ /** tag for Video Transform/Composite/Blend. */
+ NvBufSurfaceTag_VIDEO_CONVERT = 0xf01,
+} NvBufSurfaceTag;
+
+/**
+ * Defines color formats for NvBufSurface.
+ */
+typedef enum
+{
+ /** Specifies an invalid color format. */
+ NVBUF_COLOR_FORMAT_INVALID,
+ /** Specifies 8 bit GRAY scale - single plane */
+ NVBUF_COLOR_FORMAT_GRAY8,
+ /** Specifies BT.601 colorspace - YUV420 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV420,
+ /** Specifies BT.601 colorspace - YVU420 multi-planar. */
+ NVBUF_COLOR_FORMAT_YVU420,
+ /** Specifies BT.601 colorspace - YUV420 ER multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV420_ER,
+ /** Specifies BT.601 colorspace - YVU420 ER multi-planar. */
+ NVBUF_COLOR_FORMAT_YVU420_ER,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:2:0 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12,
+ /** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_ER,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:2:0 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV21,
+ /** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV21_ER,
+ /** Specifies BT.601 colorspace - YUV 4:2:2 planar. */
+ NVBUF_COLOR_FORMAT_UYVY,
+ /** Specifies BT.601 colorspace - YUV ER 4:2:2 planar. */
+ NVBUF_COLOR_FORMAT_UYVY_ER,
+ /** Specifies BT.601 colorspace - YUV 4:2:2 planar. */
+ NVBUF_COLOR_FORMAT_VYUY,
+ /** Specifies BT.601 colorspace - YUV ER 4:2:2 planar. */
+ NVBUF_COLOR_FORMAT_VYUY_ER,
+ /** Specifies BT.601 colorspace - YUV 4:2:2 planar. */
+ NVBUF_COLOR_FORMAT_YUYV,
+ /** Specifies BT.601 colorspace - YUV ER 4:2:2 planar. */
+ NVBUF_COLOR_FORMAT_YUYV_ER,
+ /** Specifies BT.601 colorspace - YUV 4:2:2 planar. */
+ NVBUF_COLOR_FORMAT_YVYU,
+ /** Specifies BT.601 colorspace - YUV ER 4:2:2 planar. */
+ NVBUF_COLOR_FORMAT_YVYU_ER,
+ /** Specifies BT.601 colorspace - YUV444 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444,
+ /** Specifies RGBA-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_RGBA,
+ /** Specifies BGRA-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_BGRA,
+ /** Specifies ARGB-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_ARGB,
+ /** Specifies ABGR-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_ABGR,
+ /** Specifies RGBx-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_RGBx,
+ /** Specifies BGRx-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_BGRx,
+ /** Specifies xRGB-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_xRGB,
+ /** Specifies xBGR-8-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_xBGR,
+ /** Specifies RGB-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_RGB,
+ /** Specifies BGR-8-8-8 single plane. */
+ NVBUF_COLOR_FORMAT_BGR,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_10LE,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_12LE,
+ /** Specifies BT.709 colorspace - YUV420 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV420_709,
+ /** Specifies BT.709 colorspace - YUV420 ER multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV420_709_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:2:0 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_709,
+ /** Specifies BT.709 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_709_ER,
+ /** Specifies BT.2020 colorspace - YUV420 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV420_2020,
+ /** Specifies BT.2020 colorspace - Y/CbCr 4:2:0 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_2020,
+ /** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_10LE_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_10LE_709,
+ /** Specifies BT.709 colorspace - Y/CbCr ER 4:2:0 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_10LE_709_ER,
+ /** Specifies BT.2020 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_10LE_2020,
+ /** Specifies color format for packed 2 signed shorts */
+ NVBUF_COLOR_FORMAT_SIGNED_R16G16,
+ /** Specifies RGB- unsigned 8 bit multiplanar plane. */
+ NVBUF_COLOR_FORMAT_R8_G8_B8,
+ /** Specifies BGR- unsigned 8 bit multiplanar plane. */
+ NVBUF_COLOR_FORMAT_B8_G8_R8,
+ /** Specifies RGB-32bit Floating point multiplanar plane. */
+ NVBUF_COLOR_FORMAT_R32F_G32F_B32F,
+ /** Specifies BGR-32bit Floating point multiplanar plane. */
+ NVBUF_COLOR_FORMAT_B32F_G32F_R32F,
+ /** Specifies BT.601 colorspace - YUV422 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV422,
+ /** Specifies BT.601 colorspace - Y/CrCb 4:2:0 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV21_10LE,
+ /** Specifies BT.601 colorspace - Y/CrCb 4:2:0 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV21_12LE,
+ /** Specifies BT.2020 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_12LE_2020,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:2:2 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV16,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:2:2 10-bit semi-planar. */
+ NVBUF_COLOR_FORMAT_NV16_10LE,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24,
+ /** Specifies BT.601 colorspace - Y/CrCb 4:4:4 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_10LE,
+ /** Specifies BT.601_ER colorspace - Y/CbCr 4:2:2 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV16_ER,
+ /** Specifies BT.601_ER colorspace - Y/CbCr 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:2:2 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV16_709,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_709,
+ /** Specifies BT.709_ER colorspace - Y/CbCr 4:2:2 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV16_709_ER,
+ /** Specifies BT.709_ER colorspace - Y/CbCr 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_709_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_10LE_709,
+ /** Specifies BT.709 ER colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_10LE_709_ER,
+ /** Specifies BT.2020 colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_10LE_2020,
+ /** Specifies BT.2020 colorspace - Y/CbCr 12 bit 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_NV24_12LE_2020,
+ /** Specifies Non-linear RGB BT.709 colorspace - RGBA-10-10-10-2 planar. */
+ NVBUF_COLOR_FORMAT_RGBA_10_10_10_2_709,
+ /** Specifies Non-linear RGB BT.2020 colorspace - RGBA-10-10-10-2 planar. */
+ NVBUF_COLOR_FORMAT_RGBA_10_10_10_2_2020,
+ /** Specifies Non-linear RGB BT.709 colorspace - BGRA-10-10-10-2 planar. */
+ NVBUF_COLOR_FORMAT_BGRA_10_10_10_2_709,
+ /** Specifies Non-linear RGB BT.2020 colorspace - BGRA-10-10-10-2 planar. */
+ NVBUF_COLOR_FORMAT_BGRA_10_10_10_2_2020,
+ /** Specifies Optical flow SAD calculation Buffer format */
+ NVBUF_COLOR_FORMAT_A32,
+ /** Specifies BT.601 colorspace - 10 bit YUV 4:2:2 interleaved. */
+ NVBUF_COLOR_FORMAT_UYVP,
+ /** Specifies BT.601 colorspace - 10 bit YUV ER 4:2:2 interleaved. */
+ NVBUF_COLOR_FORMAT_UYVP_ER,
+ /** Specifies BT.601 colorspace - Y/CbCr ER 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_709,
+ /** Specifies BT.709 colorspace - Y/CbCr ER 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_709_ER,
+ /** Specifies BT.2020 colorspace - Y/CbCr 4:4:4 multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_2020,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:4:4 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_10LE,
+ /** Specifies BT.601 colorspace - Y/CbCr ER 4:4:4 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_10LE_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:4:4 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_10LE_709,
+ /** Specifies BT.709 colorspace - Y/CbCr ER 4:4:4 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_10LE_709_ER,
+ /** Specifies BT.2020 colorspace - Y/CbCr 4:4:4 10-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_10LE_2020,
+ /** Specifies BT.601 colorspace - Y/CbCr 4:4:4 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_12LE,
+ /** Specifies BT.601 colorspace - Y/CbCr ER 4:4:4 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_12LE_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:4:4 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_12LE_709,
+ /** Specifies BT.709 colorspace - Y/CbCr ER 4:4:4 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_12LE_709_ER,
+ /** Specifies BT.2020 colorspace - Y/CbCr 4:4:4 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_YUV444_12LE_2020,
+ /** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_12LE_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_12LE_709,
+ /** Specifies BT.709 colorspace - Y/CbCr ER 4:2:0 12-bit multi-planar. */
+ NVBUF_COLOR_FORMAT_NV12_12LE_709_ER,
+ /** Specifies 8 bit GRAY scale ER - single plane */
+ NVBUF_COLOR_FORMAT_GRAY8_ER,
+ /** Specifies BT.709 colorspace - Y/CbCr 4:2:2 planar */
+ NVBUF_COLOR_FORMAT_UYVY_709,
+ /** Specifies BT.709 colorspace - Y/CbCr ER 4:2:2 planar */
+ NVBUF_COLOR_FORMAT_UYVY_709_ER,
+ /** Specifies BT.2020 colorspace - Y/CbCr 4:2:2 planar */
+ NVBUF_COLOR_FORMAT_UYVY_2020,
+ NVBUF_COLOR_FORMAT_LAST
+} NvBufSurfaceColorFormat;
+
+/**
+ * Specifies layout formats for \ref NvBufSurface video planes.
+ */
+typedef enum
+{
+ /** Specifies pitch layout. */
+ NVBUF_LAYOUT_PITCH,
+ /** Specifies block linear layout. */
+ NVBUF_LAYOUT_BLOCK_LINEAR,
+} NvBufSurfaceLayout;
+
+/**
+ * Specifies memory types for \ref NvBufSurface.
+ */
+typedef enum
+{
+ /** Specifies the default memory type, i.e. \ref NVBUF_MEM_CUDA_DEVICE
+ for dGPU, \ref NVBUF_MEM_SURFACE_ARRAY for Jetson. Use \ref NVBUF_MEM_DEFAULT
+ to allocate whichever type of memory is appropriate for the platform. */
+ NVBUF_MEM_DEFAULT,
+ /** Specifies CUDA Host memory type. */
+ NVBUF_MEM_CUDA_PINNED,
+ /** Specifies CUDA Device memory type. */
+ NVBUF_MEM_CUDA_DEVICE,
+ /** Specifies CUDA Unified memory type. */
+ NVBUF_MEM_CUDA_UNIFIED,
+ /** Specifies NVRM Surface Array type. Valid only for Jetson. */
+ NVBUF_MEM_SURFACE_ARRAY,
+ /** Specifies NVRM Handle type. Valid only for Jetson. */
+ NVBUF_MEM_HANDLE,
+ /** Specifies memory allocated by malloc(). */
+ NVBUF_MEM_SYSTEM,
+} NvBufSurfaceMemType;
+
+/**
+ * Defines display scan formats for NvBufSurface video planes.
+ */
+typedef enum
+{
+ /** Progressive scan formats. */
+ NVBUF_DISPLAYSCANFORMAT_PROGRESSIVE,
+ /** Interlaced scan formats. */
+ NVBUF_DISPLAYSCANFORMAT_INTERLACED,
+} NvBufSurfaceDisplayScanFormat;
+
+/**
+ * Holds plane-wise (extended) parameters of a buffer.
+ */
+typedef struct NvBufSurfacePlaneParamsEx
+{
+ /** display scan format - progressive/interlaced. */
+ NvBufSurfaceDisplayScanFormat scanformat[NVBUF_MAX_PLANES];
+ /** offset of the second field for interlaced buffer. */
+ uint32_t secondfieldoffset[NVBUF_MAX_PLANES];
+ /** block height of the planes for blockLinear layout buffer. */
+ uint32_t blockheightlog2[NVBUF_MAX_PLANES];
+ /** physical address of allocated planes. */
+ uint32_t physicaladdress[NVBUF_MAX_PLANES];
+ /** flags associated with planes */
+ uint64_t flags[NVBUF_MAX_PLANES];
+
+ void * _reserved[STRUCTURE_PADDING * NVBUF_MAX_PLANES];
+} NvBufSurfacePlaneParamsEx;
+
+/**
+ * Holds plane-wise parameters of a buffer.
+ */
+typedef struct NvBufSurfacePlaneParams
+{
+ /** Holds the number of planes. */
+ uint32_t num_planes;
+ /** Holds the widths of planes. */
+ uint32_t width[NVBUF_MAX_PLANES];
+ /** Holds the heights of planes. */
+ uint32_t height[NVBUF_MAX_PLANES];
+ /** Holds the pitches of planes in bytes. */
+ uint32_t pitch[NVBUF_MAX_PLANES];
+ /** Holds the offsets of planes in bytes. */
+ uint32_t offset[NVBUF_MAX_PLANES];
+ /** Holds the sizes of planes in bytes. */
+ uint32_t psize[NVBUF_MAX_PLANES];
+ /** Holds the number of bytes occupied by a pixel in each plane. */
+ uint32_t bytesPerPix[NVBUF_MAX_PLANES];
+
+ void * _reserved[STRUCTURE_PADDING * NVBUF_MAX_PLANES];
+} NvBufSurfacePlaneParams;
+
+
+/**
+ * Holds Chroma Subsampling parameters for NvBufSurface allocation.
+ */
+typedef struct NvBufSurfaceChromaSubsamplingParams
+{
+ /** location settings */
+ uint8_t chromaLocHoriz;
+ uint8_t chromaLocVert;
+} NvBufSurfaceChromaSubsamplingParams;
+
+/**
+ * Holds parameters required to allocate an \ref NvBufSurface.
+ */
+typedef struct NvBufSurfaceCreateParams {
+ /** Holds the GPU ID. Valid only for a multi-GPU system. */
+ uint32_t gpuId;
+ /** Holds the width of the buffer. */
+ uint32_t width;
+ /** Holds the height of the buffer. */
+ uint32_t height;
+ /** Holds the amount of memory to be allocated. Optional; if set, all other
+ parameters (width, height, etc.) are ignored. */
+ uint32_t size;
+ /** Holds a "contiguous memory" flag. If set, contiguous memory is allocated
+ for the batch. Valid only for CUDA memory types. */
+ bool isContiguous;
+ /** Holds the color format of the buffer. */
+ NvBufSurfaceColorFormat colorFormat;
+ /** Holds the surface layout. May be Block Linear (BL) or Pitch Linear (PL).
+ For a dGPU, only PL is valid. */
+ NvBufSurfaceLayout layout;
+ /** Holds the type of memory to be allocated. */
+ NvBufSurfaceMemType memType;
+} NvBufSurfaceCreateParams;
+
+/**
+ * Holds extended parameters required to allocate an NvBufSurface.
+ * (Applicable to the NvBufSurfaceAllocate API.)
+ */
+typedef struct NvBufSurfaceAllocateParams {
+ /** Holds the legacy NvBufSurface creation parameters. */
+ NvBufSurfaceCreateParams params;
+ /** Display scan format */
+ NvBufSurfaceDisplayScanFormat displayscanformat;
+ /** Chroma Subsampling parameters */
+ NvBufSurfaceChromaSubsamplingParams chromaSubsampling;
+ /** components tag to be used for memory allocation */
+ NvBufSurfaceTag memtag;
+ /** Disables pitch padding; applicable only for CUDA and system memory
+ allocations. When set, the pitch is the plane width times bytes per pixel
+ (rounded to a multiple of 2 for odd widths). Note that for some
+ non-standard video resolutions CUDA kernels may fail due to the
+ unaligned pitch.
+ */
+ bool disablePitchPadding;
+ /** Used void* from custom param for 64 bit machine, using other uint32_t param */
+ uint32_t _reservedParam;
+
+ void * _reserved[STRUCTURE_PADDING-1];
+} NvBufSurfaceAllocateParams;
+
+/**
+ * Holds the pointers of a mapped buffer.
+ */
+typedef struct NvBufSurfaceMappedAddr {
+ /** Holds planewise pointers to a CPU mapped buffer. */
+ void * addr[NVBUF_MAX_PLANES];
+ /** Holds a pointer to a mapped EGLImage. */
+ void *eglImage;
+
+ void * _reserved[STRUCTURE_PADDING];
+} NvBufSurfaceMappedAddr;
+
+/**
+ * Holds the extended information of a single buffer in the batch.
+ */
+typedef struct NvBufSurfaceParamsEx {
+ /** offset in bytes from the start of the buffer to the first valid byte.
+ (Applicable for NVBUF_MEM_HANDLE) */
+ int32_t startofvaliddata;
+ /** size of the valid data from the first to the last valid byte.
+ (Applicable for NVBUF_MEM_HANDLE) */
+ int32_t sizeofvaliddatainbytes;
+ /** chroma subsampling parameters.
+ (Applicable for NVBUF_MEM_SURFACE_ARRAY) */
+ NvBufSurfaceChromaSubsamplingParams chromaSubsampling;
+ /** get buffer vpr information. */
+ bool is_protected;
+ /** plane wise extended info */
+ NvBufSurfacePlaneParamsEx planeParamsex;
+
+ void * _reserved[STRUCTURE_PADDING];
+} NvBufSurfaceParamsEx;
+
+/**
+ * Holds the information of a single buffer in the batch.
+ */
+typedef struct NvBufSurfaceParams {
+ /** Holds the width of the buffer. */
+ uint32_t width;
+ /** Holds the height of the buffer. */
+ uint32_t height;
+ /** Holds the pitch of the buffer. */
+ uint32_t pitch;
+ /** Holds the color format of the buffer. */
+ NvBufSurfaceColorFormat colorFormat;
+ /** Holds BL or PL. For dGPU, only PL is valid. */
+ NvBufSurfaceLayout layout;
+ /** Holds a DMABUF FD. Valid only for \ref NVBUF_MEM_SURFACE_ARRAY and
+ \ref NVBUF_MEM_HANDLE type memory. */
+ uint64_t bufferDesc;
+ /** Holds the amount of allocated memory. */
+ uint32_t dataSize;
+ /** Holds a pointer to allocated memory. Not valid for
+ \ref NVBUF_MEM_SURFACE_ARRAY or \ref NVBUF_MEM_HANDLE. */
+ void * dataPtr;
+ /** Holds planewise information (width, height, pitch, offset, etc.). */
+ NvBufSurfacePlaneParams planeParams;
+ /** Holds pointers to mapped buffers. Initialized to NULL
+ when the structure is created. */
+ NvBufSurfaceMappedAddr mappedAddr;
+ /** Pointer to the extended parameters of a single buffer in the batch. */
+ NvBufSurfaceParamsEx *paramex;
+
+ void * _reserved[STRUCTURE_PADDING - 1];
+} NvBufSurfaceParams;
+
+/**
+ * Holds information about batched buffers.
+ */
+typedef struct NvBufSurface {
+ /** Holds a GPU ID. Valid only for a multi-GPU system. */
+ uint32_t gpuId;
+ /** Holds the batch size. */
+ uint32_t batchSize;
+ /** Holds the number of valid and filled buffers. Initialized to zero when
+ an instance of the structure is created. */
+ uint32_t numFilled;
+ /** Holds an "is contiguous" flag. If set, memory allocated for the batch
+ is contiguous. */
+ bool isContiguous;
+ /** Holds type of memory for buffers in the batch. */
+ NvBufSurfaceMemType memType;
+ /** Holds a pointer to an array of batched buffers. */
+ NvBufSurfaceParams *surfaceList;
+
+ void * _reserved[STRUCTURE_PADDING];
+} NvBufSurface;
+
+/**
+ * Holds plane parameters to map the buffer received from another process.
+ */
+typedef struct NvBufSurfaceMapPlaneParams
+{
+ /** Holds the width of the plane */
+ uint32_t width;
+ /** Holds the height of the plane */
+ uint32_t height;
+ /** Holds the pitch of the plane in bytes */
+ uint32_t pitch;
+ /** Holds the offset of the plane in bytes */
+ uint32_t offset;
+ /** Holds the size of the plane in bytes */
+ uint32_t psize;
+ /** Holds the offset of the second field for an interlaced buffer */
+ uint32_t secondfieldoffset;
+ /** Holds the block height of the plane for a blockLinear layout buffer */
+ uint32_t blockheightlog2;
+ /** Holds the flags associated with the plane */
+ uint64_t flags;
+ /** Reserved */
+ uint8_t reserved[64];
+} NvBufSurfaceMapPlaneParams;
+
+/**
+ * Holds buffer parameters to map the buffer received from another process.
+ */
+typedef struct NvBufSurfaceMapParams {
+ /** Holds the number of planes. */
+ uint32_t num_planes;
+ /** Holds a GPU ID. */
+ uint32_t gpuId;
+ /** Holds a DMABUF FD. */
+ uint64_t fd;
+ /** Holds the total size of allocated memory. */
+ uint32_t totalSize;
+ /** Holds the type of memory. */
+ NvBufSurfaceMemType memType;
+ /** Holds the BL or PL layout. */
+ NvBufSurfaceLayout layout;
+ /** Holds the display scan format. */
+ NvBufSurfaceDisplayScanFormat scanformat;
+ /** Holds the color format. */
+ NvBufSurfaceColorFormat colorFormat;
+ /** Holds chroma subsampling parameters. */
+ NvBufSurfaceChromaSubsamplingParams chromaSubsampling;
+ /** Holds plane parameters. */
+ NvBufSurfaceMapPlaneParams planes[NVBUF_MAX_PLANES];
+ /** Reserved. */
+ uint8_t reserved[64];
+} NvBufSurfaceMapParams;
+
+/**
+ * \brief Allocates a batch of buffers.
+ *
+ * Allocates memory for \a batchSize buffers and returns a pointer to an
+ * allocated \ref NvBufSurface. The \a params structure must have
+ * the allocation parameters of a single buffer. If \a params.size
+ * is set, a buffer of that size is allocated, and all other
+ * parameters (width, height, color format, etc.) are ignored.
+ *
+ * Call NvBufSurfaceDestroy() to free resources allocated by this function.
+ *
+ * @param[out] surf An indirect pointer to the allocated batched
+ * buffers.
+ * @param[in] batchSize Batch size of buffers.
+ * @param[in] params A pointer to an \ref NvBufSurfaceCreateParams
+ * structure.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceCreate (NvBufSurface **surf, uint32_t batchSize,
+ NvBufSurfaceCreateParams *params);
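+
+/*
+ * Illustrative sketch (not from the original documentation): allocate a
+ * four-buffer batch and free it. The NvBufSurfaceCreateParams field names
+ * and the enum values used below are assumed from declarations earlier in
+ * this header.
+ *
+ *   NvBufSurfaceCreateParams create_params = {0};
+ *   create_params.gpuId       = 0;
+ *   create_params.width       = 1920;
+ *   create_params.height      = 1080;
+ *   create_params.colorFormat = NVBUF_COLOR_FORMAT_NV12;
+ *   create_params.layout      = NVBUF_LAYOUT_PITCH;
+ *   create_params.memType     = NVBUF_MEM_DEFAULT;
+ *
+ *   NvBufSurface *surf = NULL;
+ *   if (NvBufSurfaceCreate (&surf, 4, &create_params) != 0) {
+ *     // handle allocation failure
+ *   }
+ *   // ... use surf ...
+ *   NvBufSurfaceDestroy (surf);
+ */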
+
+/**
+ * \brief Allocates a batch of buffers using extended allocation parameters.
+ *
+ * Allocates memory for \a batchSize buffers and returns a pointer to an
+ * allocated \ref NvBufSurface. The \a paramsext structure must have
+ * the allocation parameters of a single buffer. If the size field in
+ * \a paramsext is set, a buffer of that size is allocated, and all other
+ * parameters (width, height, color format, etc.) are ignored.
+ *
+ * Call NvBufSurfaceDestroy() to free resources allocated by this function.
+ *
+ * @param[out] surf An indirect pointer to the allocated batched
+ * buffers.
+ * @param[in] batchSize Batch size of buffers.
+ * @param[in] paramsext A pointer to an \ref NvBufSurfaceAllocateParams
+ * structure.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceAllocate (NvBufSurface **surf, uint32_t batchSize,
+ NvBufSurfaceAllocateParams *paramsext);
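+
+/*
+ * Illustrative sketch (not from the original documentation): extended
+ * allocation. The layout of NvBufSurfaceAllocateParams is declared earlier
+ * in this header; the nested `params` member used below is an assumption
+ * made only for this example.
+ *
+ *   NvBufSurfaceAllocateParams alloc_params = {0};
+ *   alloc_params.params.width       = 1920;
+ *   alloc_params.params.height      = 1080;
+ *   alloc_params.params.colorFormat = NVBUF_COLOR_FORMAT_NV12;
+ *   alloc_params.params.layout      = NVBUF_LAYOUT_PITCH;
+ *   alloc_params.params.memType     = NVBUF_MEM_SURFACE_ARRAY;
+ *
+ *   NvBufSurface *surf = NULL;
+ *   if (NvBufSurfaceAllocate (&surf, 1, &alloc_params) != 0) {
+ *     // handle allocation failure
+ *   }
+ *   NvBufSurfaceDestroy (surf);
+ */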
+
+/**
+ * \brief Frees the batched buffers previously allocated through NvBufSurfaceCreate() or NvBufSurfaceAllocate().
+ *
+ * @param[in] surf A pointer to an \ref NvBufSurface to be freed.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceDestroy (NvBufSurface *surf);
+
+/**
+ * \brief Maps hardware batched buffers to the HOST or CPU address space.
+ *
+ * Valid for \ref NVBUF_MEM_CUDA_UNIFIED type memory for dGPU and
+ * \ref NVBUF_MEM_SURFACE_ARRAY and \ref NVBUF_MEM_HANDLE type memory for
+ * Jetson.
+ *
+ * This function fills an array of pointers at
+ * \a surf->surfaceList->mappedAddr->addr.
+ * \a surf is a pointer to an \ref NvBufSurface.
+ * \a surfaceList is a pointer to an \ref NvBufSurfaceParams.
+ * \a mappedAddr is a pointer to an \ref NvBufSurfaceMappedAddr.
+ * \a addr is declared as an array of pointers to void, and holds pointers
+ * to the buffers.
+ *
+ * The client must call NvBufSurfaceSyncForCpu() with the virtual address
+ * populated by this function before accessing mapped memory in the CPU.
+ *
+ * After memory mapping is complete, mapped memory modification
+ * must be coordinated between the CPU and the hardware device as
+ * follows:
+ * - CPU: If the CPU modifies mapped memory, the client must call
+ * NvBufSurfaceSyncForDevice() before any hardware device accesses the memory.
+ * - Hardware device: If a hardware device modifies mapped memory, the client
+ * must call NvBufSurfaceSyncForCpu() before the CPU accesses the memory.
+ *
+ * Use NvBufSurfaceUnMap() to unmap buffer(s) and release any resource.
+ *
+ * @param[in,out] surf A pointer to an NvBufSurface structure. The function
+ * stores pointers to the buffers in a descendant of this
+ * structure; see the notes above.
+ * @param[in] index Index of a buffer in the batch. -1 refers to all buffers
+ * in the batch.
+ * @param[in] plane Index of a plane in buffer. -1 refers to all planes
+ * in the buffer.
+ * @param[in] type A flag for mapping type.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceMap (NvBufSurface *surf, int index, int plane, NvBufSurfaceMemMapFlags type);
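+
+/*
+ * Illustrative sketch (not from the original documentation) of the map/sync
+ * sequence described above, for buffer 0 and all of its planes. Here `surf`
+ * is an NvBufSurface pointer obtained from NvBufSurfaceCreate(), and
+ * NVBUF_MAP_READ_WRITE is assumed to be one of the NvBufSurfaceMemMapFlags
+ * values declared earlier in this header.
+ *
+ *   if (NvBufSurfaceMap (surf, 0, -1, NVBUF_MAP_READ_WRITE) == 0) {
+ *     NvBufSurfaceSyncForCpu (surf, 0, -1);     // make device writes visible to the CPU
+ *     uint8_t *plane0 = (uint8_t *) surf->surfaceList[0].mappedAddr.addr[0];
+ *     // ... read or modify the mapped plane(s) through plane0 ...
+ *     NvBufSurfaceSyncForDevice (surf, 0, -1);  // make CPU writes visible to the device
+ *     NvBufSurfaceUnMap (surf, 0, -1);
+ *   }
+ */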
+
+/**
+ * \brief Unmaps previously mapped buffer(s).
+ *
+ * @param[in] surf A pointer to an \ref NvBufSurface structure.
+ * @param[in] index Index of a buffer in the batch. -1 indicates
+ * all buffers in the batch.
+ * @param[in] plane Index of a plane in the buffer. -1 indicates
+ * all planes in the buffer.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceUnMap (NvBufSurface *surf, int index, int plane);
+
+/**
+ * \brief Copies the content of source batched buffer(s) to destination
+ * batched buffer(s).
+ *
+ * You can use this function to copy source buffer(s) of one memory type
+ * to destination buffer(s) of another memory type,
+ * e.g. CUDA host to CUDA device, malloc'ed memory to CUDA device, etc.
+ *
+ * The source and destination \ref NvBufSurface objects must have the same
+ * buffer size and batch size.
+ *
+ * @param[in] srcSurf A pointer to the source NvBufSurface structure.
+ * @param[in] dstSurf A pointer to the destination NvBufSurface structure.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceCopy (NvBufSurface *srcSurf, NvBufSurface *dstSurf);
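+
+/*
+ * Illustrative sketch (not from the original documentation): copy a batch
+ * into a second surface created with the same creation parameters (and
+ * therefore the same buffer and batch size). Here `src` is an existing
+ * NvBufSurface pointer and `create_params` is the structure that was used
+ * to allocate it.
+ *
+ *   NvBufSurface *dst = NULL;
+ *   if (NvBufSurfaceCreate (&dst, src->batchSize, &create_params) == 0) {
+ *     if (NvBufSurfaceCopy (src, dst) != 0) {
+ *       // handle copy failure
+ *     }
+ *     NvBufSurfaceDestroy (dst);
+ *   }
+ */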
+
+/**
+ * \brief Copies the NvBufSurface plane memory content to a raw buffer plane for a specific
+ * batched buffer.
+ *
+ * This function can be used to copy plane memory content from a specific
+ * source batch buffer of a supported memory type to a destination raw
+ * buffer pointer.
+ *
+ * @param[in] Surf A pointer to an \ref NvBufSurface structure.
+ * @param[in] index Index of a buffer in the batch.
+ * @param[in] plane Index of a plane in the buffer.
+ * @param[in] out_width Aligned width of the raw data plane.
+ * @param[in] out_height Aligned height of the raw data plane.
+ * @param[in] ptr A pointer to the output raw plane data.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurface2Raw (NvBufSurface *Surf, unsigned int index, unsigned int plane, unsigned int out_width, unsigned int out_height, unsigned char *ptr);
+
+/**
+ * \brief Copies the raw buffer plane memory content to the NvBufSurface plane memory of a specific
+ * batched buffer.
+ *
+ * This function can be used to copy plane memory content from a source raw
+ * buffer pointer to the plane memory of a specific destination batch buffer
+ * of a supported memory type.
+ *
+ * @param[in] ptr A pointer to the input raw plane data.
+ * @param[in] index Index of a buffer in the batch.
+ * @param[in] plane Index of a plane in the buffer.
+ * @param[in] in_width Aligned width of the raw data plane.
+ * @param[in] in_height Aligned height of the raw data plane.
+ * @param[in] Surf A pointer to an \ref NvBufSurface structure.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int Raw2NvBufSurface (unsigned char *ptr, unsigned int index, unsigned int plane, unsigned int in_width, unsigned int in_height, NvBufSurface *Surf);
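+
+/*
+ * Illustrative sketch (not from the original documentation): copy plane 0 of
+ * buffer 0 to a heap buffer, process it, and write it back. The array-valued
+ * planeParams fields (width[], height[], pitch[]) are assumed from the
+ * NvBufSurfacePlaneParams declaration earlier in this header; <stdlib.h> is
+ * assumed for malloc/free.
+ *
+ *   uint32_t w     = surf->surfaceList[0].planeParams.width[0];
+ *   uint32_t h     = surf->surfaceList[0].planeParams.height[0];
+ *   uint32_t pitch = surf->surfaceList[0].planeParams.pitch[0];
+ *   unsigned char *raw = malloc ((size_t) pitch * h);
+ *
+ *   if (raw != NULL &&
+ *       NvBufSurface2Raw (surf, 0, 0, w, h, raw) == 0) {   // surface plane -> raw
+ *     // ... process raw ...
+ *     Raw2NvBufSurface (raw, 0, 0, w, h, surf);            // raw -> surface plane
+ *   }
+ *   free (raw);
+ */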
+
+/**
+ * \brief Syncs the hardware memory cache for the CPU.
+ *
+ * Valid only for memory types \ref NVBUF_MEM_SURFACE_ARRAY and
+ * \ref NVBUF_MEM_HANDLE.
+ *
+ * @param[in] surf A pointer to an \ref NvBufSurface structure.
+ * @param[in] index Index of the buffer in the batch. -1 refers to
+ * all buffers in the batch.
+ * @param[in] plane Index of a plane in the buffer. -1 refers to all planes
+ * in the buffer.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceSyncForCpu (NvBufSurface *surf, int index, int plane);
+
+/**
+ * \brief Syncs the hardware memory cache for the device.
+ *
+ * Valid only for memory types \ref NVBUF_MEM_SURFACE_ARRAY and
+ * \ref NVBUF_MEM_HANDLE.
+ *
+ * @param[in] surf A pointer to an \ref NvBufSurface structure.
+ * @param[in] index Index of a buffer in the batch. -1 refers to all buffers
+ * in the batch.
+ * @param[in] plane Index of a plane in the buffer. -1 refers to all planes
+ * in the buffer.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceSyncForDevice (NvBufSurface *surf, int index, int plane);
+
+/**
+ * \brief Gets the \ref NvBufSurface from the DMABUF FD.
+ *
+ * @param[in] dmabuf_fd DMABUF FD of the buffer.
+ * @param[out] buffer A pointer to the NvBufSurface.
+ *
+ * @return 0 for success, or -1 otherwise.
+ */
+int NvBufSurfaceFromFd (int dmabuf_fd, void **buffer);
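+
+/*
+ * Illustrative sketch (not from the original documentation): recover the
+ * owning NvBufSurface from a DMABUF FD, for example the bufferDesc of an
+ * NVBUF_MEM_SURFACE_ARRAY buffer in an existing batch `surf`.
+ *
+ *   int dmabuf_fd = (int) surf->surfaceList[0].bufferDesc;
+ *   void *buffer = NULL;
+ *   if (NvBufSurfaceFromFd (dmabuf_fd, &buffer) == 0) {
+ *     NvBufSurface *surf_from_fd = (NvBufSurface *) buffer;
+ *     // ... use surf_from_fd ...
+ *   }
+ */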
+
+/**
+ * \brief Fills each byte of the buffer(s) in an \ref NvBufSurface with a
+ * provided value.
+ *
+ * You can also use this function to reset the buffer(s) in the batch.
+ *
+ * @param[in] surf A pointer to the NvBufSurface structure.
+ * @param[in] index Index of a buffer in the batch. -1 refers to all buffers
+ * in the batch.
+ * @param[in] plane Index of a plane in the buffer. -1 refers to all planes
+ * in the buffer.
+ * @param[in] value The value to be used as fill.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceMemSet (NvBufSurface *surf, int index, int plane, uint8_t value);
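+
+/*
+ * Illustrative sketch (not from the original documentation): zero every
+ * plane of every buffer in a batch `surf`, using the -1 index and plane
+ * values described above.
+ *
+ *   NvBufSurfaceMemSet (surf, -1, -1, 0);
+ */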
+
+/**
+ * \brief Creates an EGLImage from the memory of one or more
+ * \ref NvBufSurface buffers.
+ *
+ * Only memory type \ref NVBUF_MEM_SURFACE_ARRAY is supported.
+ *
+ * This function returns the created EGLImage by storing its address at
+ * \a surf->surfaceList->mappedAddr->eglImage. (\a surf is a pointer to
+ * an NvBufSurface. \a surfaceList is a pointer to an \ref NvBufSurfaceParams.
+ * \a mappedAddr is a pointer to an \ref NvBufSurfaceMappedAddr.
+ * \a eglImage is declared as a pointer to void, and holds an
+ * EGLImageKHR.)
+ *
+ * You can use this function in scenarios where a CUDA operation on Jetson
+ * hardware memory (identified by \ref NVBUF_MEM_SURFACE_ARRAY) is required.
+ * The EGLImageKHR struct provided by this function can then be registered
+ * with CUDA for further CUDA operations.
+ *
+ * @param[in,out] surf A pointer to an NvBufSurface structure. The function
+ * stores a pointer to the created EGLImage in
+ * a descendant of this structure; see the notes above.
+ * @param[in] index Index of a buffer in the batch. -1 specifies all buffers
+ * in the batch.
+ *
+ * @return 0 for success, or -1 otherwise.
+ */
+int NvBufSurfaceMapEglImage (NvBufSurface *surf, int index);
+
+/**
+ * \brief Destroys the previously created EGLImage object(s).
+ *
+ * @param[in] surf A pointer to an \ref NvBufSurface structure.
+ * @param[in] index The index of a buffer in the batch. -1 specifies all
+ * buffers in the batch.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceUnMapEglImage (NvBufSurface *surf, int index);
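+
+/*
+ * Illustrative sketch (not from the original documentation): create an
+ * EGLImage for buffer 0, fetch it from mappedAddr as described above, and
+ * destroy it again. Registering the EGLImageKHR with CUDA is omitted.
+ *
+ *   if (NvBufSurfaceMapEglImage (surf, 0) == 0) {
+ *     void *egl_image = surf->surfaceList[0].mappedAddr.eglImage;
+ *     // ... hand egl_image to CUDA / EGL consumers ...
+ *     NvBufSurfaceUnMapEglImage (surf, 0);
+ *   }
+ */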
+
+/**
+ * \brief Imports parameters received from another process and creates a hardware buffer.
+ *
+ * The calling process must call NvBufferDestroy() to remove the reference
+ * count for the hardware buffer handle of the imported DMA buffer.
+ *
+ * @param[out] out_nvbuf_surf An indirect pointer to the imported hardware buffer.
+ * @param[in] in_params A pointer to the parameters used to create the hardware buffer.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceImport (NvBufSurface **out_nvbuf_surf, const NvBufSurfaceMapParams *in_params);
+
+/**
+ * \brief Gets the buffer information needed to map the buffer in another process.
+ *
+ * @param[in] surf A pointer to an \ref NvBufSurface structure.
+ * @param[in] index Index of a buffer in the batch.
+ * @param[out] params A pointer to the \ref NvBufSurfaceMapParams information for the buffer.
+ *
+ * @return 0 if successful, or -1 otherwise.
+ */
+int NvBufSurfaceGetMapParams (const NvBufSurface *surf, int index, NvBufSurfaceMapParams *params);
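+
+/*
+ * Illustrative sketch (not from the original documentation) of sharing a
+ * buffer across processes. How the NvBufSurfaceMapParams structure and the
+ * underlying DMABUF FD reach the importing process (e.g. over a UNIX-domain
+ * socket) is outside the scope of this header.
+ *
+ *   // Exporting process:
+ *   NvBufSurfaceMapParams map_params;
+ *   NvBufSurfaceGetMapParams (surf, 0, &map_params);
+ *   // ... send map_params (and the FD) to the importing process ...
+ *
+ *   // Importing process:
+ *   NvBufSurface *imported = NULL;
+ *   if (NvBufSurfaceImport (&imported, &map_params) == 0) {
+ *     // ... use imported; see the NvBufferDestroy() note above ...
+ *   }
+ */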
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* NVBUFSURFACE_H_ */
diff --git a/push_info.txt b/push_info.txt
new file mode 100644
index 0000000..051d5f1
--- /dev/null
+++ b/push_info.txt
@@ -0,0 +1 @@
+jetson_36.3