Updating prebuilts and/or headers

2d40637e04ba1be9b48393f71bed54b09590ab0d - nvbufsurface.h
c3f21bd4363243ad1bca5b1d434b5896402eec14 - nvbuf_utils.h
9a172f748a2b8f4d6d15648ea353989ccc7aeba6 - gst-plugins-nv-video-sinks/Makefile
7ef56486c9e6b3e354473a2959d274517dd709da - gst-plugins-nv-video-sinks/gstnvvideosinks.c
9825d8a113dbf7dd16f791ff1ca66f2de3047b22 - gst-plugins-nv-video-sinks/LICENSE.libgstnvvideosinks
f8cd771fc3695e957a02665e9eb1a1c6fb9b0572 - gst-plugins-nv-video-sinks/nv3dsink/gstnv3dsink.c
9b7125a2d7ebe2ea647c43d2eb43e8d04cd16c47 - gst-plugins-nv-video-sinks/nv3dsink/gstnv3dsink.h
5e13200e9cba5f45d74cf6899dd3356d5f5d1c8e - gst-plugins-nv-video-sinks/common/context.h
ad360a668f0f494ebd2bb630c3faaf93078c6e0d - gst-plugins-nv-video-sinks/common/window.c
72f9a4b823c4162c9f22cedb7c1cb1764d06fcb6 - gst-plugins-nv-video-sinks/common/renderer.h
fcb1b73054a1c8ff8ce614878ee46880273656f4 - gst-plugins-nv-video-sinks/common/renderer.c
4f86ed5c7d6dfa6e6e4df4fd2945993655fc3409 - gst-plugins-nv-video-sinks/common/context.c
d48e1dae85e3c6a0ba7623be7ee306b8e1ef6695 - gst-plugins-nv-video-sinks/common/gstnvvideofwd.h
a0debde9b0fd5bc6ac9c5fac7f1a14745a2b0617 - gst-plugins-nv-video-sinks/common/display.c
96b0b4d38692a0aecf70944749684ac938ff192f - gst-plugins-nv-video-sinks/common/display.h
6e77d54ffc5d1a49d5bad768cdf5cfadf458f1f7 - gst-plugins-nv-video-sinks/common/window.h
638b0da4ea65d02818289e89bc1d635ddbcdaec5 - gst-plugins-nv-video-sinks/common/x11/window_x11.h
d692399c6d94dbc7814770b08baf9271ed97f8e0 - gst-plugins-nv-video-sinks/common/x11/display_x11.h
b3f1b67cae0b4643f6a676b362ceaa61abc9c40f - gst-plugins-nv-video-sinks/common/x11/display_x11.c
c98945083e215dff26507c1e10b0ebf62a2c6fb7 - gst-plugins-nv-video-sinks/common/x11/window_x11.c
f528404a796de5a23dab281588feb72f42343e59 - gst-plugins-nv-video-sinks/common/renderer/renderer_gl.h
707a36267f329bb22afdd19b947be5a99478ec7a - gst-plugins-nv-video-sinks/common/renderer/renderer_gl.c
570146fa5ab1969a0283f0f844bbcb90c71f24ed - gst-plugins-nv-video-sinks/common/egl/context_egl.c
536a072a8ef84b3c91307777f88121fb88df2c4f - gst-plugins-nv-video-sinks/common/egl/context_egl.h

Change-Id: I0d186b38613c2e294d1d3b0bb4173113534203e1
This commit is contained in:
svcmobrel-release
2023-03-28 13:43:21 -07:00
parent c64ebd08a3
commit cda1dac573
27 changed files with 6470 additions and 0 deletions

commitFile.txt

@@ -0,0 +1,26 @@
Updating prebuilts and/or headers
2d40637e04ba1be9b48393f71bed54b09590ab0d - nvbufsurface.h
c3f21bd4363243ad1bca5b1d434b5896402eec14 - nvbuf_utils.h
9a172f748a2b8f4d6d15648ea353989ccc7aeba6 - gst-plugins-nv-video-sinks/Makefile
7ef56486c9e6b3e354473a2959d274517dd709da - gst-plugins-nv-video-sinks/gstnvvideosinks.c
9825d8a113dbf7dd16f791ff1ca66f2de3047b22 - gst-plugins-nv-video-sinks/LICENSE.libgstnvvideosinks
f8cd771fc3695e957a02665e9eb1a1c6fb9b0572 - gst-plugins-nv-video-sinks/nv3dsink/gstnv3dsink.c
9b7125a2d7ebe2ea647c43d2eb43e8d04cd16c47 - gst-plugins-nv-video-sinks/nv3dsink/gstnv3dsink.h
5e13200e9cba5f45d74cf6899dd3356d5f5d1c8e - gst-plugins-nv-video-sinks/common/context.h
ad360a668f0f494ebd2bb630c3faaf93078c6e0d - gst-plugins-nv-video-sinks/common/window.c
72f9a4b823c4162c9f22cedb7c1cb1764d06fcb6 - gst-plugins-nv-video-sinks/common/renderer.h
fcb1b73054a1c8ff8ce614878ee46880273656f4 - gst-plugins-nv-video-sinks/common/renderer.c
4f86ed5c7d6dfa6e6e4df4fd2945993655fc3409 - gst-plugins-nv-video-sinks/common/context.c
d48e1dae85e3c6a0ba7623be7ee306b8e1ef6695 - gst-plugins-nv-video-sinks/common/gstnvvideofwd.h
a0debde9b0fd5bc6ac9c5fac7f1a14745a2b0617 - gst-plugins-nv-video-sinks/common/display.c
96b0b4d38692a0aecf70944749684ac938ff192f - gst-plugins-nv-video-sinks/common/display.h
6e77d54ffc5d1a49d5bad768cdf5cfadf458f1f7 - gst-plugins-nv-video-sinks/common/window.h
638b0da4ea65d02818289e89bc1d635ddbcdaec5 - gst-plugins-nv-video-sinks/common/x11/window_x11.h
d692399c6d94dbc7814770b08baf9271ed97f8e0 - gst-plugins-nv-video-sinks/common/x11/display_x11.h
b3f1b67cae0b4643f6a676b362ceaa61abc9c40f - gst-plugins-nv-video-sinks/common/x11/display_x11.c
c98945083e215dff26507c1e10b0ebf62a2c6fb7 - gst-plugins-nv-video-sinks/common/x11/window_x11.c
f528404a796de5a23dab281588feb72f42343e59 - gst-plugins-nv-video-sinks/common/renderer/renderer_gl.h
707a36267f329bb22afdd19b947be5a99478ec7a - gst-plugins-nv-video-sinks/common/renderer/renderer_gl.c
570146fa5ab1969a0283f0f844bbcb90c71f24ed - gst-plugins-nv-video-sinks/common/egl/context_egl.c
536a072a8ef84b3c91307777f88121fb88df2c4f - gst-plugins-nv-video-sinks/common/egl/context_egl.h


@@ -0,0 +1,23 @@
The software listed below is licensed under the terms of the LGPLv2.1
(see below). To obtain source code, contact oss-requests@nvidia.com.
libgstnvvideosinks (libgstnvvideosinks.so)
------------------------------------
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/


@@ -0,0 +1,78 @@
###############################################################################
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA Corporation is strictly prohibited.
#
###############################################################################
ifneq ($(MAKECMDGOALS),install)
ifeq ($(CUDA_VER),)
$(error "CUDA_VER is not set. Set it by running - "export CUDA_VER=<cuda-version>"")
endif
endif
SO_NAME := libgstnvvideosinks.so
DEST_DIR ?= /usr/lib/aarch64-linux-gnu/gstreamer-1.0
SRCS := common/context.c \
common/display.c \
common/egl/context_egl.c \
common/renderer.c \
common/renderer/renderer_gl.c \
common/window.c \
common/x11/display_x11.c \
common/x11/window_x11.c \
gstnvvideosinks.c \
nv3dsink/gstnv3dsink.c
INCLUDES += -I./common \
-I./common/egl \
-I./common/renderer \
-I./common/x11 \
-I/usr/local/include/gstreamer-1.0 \
-I/usr/local/cuda-$(CUDA_VER)/targets/aarch64-linux/include/ \
-I../
PKGS := glib-2.0 \
gstreamer-1.0 \
gstreamer-base-1.0 \
gstreamer-video-1.0
OBJS := $(SRCS:.c=.o)
CFLAGS := -fPIC \
-DNV_VIDEO_SINKS_HAS_EGL \
-DNV_VIDEO_SINKS_HAS_GL \
-DNV_VIDEO_SINKS_HAS_NV3DSINK \
-DNV_VIDEO_SINKS_HAS_X11
CFLAGS += `pkg-config --cflags $(PKGS)`
LDFLAGS = -Wl,--no-undefined -L/usr/lib/aarch64-linux-gnu/tegra -L/usr/local/cuda-$(CUDA_VER)/targets/aarch64-linux/lib/
LIBS = -lnvbufsurface -lGLESv2 -lEGL -lX11 -lm -lcuda -lcudart
LIBS += `pkg-config --libs $(PKGS)`
all: $(SO_NAME)
%.o: %.c
$(CC) -c $< $(CFLAGS) $(INCLUDES) -o $@
$(SO_NAME): $(OBJS)
$(CC) -shared -o $(SO_NAME) $(OBJS) $(LIBS) $(LDFLAGS)
.PHONY: install
$(DEST_DIR):
mkdir -p $(DEST_DIR)
install: $(SO_NAME) | $(DEST_DIR)
cp -vp $(SO_NAME) $(DEST_DIR)
.PHONY: clean
clean:
rm -rf $(OBJS) $(SO_NAME)


@@ -0,0 +1,38 @@
###############################################################################
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA Corporation is strictly prohibited.
#
###############################################################################
Steps to compile the "gst-plugins-nv-video-sinks" sources:
1) Install gstreamer related packages using the command:
sudo apt-get install gstreamer1.0-tools gstreamer1.0-alsa \
gstreamer1.0-plugins-base gstreamer1.0-plugins-good \
gstreamer1.0-plugins-bad gstreamer1.0-plugins-ugly \
gstreamer1.0-libav libgstreamer1.0-dev \
libgstreamer-plugins-base1.0-dev libegl1-mesa-dev
2) Install CUDA Runtime 10.0+
3) Extract the package "libgstnvvideosinks_src.tbz2" as follows:
tar xvjf libgstnvvideosinks_src.tbz2
4) cd "gst-plugins-nv-video-sinks"
5) Export the appropriate CUDA_VER using - "export CUDA_VER=<cuda-version>"
6) run "make" to create "libgstnvvideosinks.so"
7) run "sudo make install" to install "libgstnvvideosinks.so" in
"/usr/lib/aarch64-linux-gnu/gstreamer-1.0".
8) run "make install DEST_DIR=<location>" to install at a different <location>.


@@ -0,0 +1,470 @@
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#include <gst/base/gstdataqueue.h>
#include "context.h"
#include "window.h"
#if NV_VIDEO_SINKS_HAS_EGL
#include "context_egl.h"
#endif
#define GST_CAT_DEFAULT gst_debug_nv_video_context
GST_DEBUG_CATEGORY (GST_CAT_DEFAULT);
struct _GstNvVideoContextPrivate
{
GstDataQueue *queue;
GThread *render_thread;
gboolean render_thread_active;
gboolean eos_handled;
GstFlowReturn last_ret;
GMutex render_lock;
GCond create_cond;
GCond quit_cond;
GCond eos_cond;
};
G_DEFINE_ABSTRACT_TYPE_WITH_CODE (GstNvVideoContext, gst_nv_video_context,
GST_TYPE_OBJECT, G_ADD_PRIVATE(GstNvVideoContext));
GstNvVideoContextType
gst_nv_video_context_get_handle_type (GstNvVideoContext * context)
{
g_return_val_if_fail (GST_IS_NV_VIDEO_CONTEXT (context),
GST_NV_VIDEO_CONTEXT_TYPE_NONE);
return context->type;
}
static gpointer
gst_nv_video_context_render_thread_func (GstNvVideoContext * context)
{
GstNvVideoContextClass *context_class;
GstDataQueueItem *item = NULL;
GstFlowReturn ret = GST_FLOW_OK;
context_class = GST_NV_VIDEO_CONTEXT_GET_CLASS (context);
GST_DEBUG_OBJECT (context, "render thread started");
context_class->setup (context);
cudaError_t CUerr = cudaSuccess;
GST_LOG_OBJECT (context, "SETTING CUDA DEVICE = %d in func=%s\n", context->gpu_id, __func__);
CUerr = cudaSetDevice(context->gpu_id);
if (CUerr != cudaSuccess) {
GST_LOG_OBJECT (context,"\n *** Unable to set device in %s Line %d\n", __func__, __LINE__);
return NULL;
}
g_mutex_lock (&context->priv->render_lock);
context->priv->render_thread_active = TRUE;
context->priv->last_ret = ret;
g_cond_signal (&context->priv->create_cond);
g_mutex_unlock (&context->priv->render_lock);
while (gst_data_queue_pop (context->priv->queue, &item)) {
GstMiniObject *object = item->object;
GstBuffer *buf = NULL;
GST_TRACE_OBJECT (context,
"render thread: got data queue item %" GST_PTR_FORMAT, object);
ret = GST_FLOW_ERROR;
if (GST_IS_BUFFER (object)) {
buf = GST_BUFFER_CAST (item->object);
if (context_class->show_frame (context, buf)) {
ret = GST_FLOW_OK;
}
} else if (!object) {
GST_TRACE_OBJECT (context, "render thread: handle EOS");
context_class->handle_eos (context);
g_mutex_lock (&context->priv->render_lock);
g_cond_signal (&context->priv->eos_cond);
context->priv->eos_handled = TRUE;
g_mutex_unlock (&context->priv->render_lock);
GST_TRACE_OBJECT (context, "render thread: handled EOS");
} else {
g_assert_not_reached ();
}
item->destroy (item);
g_mutex_lock (&context->priv->render_lock);
context->priv->last_ret = ret;
g_mutex_unlock (&context->priv->render_lock);
if (ret != GST_FLOW_OK) {
break;
}
GST_TRACE_OBJECT (context, "render thread: handled");
}
GST_DEBUG_OBJECT (context, "tearing down render thread");
context_class->cleanup (context);
g_mutex_lock (&context->priv->render_lock);
g_cond_signal (&context->priv->quit_cond);
context->priv->render_thread_active = FALSE;
g_mutex_unlock (&context->priv->render_lock);
GST_DEBUG_OBJECT (context, "render thread exit");
return NULL;
}
static void
gst_nv_video_context_queue_free_item (GstDataQueueItem * item)
{
GstDataQueueItem *data = item;
if (data->object)
gst_mini_object_unref (data->object);
g_slice_free (GstDataQueueItem, data);
}
static gboolean
gst_nv_video_context_render_thread_show_frame (GstNvVideoContext * context,
GstBuffer * buf)
{
gboolean last_ret;
GstDataQueueItem *item;
GstMiniObject *obj = GST_MINI_OBJECT_CAST (buf);
g_assert (obj);
g_mutex_lock (&context->priv->render_lock);
last_ret = context->priv->last_ret;
g_mutex_unlock (&context->priv->render_lock);
if (last_ret != GST_FLOW_OK) {
return FALSE;
}
item = g_slice_new (GstDataQueueItem);
item->destroy = (GDestroyNotify) gst_nv_video_context_queue_free_item;
item->object = gst_mini_object_ref (obj);
item->size = 0;
item->duration = GST_CLOCK_TIME_NONE;
item->visible = TRUE;
if (!gst_data_queue_push (context->priv->queue, item)) {
item->destroy (item);
return FALSE;
}
return TRUE;
}
static gboolean
gst_nv_video_context_queue_check_full (GstDataQueue * queue, guint visible,
guint bytes, guint64 time, gpointer checkdata)
{
return FALSE;
}
static void
gst_nv_video_context_finalize (GObject * object)
{
GstNvVideoContext *context = GST_NV_VIDEO_CONTEXT (object);
GST_DEBUG_OBJECT (context, "finalize begin");
if (context->priv->queue) {
g_object_unref (context->priv->queue);
context->priv->queue = NULL;
}
if (context->priv->render_thread) {
g_thread_unref (context->priv->render_thread);
context->priv->render_thread = NULL;
}
if (context->window) {
gst_object_unref (context->window);
context->window = NULL;
}
if (context->display) {
gst_object_unref (context->display);
context->display = NULL;
}
g_mutex_clear (&context->priv->render_lock);
g_cond_clear (&context->priv->create_cond);
g_cond_clear (&context->priv->quit_cond);
g_cond_clear (&context->priv->eos_cond);
GST_DEBUG_OBJECT (context, "finalize done");
G_OBJECT_CLASS (gst_nv_video_context_parent_class)->finalize (object);
}
static void
gst_nv_video_context_init (GstNvVideoContext * context)
{
GstNvVideoContext *self = GST_NV_VIDEO_CONTEXT (context);
context->priv = (GstNvVideoContextPrivate *)gst_nv_video_context_get_instance_private (self);
g_mutex_init (&context->priv->render_lock);
g_cond_init (&context->priv->create_cond);
g_cond_init (&context->priv->quit_cond);
g_cond_init (&context->priv->eos_cond);
context->priv->queue = NULL;
context->priv->render_thread = NULL;
context->priv->render_thread_active = FALSE;
context->priv->eos_handled = FALSE;
context->using_NVMM = 0;
context->cuContext = NULL;
context->cuResource[0] = NULL;
context->cuResource[1] = NULL;
context->cuResource[2] = NULL;
context->gpu_id = 0;
GST_DEBUG_OBJECT (context, "init done");
}
static void
gst_nv_video_context_class_init (GstNvVideoContextClass * klass)
{
G_OBJECT_CLASS (klass)->finalize = gst_nv_video_context_finalize;
}
GstNvVideoContext *
gst_nv_video_context_new (GstNvVideoDisplay * display)
{
GstNvVideoContext *context = NULL;
static volatile gsize debug_init = 0;
const gchar *context_name = NULL;
if (g_once_init_enter (&debug_init)) {
GST_DEBUG_CATEGORY_INIT (gst_debug_nv_video_context, "nvvideocontext", 0,
"nvvideocontext");
g_once_init_leave (&debug_init, 1);
}
context_name = g_getenv ("GST_NV_VIDEO_CONTEXT");
#if NV_VIDEO_SINKS_HAS_EGL
if (!context && (!context_name || g_strstr_len (context_name, 3, "egl"))) {
context = GST_NV_VIDEO_CONTEXT (gst_nv_video_context_egl_new (display));
}
#endif
if (!context) {
GST_ERROR ("couldn't create context. GST_NV_VIDEO_CONTEXT = %s",
context_name ? context_name : "NULL");
return NULL;
}
context->display = gst_object_ref (display);
GST_DEBUG_OBJECT (context, "created context for display %" GST_PTR_FORMAT,
display);
return context;
}
gboolean
gst_nv_video_context_show_frame (GstNvVideoContext * context, GstBuffer * buf)
{
g_mutex_lock (&context->priv->render_lock);
if (context->priv->render_thread_active) {
g_mutex_unlock (&context->priv->render_lock);
return gst_nv_video_context_render_thread_show_frame (context, buf);
}
g_mutex_unlock (&context->priv->render_lock);
return FALSE;
}
void
gst_nv_video_context_handle_tearing (GstNvVideoContext * context)
{
GstNvVideoContextClass *context_class =
GST_NV_VIDEO_CONTEXT_GET_CLASS (context);
context_class->handle_tearing (context);
return;
}
void
gst_nv_video_context_handle_drc (GstNvVideoContext * context)
{
GstNvVideoContextClass *context_class =
GST_NV_VIDEO_CONTEXT_GET_CLASS (context);
context_class->handle_drc (context);
return;
}
void
gst_nv_video_context_handle_eos (GstNvVideoContext * context)
{
GstNvVideoContextClass *context_class =
GST_NV_VIDEO_CONTEXT_GET_CLASS (context);
GstDataQueueItem *item;
g_mutex_lock (&context->priv->render_lock);
if (!context->priv->render_thread_active) {
g_mutex_unlock (&context->priv->render_lock);
context_class->handle_eos (context);
return;
}
// Push NULL object in queue to indicate EOS and wait till it is handled.
item = g_slice_new (GstDataQueueItem);
item->destroy = (GDestroyNotify) gst_nv_video_context_queue_free_item;
item->object = NULL;
item->size = 0;
item->duration = GST_CLOCK_TIME_NONE;
item->visible = TRUE;
if (!gst_data_queue_push (context->priv->queue, item)) {
GST_ERROR_OBJECT (context, "faild to send EOS to render thread");
item->destroy (item);
g_mutex_unlock (&context->priv->render_lock);
return;
}
GST_TRACE_OBJECT (context, "wait for render thread to handle EOS");
while (context->priv->render_thread_active && !context->priv->eos_handled) {
gint64 end = g_get_monotonic_time () + G_TIME_SPAN_SECOND;
g_cond_wait_until (&context->priv->eos_cond, &context->priv->render_lock, end);
}
GST_TRACE_OBJECT (context, "wait for render thread to handle EOS is done");
context->priv->eos_handled = FALSE;
g_mutex_unlock (&context->priv->render_lock);
}
GstCaps *
gst_nv_video_context_get_caps (GstNvVideoContext * context)
{
GstNvVideoContextClass *context_class;
if (!context) {
return NULL;
}
context_class = GST_NV_VIDEO_CONTEXT_GET_CLASS (context);
return context_class->get_caps (context);
}
gboolean
gst_nv_video_context_set_window (GstNvVideoContext * context,
GstNvVideoWindow * window)
{
if (context->window) {
gst_object_unref (context->window);
}
if (window) {
// Before the object's GObjectClass.dispose method is called, every
// GWeakRef associated with it becomes empty.
g_weak_ref_set (&window->context, context);
}
context->window = window ? gst_object_ref (window) : NULL;
return TRUE;
}
void
gst_nv_video_context_destroy_render_thread (GstNvVideoContext * context)
{
if (context->priv->queue) {
gst_data_queue_set_flushing (context->priv->queue, TRUE);
gst_data_queue_flush (context->priv->queue);
}
g_mutex_lock (&context->priv->render_lock);
if (context->priv->render_thread_active) {
GST_DEBUG_OBJECT (context, "destroying render thread");
while (context->priv->render_thread_active) {
g_cond_wait (&context->priv->quit_cond, &context->priv->render_lock);
}
g_thread_join (context->priv->render_thread);
GST_DEBUG_OBJECT (context, "render thread destroyed");
}
g_mutex_unlock (&context->priv->render_lock);
}
gboolean
gst_nv_video_context_create_render_thread (GstNvVideoContext * context)
{
g_mutex_lock (&context->priv->render_lock);
if (!context->priv->render_thread) {
g_assert (context->priv->queue == NULL);
context->priv->queue =
gst_data_queue_new (gst_nv_video_context_queue_check_full, NULL, NULL,
NULL);
if (!context->priv->queue) {
g_mutex_unlock (&context->priv->render_lock);
return FALSE;
}
gst_data_queue_set_flushing (context->priv->queue, FALSE);
gst_data_queue_flush (context->priv->queue);
context->priv->render_thread =
g_thread_new ("NvVideoRenderThread",
(GThreadFunc) gst_nv_video_context_render_thread_func, context);
while (!context->priv->render_thread_active) {
g_cond_wait (&context->priv->create_cond, &context->priv->render_lock);
}
if (context->priv->last_ret != GST_FLOW_OK) {
g_object_unref (context->priv->queue);
context->priv->queue = NULL;
g_mutex_unlock (&context->priv->render_lock);
return FALSE;
}
GST_INFO_OBJECT (context, "render thread created");
}
g_mutex_unlock (&context->priv->render_lock);
return TRUE;
}
gboolean
gst_nv_video_context_create (GstNvVideoContext * context)
{
GstNvVideoContextClass *context_class;
context_class = GST_NV_VIDEO_CONTEXT_GET_CLASS (context);
return context_class->create (context);
}


@@ -0,0 +1,122 @@
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#ifndef __GST_NV_VIDEO_CONTEXT_H__
#define __GST_NV_VIDEO_CONTEXT_H__
#include <gst/gst.h>
#include <gst/video/gstvideosink.h>
#include <gst/video/video.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include "gstnvvideofwd.h"
#include <cuda.h>
#include <cudaGL.h>
#include <cuda_runtime.h>
G_BEGIN_DECLS
#define GST_TYPE_NV_VIDEO_CONTEXT \
(gst_nv_video_context_get_type())
#define GST_NV_VIDEO_CONTEXT(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV_VIDEO_CONTEXT, GstNvVideoContext))
#define GST_NV_VIDEO_CONTEXT_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_CONTEXT, GstNvVideoContextClass))
#define GST_IS_NV_VIDEO_CONTEXT(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_CONTEXT))
#define GST_IS_NV_VIDEO_CONTEXT_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_CONTEXT))
#define GST_NV_VIDEO_CONTEXT_GET_CLASS(o) \
(G_TYPE_INSTANCE_GET_CLASS((o), GST_TYPE_NV_VIDEO_CONTEXT, GstNvVideoContextClass))
typedef enum
{
GST_NV_VIDEO_CONTEXT_TYPE_NONE = 0,
GST_NV_VIDEO_CONTEXT_TYPE_EGL = (1 << 0),
GST_NV_VIDEO_CONTEXT_TYPE_ANY = G_MAXUINT32
} GstNvVideoContextType;
struct _GstNvVideoContextClass
{
GstObjectClass parent_class;
gboolean (*create) (GstNvVideoContext * context);
gboolean (*setup) (GstNvVideoContext * context);
void (*cleanup) (GstNvVideoContext * context);
GstCaps *(*get_caps) (GstNvVideoContext * context);
gboolean (*show_frame) (GstNvVideoContext * context, GstBuffer * buf);
void (*handle_eos) (GstNvVideoContext * context);
void (*handle_drc) (GstNvVideoContext * context);
void (*handle_tearing) (GstNvVideoContext * context);
};
struct _GstNvVideoContext
{
GstObject parent;
GstNvVideoDisplay *display;
GstNvVideoWindow *window;
GstNvVideoContextType type;
GstNvVideoContextPrivate *priv;
guint using_NVMM;
GstVideoInfo configured_info;
gboolean is_cuda_init;
CUcontext cuContext;
CUgraphicsResource cuResource[3];
unsigned int gpu_id;
};
GST_EXPORT
GstNvVideoContext * gst_nv_video_context_new (GstNvVideoDisplay * display);
GST_EXPORT
gboolean gst_nv_video_context_create (GstNvVideoContext * context);
GST_EXPORT
GstCaps * gst_nv_video_context_get_caps (GstNvVideoContext * context);
GST_EXPORT
gboolean gst_nv_video_context_set_window (GstNvVideoContext * context, GstNvVideoWindow * window);
GST_EXPORT
gboolean gst_nv_video_context_show_frame (GstNvVideoContext * context, GstBuffer * buf);
GST_EXPORT
void gst_nv_video_context_handle_eos (GstNvVideoContext * context);
GST_EXPORT
void gst_nv_video_context_handle_drc (GstNvVideoContext * context);
GST_EXPORT
void gst_nv_video_context_handle_tearing (GstNvVideoContext * context);
GST_EXPORT
gboolean gst_nv_video_context_create_render_thread (GstNvVideoContext * context);
GST_EXPORT
void gst_nv_video_context_destroy_render_thread (GstNvVideoContext * context);
GST_EXPORT
GstNvVideoContextType gst_nv_video_context_get_handle_type (GstNvVideoContext * context);
GType gst_nv_video_context_get_type (void);
G_END_DECLS
#endif /* __GST_NV_VIDEO_CONTEXT_H__ */


@@ -0,0 +1,111 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#include "display.h"
#include "context.h"
#include "window.h"
#if NV_VIDEO_SINKS_HAS_X11
#include "display_x11.h"
#endif
#define GST_CAT_DEFAULT gst_debug_nv_video_display
GST_DEBUG_CATEGORY (GST_CAT_DEFAULT);
G_DEFINE_ABSTRACT_TYPE (GstNvVideoDisplay, gst_nv_video_display,
GST_TYPE_OBJECT);
GstNvVideoDisplayType
gst_nv_video_display_get_handle_type (GstNvVideoDisplay * display)
{
g_return_val_if_fail (GST_IS_NV_VIDEO_DISPLAY (display),
GST_NV_VIDEO_DISPLAY_TYPE_NONE);
return display->type;
}
static void
gst_nv_video_display_init (GstNvVideoDisplay * display)
{
}
gboolean
gst_nv_video_display_create_context (GstNvVideoDisplay * display,
GstNvVideoContext ** ptr_context)
{
GstNvVideoContext *context = NULL;
g_return_val_if_fail (display != NULL, FALSE);
g_return_val_if_fail (ptr_context != NULL, FALSE);
context = gst_nv_video_context_new (display);
if (!context) {
GST_ERROR ("context creation failed");
return FALSE;
}
if (!gst_nv_video_context_create (context)) {
return FALSE;
}
*ptr_context = context;
GST_DEBUG_OBJECT (display, "created context %" GST_PTR_FORMAT, context);
return TRUE;
}
GstNvVideoWindow *
gst_nv_video_display_create_window (GstNvVideoDisplay * display)
{
return gst_nv_video_window_new (display);
}
static void
gst_nv_video_display_class_init (GstNvVideoDisplayClass * klass)
{
}
gboolean
gst_nv_video_display_new (GstNvVideoDisplay ** display)
{
static volatile gsize debug_init = 0;
const gchar *winsys_name = NULL;
if (g_once_init_enter (&debug_init)) {
GST_DEBUG_CATEGORY_INIT (gst_debug_nv_video_display, "nvvideodisplay", 0,
"nvvideodisplay");
g_once_init_leave (&debug_init, 1);
}
winsys_name = g_getenv ("GST_NV_VIDEO_WINSYS");
#if NV_VIDEO_SINKS_HAS_X11
if (!*display && (!winsys_name || g_strstr_len (winsys_name, 3, "x11"))) {
*display = GST_NV_VIDEO_DISPLAY (gst_nv_video_display_x11_new (NULL));
}
#endif
if (!*display) {
GST_ERROR ("couldn't create display. GST_NV_VIDEO_WINSYS = %s",
winsys_name ? winsys_name : "NULL");
return FALSE;
}
return TRUE;
}


@@ -0,0 +1,79 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#ifndef __GST_NV_VIDEO_DISPLAY_H__
#define __GST_NV_VIDEO_DISPLAY_H__
#include <gst/gst.h>
#include <gst/video/gstvideosink.h>
#include <gst/video/video.h>
#include "gstnvvideofwd.h"
G_BEGIN_DECLS
#define GST_TYPE_NV_VIDEO_DISPLAY \
(gst_nv_video_display_get_type())
#define GST_NV_VIDEO_DISPLAY(obj)\
(G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV_VIDEO_DISPLAY, GstNvVideoDisplay))
#define GST_NV_VIDEO_DISPLAY_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_DISPLAY, GstNvVideoDisplayClass))
#define GST_IS_NV_VIDEO_DISPLAY(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_DISPLAY))
#define GST_IS_NV_VIDEO_DISPLAY_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_DISPLAY))
#define GST_NV_VIDEO_DISPLAY_CAST(obj) \
((GstNvVideoDisplay*)(obj))
#define GST_NV_VIDEO_DISPLAY_GET_CLASS(o) \
(G_TYPE_INSTANCE_GET_CLASS((o), GST_TYPE_NV_VIDEO_DISPLAY, GstNvVideoDisplayClass))
struct _GstNvVideoDisplayClass
{
GstObjectClass parent_class;
guintptr (*get_handle) (GstNvVideoDisplay * display);
};
typedef enum
{
GST_NV_VIDEO_DISPLAY_TYPE_NONE = 0,
GST_NV_VIDEO_DISPLAY_TYPE_X11 = (1 << 0),
GST_NV_VIDEO_DISPLAY_TYPE_ANY = G_MAXUINT32
} GstNvVideoDisplayType;
struct _GstNvVideoDisplay
{
GstObject parent;
GstNvVideoDisplayType type;
};
GST_EXPORT
gboolean gst_nv_video_display_new (GstNvVideoDisplay ** display);
GST_EXPORT
gboolean gst_nv_video_display_create_context (GstNvVideoDisplay * display, GstNvVideoContext ** ptr_context);
GST_EXPORT
GstNvVideoDisplayType gst_nv_video_display_get_handle_type (GstNvVideoDisplay * display);
GST_EXPORT
GstNvVideoWindow *gst_nv_video_display_create_window (GstNvVideoDisplay * display);
GType gst_nv_video_display_get_type (void);
G_END_DECLS
#endif /* __GST_NV_VIDEO_DISPLAY_H__ */


@@ -0,0 +1,486 @@
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#include "context_egl.h"
#include "display.h"
#include "display_x11.h"
#include "window.h"
#include "nvbufsurface.h"
#include <EGL/egl.h>
G_GNUC_INTERNAL extern GstDebugCategory *gst_debug_nv_video_context;
#define GST_CAT_DEFAULT gst_debug_nv_video_context
G_DEFINE_TYPE (GstNvVideoContextEgl, gst_nv_video_context_egl,
GST_TYPE_NV_VIDEO_CONTEXT);
static GstCaps *
gst_nv_video_context_egl_new_template_caps (GstVideoFormat format)
{
return gst_caps_new_simple ("video/x-raw",
"format", G_TYPE_STRING, gst_video_format_to_string (format),
"width", GST_TYPE_INT_RANGE, 1, G_MAXINT,
"height", GST_TYPE_INT_RANGE, 1, G_MAXINT,
"framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
}
static void
log_egl_error (GstNvVideoContext * context, const char *name)
{
GST_ERROR_OBJECT (context, "egl error: %s returned %x", name, eglGetError ());
}
static gboolean
gst_nv_video_context_egl_is_surface_changed (GstNvVideoContextEgl * context_egl)
{
gint w, h;
eglQuerySurface (context_egl->display, context_egl->surface, EGL_WIDTH, &w);
eglQuerySurface (context_egl->display, context_egl->surface, EGL_HEIGHT, &h);
if (context_egl->surface_width != w || context_egl->surface_height != h) {
context_egl->surface_width = w;
context_egl->surface_height = h;
return TRUE;
}
return FALSE;
}
static gboolean
gst_nv_video_context_egl_show_frame (GstNvVideoContext * context,
GstBuffer * buf)
{
GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
EGLImageKHR image = EGL_NO_IMAGE_KHR;
GstMemory *mem;
NvBufSurface *in_surface = NULL;
gboolean is_cuda_mem = TRUE;
if (!context_egl->surface) {
guintptr handle = gst_nv_video_window_get_handle (context->window);
context_egl->surface =
eglCreateWindowSurface (context_egl->display, context_egl->config,
(EGLNativeWindowType) handle, NULL);
if (context_egl->surface == EGL_NO_SURFACE) {
log_egl_error (context, "eglCreateWindowSurface");
return FALSE;
}
if (!eglMakeCurrent (context_egl->display, context_egl->surface,
context_egl->surface, context_egl->context)) {
log_egl_error (context, "eglMakeCurrent");
return FALSE;
}
GST_DEBUG_OBJECT (context, "egl surface %p created", context_egl->surface);
}
if (!context_egl->renderer) {
context_egl->renderer = gst_nv_video_renderer_new (context, "gl");
if (!context_egl->renderer) {
GST_ERROR_OBJECT (context, "renderer creation failed");
return FALSE;
}
if (!gst_nv_video_renderer_setup (context_egl->renderer)) {
GST_ERROR_OBJECT (context, "renderer setup failed");
return FALSE;
}
}
if (context->using_NVMM) {
if (!context->is_cuda_init) {
if (!gst_nv_video_renderer_cuda_init (context, context_egl->renderer)) {
GST_ERROR_OBJECT (context, "cuda init failed");
return FALSE;
}
}
}
if (gst_nv_video_context_egl_is_surface_changed (context_egl)) {
GST_DEBUG_OBJECT (context, "surface dimensions changed to %dx%d",
context_egl->surface_width, context_egl->surface_height);
gst_nv_video_renderer_update_viewport (context_egl->renderer,
context_egl->surface_width, context_egl->surface_height);
}
if (gst_buffer_n_memory (buf) >= 1 && (mem = gst_buffer_peek_memory (buf, 0))) {
//Software buffer handling
if (!context->using_NVMM) {
if (!gst_nv_video_renderer_fill_texture(context, context_egl->renderer, buf)) {
GST_ERROR_OBJECT (context, "fill_texture failed");
return FALSE;
}
if (!gst_nv_video_renderer_draw_2D_Texture (context_egl->renderer)) {
GST_ERROR_OBJECT (context, "draw 2D Texture failed");
return FALSE;
}
}
else {
// NvBufSurface support (NVRM and CUDA)
GstMapInfo map = { NULL, (GstMapFlags) 0, NULL, 0, 0, };
mem = gst_buffer_peek_memory (buf, 0);
gst_memory_map (mem, &map, GST_MAP_READ);
/* Types of Buffers handled -
* NvBufSurface
* - NVMM buffer type
* - Cuda buffer type
*/
/* NvBufSurface type are handled here */
in_surface = (NvBufSurface*) map.data;
NvBufSurfaceMemType memType = in_surface->memType;
if (memType == NVBUF_MEM_DEFAULT) {
#ifdef IS_DESKTOP
memType = NVBUF_MEM_CUDA_DEVICE;
#else
memType = NVBUF_MEM_SURFACE_ARRAY;
#endif
}
if (memType == NVBUF_MEM_SURFACE_ARRAY || memType == NVBUF_MEM_HANDLE) {
is_cuda_mem = FALSE;
}
if (is_cuda_mem == FALSE) {
/* NvBufSurface - NVMM buffer type are handled here */
if (in_surface->batchSize != 1) {
GST_ERROR_OBJECT (context,"ERROR: Batch size not 1\n");
return FALSE;
}
if (NvBufSurfaceMapEglImage (in_surface, 0) !=0 ) {
GST_ERROR_OBJECT (context,"ERROR: NvBufSurfaceMapEglImage\n");
return FALSE;
}
image = in_surface->surfaceList[0].mappedAddr.eglImage;
gst_nv_video_renderer_draw_eglimage (context_egl->renderer, image);
}
else {
/* NvBufSurface - Cuda buffer type are handled here */
if (!gst_nv_video_renderer_cuda_buffer_copy (context, context_egl->renderer, buf))
{
GST_ERROR_OBJECT (context,"cuda buffer copy failed\n");
return FALSE;
}
if (!gst_nv_video_renderer_draw_2D_Texture (context_egl->renderer)) {
GST_ERROR_OBJECT (context,"draw 2D texture failed");
return FALSE;
}
}
gst_memory_unmap (mem, &map);
}
}
if (!eglSwapBuffers (context_egl->display, context_egl->surface)) {
log_egl_error (context, "eglSwapBuffers");
}
if (image != EGL_NO_IMAGE_KHR) {
NvBufSurfaceUnMapEglImage (in_surface, 0);
}
GST_TRACE_OBJECT (context, "release %p hold %p", context_egl->last_buf, buf);
// TODO: We hold buffer used in current drawing till next swap buffer
// is completed so that decoder won't write it till GL has finished using it.
// When Triple buffering in X is enabled, this can cause tearing as completion
// of next swap buffer won't guarantee GL has finished with the buffer used in
// current swap buffer. This issue will be addressed when we transfer SyncFds
// from decoder <-> sink.
if (!context_egl->is_drc_on) {
gst_buffer_replace (&context_egl->last_buf, buf);
}
return TRUE;
}
static void
gst_nv_video_context_egl_handle_tearing (GstNvVideoContext * context)
{
GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
context_egl->is_drc_on = 0;
return;
}
static void
gst_nv_video_context_egl_handle_drc (GstNvVideoContext * context)
{
GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
GST_TRACE_OBJECT (context, "release last frame when resolution changes %p", context_egl->last_buf);
if (context_egl->last_buf)
context_egl->is_drc_on = 1;
gst_buffer_replace (&context_egl->last_buf, NULL);
}
static void
gst_nv_video_context_egl_handle_eos (GstNvVideoContext * context)
{
GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
GST_TRACE_OBJECT (context, "release last frame %p", context_egl->last_buf);
gst_buffer_replace (&context_egl->last_buf, NULL);
}
static gboolean
gst_nv_video_context_egl_setup (GstNvVideoContext * context)
{
GstNvVideoDisplayX11 *display_x11 = (GstNvVideoDisplayX11 *) context->display;
GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
EGLint major, minor;
EGLint num_configs;
EGLint attr[] = {
EGL_BUFFER_SIZE, 24,
EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
EGL_NONE
};
EGLint attribs[] = { EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE };
GST_DEBUG_OBJECT (context, "EGL context setup");
context_egl->display =
eglGetDisplay ((EGLNativeDisplayType) display_x11->dpy);
if (!eglInitialize (context_egl->display, &major, &minor)) {
log_egl_error (context, "eglInitialize");
return FALSE;
}
GST_INFO_OBJECT (context, "egl version: %d.%d", major, minor);
eglBindAPI (EGL_OPENGL_ES_API);
if (!eglChooseConfig (context_egl->display, attr, &context_egl->config, 1,
&num_configs)) {
log_egl_error (context, "eglChooseConfig");
}
context_egl->context =
eglCreateContext (context_egl->display, context_egl->config,
EGL_NO_CONTEXT, attribs);
if (context_egl->context == EGL_NO_CONTEXT) {
log_egl_error (context, "eglChooseConfig");
return FALSE;
}
GST_DEBUG_OBJECT (context, "egl context %p created", context_egl->context);
return TRUE;
}
static void
gst_nv_video_context_egl_cleanup (GstNvVideoContext * context)
{
GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
GST_DEBUG_OBJECT (context, "egl cleanup display=%p surface=%p context=%p",
context_egl->display, context_egl->surface, context_egl->context);
if (context_egl->renderer) {
if (context->using_NVMM) {
gst_nv_video_renderer_cuda_cleanup (context, context_egl->renderer);
}
gst_nv_video_renderer_cleanup (context_egl->renderer);
gst_object_unref (context_egl->renderer);
context_egl->renderer = NULL;
}
if (!eglMakeCurrent (context_egl->display, EGL_NO_SURFACE, EGL_NO_SURFACE,
EGL_NO_CONTEXT)) {
log_egl_error (context, "eglMakeCurrent");
}
if (context_egl->surface) {
eglDestroySurface (context_egl->display, context_egl->surface);
context_egl->surface = NULL;
}
if (context_egl->context) {
eglDestroyContext (context_egl->display, context_egl->context);
context_egl->context = NULL;
}
eglTerminate (context_egl->display);
context_egl->display = NULL;
GST_DEBUG_OBJECT (context, "egl cleanup done");
return;
}
static GstCaps *
gst_nv_video_context_egl_getcaps (GstNvVideoContext * context)
{
GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
GST_LOG_OBJECT (context, "context add_caps %" GST_PTR_FORMAT,
context_egl->caps);
return gst_caps_copy (context_egl->caps);
}
static gboolean
gst_nv_video_context_egl_create (GstNvVideoContext * context)
{
return gst_nv_video_context_create_render_thread (context);
}
static void
gst_nv_video_context_egl_finalize (GObject * object)
{
GstNvVideoContext *context = GST_NV_VIDEO_CONTEXT (object);
GstNvVideoContextEgl *context_egl = GST_NV_VIDEO_CONTEXT_EGL (context);
GST_DEBUG_OBJECT (context, "finalize begin");
gst_nv_video_context_destroy_render_thread (context);
if (context_egl->caps) {
gst_caps_unref (context_egl->caps);
}
G_OBJECT_CLASS (gst_nv_video_context_egl_parent_class)->finalize (object);
GST_DEBUG_OBJECT (context, "finalize end");
}
static void
gst_nv_video_context_egl_class_init (GstNvVideoContextEglClass * klass)
{
GstNvVideoContextClass *context_class = (GstNvVideoContextClass *) klass;
context_class->create = GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_create);
context_class->setup = GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_setup);
context_class->get_caps =
GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_getcaps);
context_class->show_frame =
GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_show_frame);
context_class->handle_eos =
GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_handle_eos);
context_class->handle_drc =
GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_handle_drc);
context_class->handle_tearing =
GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_handle_tearing);
context_class->cleanup = GST_DEBUG_FUNCPTR (gst_nv_video_context_egl_cleanup);
G_OBJECT_CLASS (klass)->finalize = gst_nv_video_context_egl_finalize;
}
static void
gst_nv_video_context_egl_init (GstNvVideoContextEgl * context_egl)
{
GstNvVideoContext *context = (GstNvVideoContext *) context_egl;
context->type = GST_NV_VIDEO_CONTEXT_TYPE_EGL;
context_egl->context = NULL;
context_egl->display = NULL;
context_egl->surface = NULL;
context_egl->config = NULL;
context_egl->surface_width = 0;
context_egl->surface_height = 0;
context_egl->last_buf = NULL;
context_egl->is_drc_on = 0;
}
GstNvVideoContextEgl *
gst_nv_video_context_egl_new (GstNvVideoDisplay * display)
{
GstNvVideoContextEgl *ret;
GstCaps *caps = NULL;
guint i, n;
// for now we need x11 display for EGL context.
if ((gst_nv_video_display_get_handle_type (display) &
GST_NV_VIDEO_DISPLAY_TYPE_X11)
== 0) {
return NULL;
}
ret = g_object_new (GST_TYPE_NV_VIDEO_CONTEXT_EGL, NULL);
gst_object_ref_sink (ret);
// TODO: query from egl
caps = gst_caps_new_empty ();
// Software buffer caps
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_RGBA));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_BGRA));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_ARGB));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_ABGR));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_RGBx));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_BGRx));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_xRGB));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_xBGR));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_AYUV));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_Y444));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_RGB));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_BGR));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_I420));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_YV12));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_NV12));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_NV21));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_Y42B));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_Y41B));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_RGB16));
n = gst_caps_get_size(caps);
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_NV12));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_NV21));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_I420));
gst_caps_append (caps,
gst_nv_video_context_egl_new_template_caps (GST_VIDEO_FORMAT_RGBA));
for (i = n; i < n+4; i++) {
GstCapsFeatures *features = gst_caps_features_new ("memory:NVMM", NULL);
gst_caps_set_features (caps, i, features);
}
gst_caps_replace (&ret->caps, caps);
gst_caps_unref (caps);
return ret;
}


@@ -0,0 +1,75 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#ifndef __GST_NV_VIDEO_CONTEXT_EGL_H__
#define __GST_NV_VIDEO_CONTEXT_EGL_H__
#include "context.h"
#include "renderer.h"
G_BEGIN_DECLS
#define GST_TYPE_NV_VIDEO_CONTEXT_EGL \
(gst_nv_video_context_egl_get_type())
#define GST_NV_VIDEO_CONTEXT_EGL(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV_VIDEO_CONTEXT_EGL, GstNvVideoContextEgl))
#define GST_NV_VIDEO_CONTEXT_EGL_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_CONTEXT_EGL, GstNvVideoContextEglClass))
#define GST_IS_NV_VIDEO_CONTEXT_EGL(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_CONTEXT_EGL))
#define GST_IS_NV_VIDEO_CONTEXT_EGL_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_CONTEXT_EGL))
#define GST_NV_VIDEO_CONTEXT_EGL_CAST(obj) \
((GstNvVideoContextEgl*)(obj))
typedef struct _GstNvVideoContextEgl GstNvVideoContextEgl;
typedef struct _GstNvVideoContextEglClass GstNvVideoContextEglClass;
struct _GstNvVideoContextEgl
{
GstNvVideoContext parent;
gpointer context;
gpointer display;
gpointer surface;
gpointer config;
gint surface_width;
gint surface_height;
GstNvVideoRenderer *renderer;
GstCaps *caps;
GstBuffer *last_buf;
gint is_drc_on;
};
struct _GstNvVideoContextEglClass
{
GstNvVideoContextClass parent_class;
};
G_GNUC_INTERNAL
GstNvVideoContextEgl * gst_nv_video_context_egl_new (GstNvVideoDisplay * display);
GType gst_nv_video_context_egl_get_type (void);
G_END_DECLS
#endif /* __GST_NV_VIDEO_CONTEXT_EGL_H__ */


@@ -0,0 +1,40 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#ifndef __GST_NV_VIDEO_FWD_H__
#define __GST_NV_VIDEO_FWD_H__
#include <gst/gst.h>
G_BEGIN_DECLS
typedef struct _GstNvVideoDisplay GstNvVideoDisplay;
typedef struct _GstNvVideoDisplayClass GstNvVideoDisplayClass;
typedef struct _GstNvVideoWindow GstNvVideoWindow;
typedef struct _GstNvVideoWindowClass GstNvVideoWindowClass;
typedef struct _GstNvVideoContext GstNvVideoContext;
typedef struct _GstNvVideoContextClass GstNvVideoContextClass;
typedef struct _GstNvVideoContextPrivate GstNvVideoContextPrivate;
typedef struct _GstNvVideoRenderer GstNvVideoRenderer;
typedef struct _GstNvVideoRendererClass GstNvVideoRendererClass;
G_END_DECLS
#endif /* __GST_NV_VIDEO_FWD_H__ */


@@ -0,0 +1,165 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#include <gst/base/gstdataqueue.h>
#include "renderer.h"
#include "context.h"
#if NV_VIDEO_SINKS_HAS_GL
#include "renderer_gl.h"
#endif
#define GST_CAT_DEFAULT gst_debug_nv_video_renderer
GST_DEBUG_CATEGORY (GST_CAT_DEFAULT);
G_DEFINE_ABSTRACT_TYPE (GstNvVideoRenderer, gst_nv_video_renderer,
GST_TYPE_OBJECT);
static void
gst_nv_video_renderer_init (GstNvVideoRenderer * renderer)
{
}
static void
gst_nv_video_renderer_class_init (GstNvVideoRendererClass * klass)
{
}
GstNvVideoRenderer *
gst_nv_video_renderer_new (GstNvVideoContext * context, const char *name)
{
GstNvVideoRenderer *renderer = NULL;
static volatile gsize debug_init = 0;
if (g_once_init_enter (&debug_init)) {
GST_DEBUG_CATEGORY_INIT (gst_debug_nv_video_renderer, "nvvideorenderer", 0,
"nvvideorenderer");
g_once_init_leave (&debug_init, 1);
}
if (!name) {
GST_ERROR ("renderer name not valid");
return NULL;
}
#if NV_VIDEO_SINKS_HAS_GL
if (g_strstr_len (name, 2, "gl")) {
renderer = GST_NV_VIDEO_RENDERER (gst_nv_video_renderer_gl_new (context));
}
#endif
if (!renderer) {
GST_ERROR ("couldn't create renderer name = %s", name);
return NULL;
}
renderer->format = context->configured_info.finfo->format;
GST_DEBUG_OBJECT (renderer, "created %s renderer for context %" GST_PTR_FORMAT, name, context);
return renderer;
}
gboolean
gst_nv_video_renderer_cuda_init (GstNvVideoContext * context, GstNvVideoRenderer * renderer)
{
GstNvVideoRendererClass *renderer_class;
renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
return renderer_class->cuda_init (context, renderer);
}
void
gst_nv_video_renderer_cuda_cleanup (GstNvVideoContext * context, GstNvVideoRenderer * renderer)
{
GstNvVideoRendererClass *renderer_class;
renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
renderer_class->cuda_cleanup (context, renderer);
}
void
gst_nv_video_renderer_cleanup (GstNvVideoRenderer * renderer)
{
GstNvVideoRendererClass *renderer_class;
renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
renderer_class->cleanup (renderer);
}
gboolean
gst_nv_video_renderer_setup (GstNvVideoRenderer * renderer)
{
GstNvVideoRendererClass *renderer_class;
renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
return renderer_class->setup (renderer);
}
void
gst_nv_video_renderer_update_viewport (GstNvVideoRenderer * renderer, int width, int height)
{
GstNvVideoRendererClass *renderer_class;
renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
renderer_class->update_viewport (renderer, width, height);
}
gboolean
gst_nv_video_renderer_fill_texture (GstNvVideoContext *context, GstNvVideoRenderer * renderer, GstBuffer * buf)
{
GstNvVideoRendererClass *renderer_class;
renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
return renderer_class->fill_texture (context, renderer, buf);
}
gboolean
gst_nv_video_renderer_cuda_buffer_copy (GstNvVideoContext *context, GstNvVideoRenderer * renderer, GstBuffer * buf)
{
GstNvVideoRendererClass *renderer_class;
renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
return renderer_class->cuda_buffer_copy (context, renderer, buf);
}
gboolean
gst_nv_video_renderer_draw_2D_Texture (GstNvVideoRenderer * renderer)
{
GstNvVideoRendererClass *renderer_class;
renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
return renderer_class->draw_2D_Texture (renderer);
}
gboolean
gst_nv_video_renderer_draw_eglimage (GstNvVideoRenderer * renderer, void * image)
{
GstNvVideoRendererClass *renderer_class;
renderer_class = GST_NV_VIDEO_RENDERER_GET_CLASS (renderer);
return renderer_class->draw_eglimage (renderer, image);
}


@@ -0,0 +1,99 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#ifndef __GST_NV_VIDEO_RENDERER_H__
#define __GST_NV_VIDEO_RENDERER_H__
#include <gst/gst.h>
#include <gst/video/video.h>
#include "gstnvvideofwd.h"
G_BEGIN_DECLS
#define GST_TYPE_NV_VIDEO_RENDERER \
(gst_nv_video_renderer_get_type())
#define GST_NV_VIDEO_RENDERER(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV_VIDEO_RENDERER, GstNvVideoRenderer))
#define GST_NV_VIDEO_RENDERER_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_RENDERER, GstNvVideoRendererClass))
#define GST_IS_NV_VIDEO_RENDERER(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_RENDERER))
#define GST_IS_NV_VIDEO_RENDERER_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_RENDERER))
#define GST_NV_VIDEO_RENDERER_GET_CLASS(o) \
(G_TYPE_INSTANCE_GET_CLASS((o), GST_TYPE_NV_VIDEO_RENDERER, GstNvVideoRendererClass))
struct _GstNvVideoRendererClass
{
GstObjectClass parent_class;
gboolean (*cuda_init) (GstNvVideoContext *context, GstNvVideoRenderer * renderer);
void (*cuda_cleanup) (GstNvVideoContext *context, GstNvVideoRenderer * renderer);
gboolean (*setup) (GstNvVideoRenderer * renderer);
void (*cleanup) (GstNvVideoRenderer * renderer);
void (*update_viewport) (GstNvVideoRenderer * renderer, int width, int height);
gboolean (*fill_texture) (GstNvVideoContext *context, GstNvVideoRenderer * renderer, GstBuffer * buf);
gboolean (*cuda_buffer_copy) (GstNvVideoContext *context, GstNvVideoRenderer * renderer, GstBuffer * buf);
gboolean (*draw_2D_Texture) (GstNvVideoRenderer * renderer);
gboolean (*draw_eglimage) (GstNvVideoRenderer * renderer, void * image);
};
struct _GstNvVideoRenderer
{
GstObject parent;
GstNvVideoContext * context;
GstVideoFormat format;
};
GST_EXPORT
GstNvVideoRenderer * gst_nv_video_renderer_new (GstNvVideoContext * context, const char *name);
GST_EXPORT
gboolean gst_nv_video_renderer_cuda_init (GstNvVideoContext * context, GstNvVideoRenderer * renderer);
GST_EXPORT
void gst_nv_video_renderer_cuda_cleanup (GstNvVideoContext * context, GstNvVideoRenderer * renderer);
GST_EXPORT
gboolean gst_nv_video_renderer_setup (GstNvVideoRenderer * renderer);
GST_EXPORT
void gst_nv_video_renderer_cleanup (GstNvVideoRenderer * renderer);
GST_EXPORT
void gst_nv_video_renderer_update_viewport (GstNvVideoRenderer * renderer, int width, int height);
GST_EXPORT
gboolean gst_nv_video_renderer_fill_texture (GstNvVideoContext *context, GstNvVideoRenderer * renderer, GstBuffer * buf);
GST_EXPORT
gboolean gst_nv_video_renderer_cuda_buffer_copy (GstNvVideoContext *context, GstNvVideoRenderer * renderer, GstBuffer * buf);
GST_EXPORT
gboolean gst_nv_video_renderer_draw_2D_Texture (GstNvVideoRenderer * renderer);
GST_EXPORT
gboolean gst_nv_video_renderer_draw_eglimage (GstNvVideoRenderer * renderer, void * image);
GType gst_nv_video_renderer_get_type (void);
G_END_DECLS
#endif /* __GST_NV_VIDEO_RENDERER_H__ */

View File

File diff suppressed because it is too large.


@@ -0,0 +1,93 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#ifndef __GST_NV_VIDEO_RENDERER_GL_H__
#define __GST_NV_VIDEO_RENDERER_GL_H__
#include "context.h"
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
G_BEGIN_DECLS
#define GST_TYPE_NV_VIDEO_RENDERER_GL \
(gst_nv_video_renderer_gl_get_type())
#define GST_NV_VIDEO_RENDERER_GL(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV_VIDEO_RENDERER_GL, GstNvVideoRendererGl))
#define GST_NV_VIDEO_RENDERER_GL_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_RENDERER_GL, GstNvVideoRendererGlClass))
#define GST_IS_NV_VIDEO_RENDERER_GL(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_RENDERER_GL))
#define GST_IS_NV_VIDEO_RENDERER_GL_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_RENDERER_GL))
#define GST_NV_VIDEO_RENDERER_GL_CAST(obj) \
((GstNvVideoRendererGl*)(obj))
typedef struct _GstNvVideoRendererGl GstNvVideoRendererGl;
typedef struct _GstNvVideoRendererGlClass GstNvVideoRendererGlClass;
#define RENDERER_NUM_GL_TEXTURES 1
struct _GstNvVideoRendererGl
{
GstNvVideoRenderer parent;
GLuint vert_obj[3]; /* EGL frame, 2D frame, 2D frame border*/
GLuint frag_obj[3]; /* EGL frame, 2D frame, 2D frame border*/
GLuint prog_obj[3]; /* EGL frame, 2D frame, 2D frame border*/
GLint pos;
GLint tex_pos;
GLint tex_sampler;
GLsizei num_textures;
GLuint textures[RENDERER_NUM_GL_TEXTURES];
unsigned int vertex_buffer;
unsigned int index_buffer;
//Defining different attribs and uniforms for 2D textures
GLuint position_loc[2]; /* Frame and Border */
GLuint texpos_loc[1]; /* Frame */
GLuint tex_scale_loc[1][3]; /* [frame] RGB/Y, U/UV, V */
GLuint tex_loc[1][3]; /* [frame] RGB/Y, U/UV, V */
unsigned int vertex_buffer_2d;
unsigned int index_buffer_2d;
gint num_textures_2d;
GLuint textures_2d[3];
GLuint stride[3];
PFNGLEGLIMAGETARGETTEXTURE2DOESPROC glEGLImageTargetTexture2DOES;
};
struct _GstNvVideoRendererGlClass
{
GstNvVideoRendererClass parent_class;
};
G_GNUC_INTERNAL
GstNvVideoRendererGl * gst_nv_video_renderer_gl_new (GstNvVideoContext * context);
void
gst_nv_video_renderer_gl_process_shaders (GstNvVideoRenderer * renderer, gchar ** frag_prog, const gchar *texnames[], GstVideoFormat format);
gboolean
gst_nv_video_renderer_gl_cuda_init (GstNvVideoContext * context, GstNvVideoRenderer * renderer);
GType gst_nv_video_renderer_gl_get_type (void);
G_END_DECLS
#endif /* __GST_NV_VIDEO_RENDERER_GL_H__ */
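/*
 * Illustrative sketch, not taken from renderer_gl.c: how the stored
 * glEGLImageTargetTexture2DOES pointer is typically used to back a texture
 * with an EGLImage. The GL_TEXTURE_EXTERNAL_OES target and the
 * eglGetProcAddress() lookup (which needs <EGL/egl.h>) are assumptions about
 * the implementation, not guaranteed by this header.
 */
#include <EGL/egl.h>

static gboolean
example_bind_eglimage (GstNvVideoRendererGl * renderer_gl, void * image)
{
  if (!renderer_gl->glEGLImageTargetTexture2DOES) {
    renderer_gl->glEGLImageTargetTexture2DOES =
        (PFNGLEGLIMAGETARGETTEXTURE2DOESPROC)
        eglGetProcAddress ("glEGLImageTargetTexture2DOES");
    if (!renderer_gl->glEGLImageTargetTexture2DOES)
      return FALSE;
  }

  glBindTexture (GL_TEXTURE_EXTERNAL_OES, renderer_gl->textures[0]);
  renderer_gl->glEGLImageTargetTexture2DOES (GL_TEXTURE_EXTERNAL_OES,
      (GLeglImageOES) image);

  return glGetError () == GL_NO_ERROR;
}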


@@ -0,0 +1,125 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#include "window.h"
#if NV_VIDEO_SINKS_HAS_X11
#include "window_x11.h"
#endif
#define GST_CAT_DEFAULT gst_debug_nv_video_window
GST_DEBUG_CATEGORY (GST_CAT_DEFAULT);
#define gst_nv_video_window_parent_class parent_class
G_DEFINE_ABSTRACT_TYPE (GstNvVideoWindow, gst_nv_video_window, GST_TYPE_OBJECT);
static void
gst_nv_video_window_finalize (GObject * object)
{
GstNvVideoWindow *window = GST_NV_VIDEO_WINDOW (object);
g_weak_ref_clear (&window->context);
gst_object_unref (window->display);
G_OBJECT_CLASS (gst_nv_video_window_parent_class)->finalize (object);
}
static void
gst_nv_video_window_init (GstNvVideoWindow * window)
{
g_weak_ref_init (&window->context, NULL);
}
static void
gst_nv_video_window_class_init (GstNvVideoWindowClass * klass)
{
G_OBJECT_CLASS (klass)->finalize = gst_nv_video_window_finalize;
}
GstNvVideoWindow *
gst_nv_video_window_new (GstNvVideoDisplay * display)
{
GstNvVideoWindow *window = NULL;
static volatile gsize debug_init = 0;
const gchar *winsys_name = NULL;
if (g_once_init_enter (&debug_init)) {
GST_DEBUG_CATEGORY_INIT (gst_debug_nv_video_window, "nvvideowindow", 0,
"nvvideowindow");
g_once_init_leave (&debug_init, 1);
}
winsys_name = g_getenv ("GST_NV_VIDEO_WINSYS");
#if NV_VIDEO_SINKS_HAS_X11
if (!window && (!winsys_name || g_strstr_len (winsys_name, 3, "x11"))) {
window = GST_NV_VIDEO_WINDOW (gst_nv_video_window_x11_new (NULL));
}
#endif
if (!window) {
GST_ERROR ("couldn't create window. GST_NV_VIDEO_WINSYS = %s",
        winsys_name ? winsys_name : "(unset)");
return NULL;
}
window->display = gst_object_ref (display);
GST_DEBUG_OBJECT (window, "created window for display %" GST_PTR_FORMAT,
display);
return window;
}
/* create new window handle after destroying existing */
gboolean
gst_nv_video_window_create_window (GstNvVideoWindow * window, gint x,
gint y, gint width, gint height)
{
GstNvVideoWindowClass *window_class;
window_class = GST_NV_VIDEO_WINDOW_GET_CLASS (window);
return window_class->create_window (window, x, y, width, height);
}
gboolean
gst_nv_video_window_set_handle (GstNvVideoWindow * window, guintptr id)
{
GstNvVideoWindowClass *window_class;
window_class = GST_NV_VIDEO_WINDOW_GET_CLASS (window);
return window_class->set_handle (window, id);
}
guintptr
gst_nv_video_window_get_handle (GstNvVideoWindow * window)
{
GstNvVideoWindowClass *window_class;
window_class = GST_NV_VIDEO_WINDOW_GET_CLASS (window);
return window_class->get_handle (window);
}
GstNvVideoContext *
gst_nv_video_window_get_context (GstNvVideoWindow * window)
{
g_return_val_if_fail (GST_IS_NV_VIDEO_WINDOW (window), NULL);
return (GstNvVideoContext *) g_weak_ref_get (&window->context);
}
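/*
 * Illustrative sketch, not part of window.c: creating a display and an
 * internal window with the helpers above. Backend selection through the
 * GST_NV_VIDEO_WINSYS environment variable ("x11") happens inside
 * gst_nv_video_window_new(); the size used here is arbitrary.
 */
static GstNvVideoWindow *
example_open_internal_window (void)
{
  GstNvVideoDisplay *display = NULL;
  GstNvVideoWindow *window;

  if (!gst_nv_video_display_new (&display))
    return NULL;

  window = gst_nv_video_window_new (display);
  if (!window) {
    gst_object_unref (display);
    return NULL;
  }

  if (!gst_nv_video_window_create_window (window, 0, 0, 1280, 720)) {
    gst_object_unref (window);
    gst_object_unref (display);
    return NULL;
  }

  /* the window holds its own reference to the display */
  gst_object_unref (display);
  return window;
}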


@@ -0,0 +1,78 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#ifndef __GST_NV_VIDEO_WINDOW_H__
#define __GST_NV_VIDEO_WINDOW_H__
#include <gst/gst.h>
#include <gst/video/gstvideosink.h>
#include <gst/video/video.h>
#include "gstnvvideofwd.h"
G_BEGIN_DECLS
#define GST_TYPE_NV_VIDEO_WINDOW \
(gst_nv_video_window_get_type())
#define GST_NV_VIDEO_WINDOW(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_NV_VIDEO_WINDOW, GstNvVideoWindow))
#define GST_NV_VIDEO_WINDOW_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_WINDOW, GstNvVideoWindowClass))
#define GST_IS_NV_VIDEO_WINDOW(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_WINDOW))
#define GST_IS_NV_VIDEO_WINDOW_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_WINDOW))
#define GST_NV_VIDEO_WINDOW_CAST(obj) \
((GstNvVideoWindow*)(obj))
#define GST_NV_VIDEO_WINDOW_GET_CLASS(o) \
(G_TYPE_INSTANCE_GET_CLASS((o), GST_TYPE_NV_VIDEO_WINDOW, GstNvVideoWindowClass))
struct _GstNvVideoWindowClass
{
GstObjectClass parent_class;
guintptr (*get_handle) (GstNvVideoWindow * window);
gboolean (*set_handle) (GstNvVideoWindow * window, guintptr id);
gboolean (*create_window) (GstNvVideoWindow * window, gint x, gint y, gint width, gint height);
gboolean (*draw) (GstNvVideoWindow * window, GstBuffer * buf);
};
struct _GstNvVideoWindow
{
GstObject parent;
GstNvVideoDisplay *display;
GWeakRef context;
};
GST_EXPORT
GstNvVideoWindow *gst_nv_video_window_new (GstNvVideoDisplay * display);
GST_EXPORT
gboolean gst_nv_video_window_create_window (GstNvVideoWindow * window, gint x, gint y, gint width, gint height);
GST_EXPORT
gboolean gst_nv_video_window_set_handle (GstNvVideoWindow * window, guintptr id);
GST_EXPORT
guintptr gst_nv_video_window_get_handle (GstNvVideoWindow * window);
GST_EXPORT
GstNvVideoContext *gst_nv_video_window_get_context (GstNvVideoWindow * window);
GType gst_nv_video_window_get_type (void);
G_END_DECLS
#endif /* __GST_NV_VIDEO_WINDOW_H__ */
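/*
 * Illustrative sketch, hypothetical and not in this commit: the minimal shape
 * of an additional windowing backend. A real backend (window_x11.c) subclasses
 * GstNvVideoWindow and fills in the vtable above from its class_init.
 */
typedef struct _ExampleWindow
{
  GstNvVideoWindow parent;
  guintptr handle;
} ExampleWindow;

typedef struct _ExampleWindowClass
{
  GstNvVideoWindowClass parent_class;
} ExampleWindowClass;

G_DEFINE_TYPE (ExampleWindow, example_window, GST_TYPE_NV_VIDEO_WINDOW);

static guintptr
example_window_get_handle (GstNvVideoWindow * window)
{
  return ((ExampleWindow *) window)->handle;
}

static void
example_window_class_init (ExampleWindowClass * klass)
{
  GstNvVideoWindowClass *window_class = (GstNvVideoWindowClass *) klass;

  /* set_handle, create_window and draw are hooked up the same way */
  window_class->get_handle = example_window_get_handle;
}

static void
example_window_init (ExampleWindow * window)
{
  window->handle = 0;
}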


@@ -0,0 +1,85 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#include "display_x11.h"
G_GNUC_INTERNAL extern GstDebugCategory *gst_debug_nv_video_display;
#define GST_CAT_DEFAULT gst_debug_nv_video_display
G_DEFINE_TYPE (GstNvVideoDisplayX11, gst_nv_video_display_x11,
GST_TYPE_NV_VIDEO_DISPLAY);
static void
gst_nv_video_display_x11_finalize (GObject * object)
{
GstNvVideoDisplayX11 *display_x11 = GST_NV_VIDEO_DISPLAY_X11 (object);
GST_DEBUG ("closing X11 display connection, handle=%p", display_x11->dpy);
if (display_x11->dpy) {
XCloseDisplay (display_x11->dpy);
}
GST_DEBUG ("closed X11 display connection");
G_OBJECT_CLASS (gst_nv_video_display_x11_parent_class)->finalize (object);
}
static guintptr
gst_nv_video_display_x11_get_handle (GstNvVideoDisplay * display)
{
return (guintptr) GST_NV_VIDEO_DISPLAY_X11 (display)->dpy;
}
static void
gst_nv_video_display_x11_class_init (GstNvVideoDisplayX11Class * klass)
{
GST_NV_VIDEO_DISPLAY_CLASS (klass)->get_handle =
GST_DEBUG_FUNCPTR (gst_nv_video_display_x11_get_handle);
G_OBJECT_CLASS (klass)->finalize = gst_nv_video_display_x11_finalize;
}
static void
gst_nv_video_display_x11_init (GstNvVideoDisplayX11 * display_x11)
{
GstNvVideoDisplay *display = (GstNvVideoDisplay *) display_x11;
display->type = GST_NV_VIDEO_DISPLAY_TYPE_X11;
GST_DEBUG_OBJECT (display, "init done");
}
GstNvVideoDisplayX11 *
gst_nv_video_display_x11_new (const gchar * name)
{
GstNvVideoDisplayX11 *ret;
ret = g_object_new (GST_TYPE_NV_VIDEO_DISPLAY_X11, NULL);
gst_object_ref_sink (ret);
ret->dpy = XOpenDisplay (NULL);
if (!ret->dpy) {
GST_ERROR ("failed to open X11 display connection");
gst_object_unref (ret);
return NULL;
}
GST_DEBUG ("opened X11 display connection handle=%p", ret->dpy);
return ret;
}
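/*
 * Illustrative sketch and an assumption about how the EGL context code might
 * consume this object (not taken from context_egl.c): the native Display
 * pointer opened above can be handed to eglGetDisplay().
 */
#include <EGL/egl.h>

static EGLDisplay
example_egl_display_from_x11 (GstNvVideoDisplayX11 * display_x11)
{
  return eglGetDisplay ((EGLNativeDisplayType) display_x11->dpy);
}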


@@ -0,0 +1,62 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#ifndef __GST_NV_VIDEO_DISPLAY_X11_H__
#define __GST_NV_VIDEO_DISPLAY_X11_H__
#include <X11/Xlib.h>
#include "display.h"
G_BEGIN_DECLS
#define GST_TYPE_NV_VIDEO_DISPLAY_X11 \
(gst_nv_video_display_x11_get_type())
#define GST_NV_VIDEO_DISPLAY_X11(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV_VIDEO_DISPLAY_X11, GstNvVideoDisplayX11))
#define GST_NV_VIDEO_DISPLAY_X11_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_DISPLAY_X11, GstNvVideoDisplayX11Class))
#define GST_IS_NV_VIDEO_DISPLAY_X11(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_DISPLAY_X11))
#define GST_IS_NV_VIDEO_DISPLAY_X11_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_DISPLAY_X11))
#define GST_NV_VIDEO_DISPLAY_X11_CAST(obj) \
((GstNvVideoDisplayX11*)(obj))
typedef struct _GstNvVideoDisplayX11 GstNvVideoDisplayX11;
typedef struct _GstNvVideoDisplayX11Class GstNvVideoDisplayX11Class;
struct _GstNvVideoDisplayX11
{
GstNvVideoDisplay parent;
Display *dpy;
};
struct _GstNvVideoDisplayX11Class
{
GstNvVideoDisplayClass parent_class;
};
GST_EXPORT
GstNvVideoDisplayX11 * gst_nv_video_display_x11_new (const gchar * name);
GType gst_nv_video_display_x11_get_type (void);
G_END_DECLS
#endif /* __GST_NV_VIDEO_DISPLAY_X11_H__ */


@@ -0,0 +1,157 @@
/**
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
#include "context.h"
#include "display_x11.h"
#include "window_x11.h"
#include <X11/Xutil.h>
G_GNUC_INTERNAL extern GstDebugCategory *gst_debug_nv_video_window;
#define GST_CAT_DEFAULT gst_debug_nv_video_window
#define gst_nv_video_window_x11_parent_class parent_class
G_DEFINE_TYPE (GstNvVideoWindowX11, gst_nv_video_window_x11,
GST_TYPE_NV_VIDEO_WINDOW);
static void
gst_nv_video_window_x11_destroy (GstNvVideoWindow * window)
{
GstNvVideoWindowX11 *window_x11 = GST_NV_VIDEO_WINDOW_X11 (window);
GstNvVideoDisplayX11 *display_x11 = (GstNvVideoDisplayX11 *) window->display;
if (window_x11->internal_window) {
GST_DEBUG_OBJECT (window, "destroy internal window %" G_GUINTPTR_FORMAT,
window_x11->handle);
XUnmapWindow (display_x11->dpy, window_x11->handle);
XDestroyWindow (display_x11->dpy, window_x11->handle);
XSync (display_x11->dpy, FALSE);
window_x11->internal_window = FALSE;
window_x11->handle = 0;
} else {
GST_DEBUG_OBJECT (window, "unset foreign window handle %" G_GUINTPTR_FORMAT,
window_x11->handle);
window_x11->handle = 0;
}
}
static void
gst_nv_video_window_x11_finalize (GObject * object)
{
GstNvVideoWindow *window = GST_NV_VIDEO_WINDOW (object);
GST_DEBUG_OBJECT (window, "finalize begin");
gst_nv_video_window_x11_destroy (window);
G_OBJECT_CLASS (gst_nv_video_window_x11_parent_class)->finalize (object);
GST_DEBUG_OBJECT (window, "finalize end");
}
static guintptr
gst_nv_video_window_x11_get_handle (GstNvVideoWindow * window)
{
GstNvVideoWindowX11 *window_x11 = GST_NV_VIDEO_WINDOW_X11 (window);
return window_x11->handle;
}
static gboolean
gst_nv_video_window_x11_set_handle (GstNvVideoWindow * window, guintptr id)
{
GstNvVideoWindowX11 *window_x11 = GST_NV_VIDEO_WINDOW_X11 (window);
gst_nv_video_window_x11_destroy (window);
window_x11->handle = id;
GST_DEBUG_OBJECT (window, "set window handle to %" G_GUINTPTR_FORMAT, id);
  return TRUE;
}
static gboolean
gst_nv_video_window_x11_create (GstNvVideoWindow * window, gint x,
gint y, gint width, gint height)
{
GstNvVideoWindowX11 *window_x11 = GST_NV_VIDEO_WINDOW_X11 (window);
GstNvVideoDisplayX11 *display_x11 = (GstNvVideoDisplayX11 *) window->display;
Display *dpy = display_x11->dpy;
int screen = DefaultScreen (dpy);
XSizeHints hints = {0};
hints.flags = PPosition ;
hints.x = x;
hints.y = y;
  // GstNvVideoWindow doesn't have a destroy_window method (like create_window)
// and GstNvVideoWindow object can't have multiple X windows. So if
// upper layer has existing window (foreign or internal), unset/destroy it.
//
  // TODO: In case of an existing internal window, we might be able to re-use it
// with XResizeWindow.
gst_nv_video_window_x11_destroy (window);
window_x11->handle = XCreateSimpleWindow (dpy, RootWindow (dpy, screen),
hints.x, hints.y, width, height, 1,
BlackPixel (dpy, screen), WhitePixel (dpy, screen));
if (!window_x11->handle) {
GST_ERROR_OBJECT (window, "failed to create internal window\n");
return FALSE;
}
window_x11->internal_window = TRUE;
XSetWindowBackgroundPixmap (dpy, window_x11->handle, None);
XSetNormalHints(dpy, window_x11->handle, &hints);
XMapRaised (dpy, window_x11->handle);
XSync (dpy, FALSE);
GST_DEBUG_OBJECT (window,
"created internal window %dx%d, handle=%" G_GUINTPTR_FORMAT, width,
height, window_x11->handle);
return TRUE;
}
static void
gst_nv_video_window_x11_class_init (GstNvVideoWindowX11Class * klass)
{
GstNvVideoWindowClass *window_class = (GstNvVideoWindowClass *) klass;
window_class->create_window =
GST_DEBUG_FUNCPTR (gst_nv_video_window_x11_create);
window_class->get_handle =
GST_DEBUG_FUNCPTR (gst_nv_video_window_x11_get_handle);
window_class->set_handle =
GST_DEBUG_FUNCPTR (gst_nv_video_window_x11_set_handle);
G_OBJECT_CLASS (klass)->finalize = gst_nv_video_window_x11_finalize;
}
static void
gst_nv_video_window_x11_init (GstNvVideoWindowX11 * window)
{
window->handle = 0;
window->internal_window = FALSE;
GST_DEBUG_OBJECT (window, "init done");
}
GstNvVideoWindowX11 *
gst_nv_video_window_x11_new (const gchar * name)
{
GstNvVideoWindowX11 *ret;
ret = g_object_new (GST_TYPE_NV_VIDEO_WINDOW_X11, NULL);
gst_object_ref_sink (ret);
return ret;
}
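/*
 * Illustrative application-side sketch, not part of window_x11.c: handing a
 * foreign X11 window to the sink. The sink's GstVideoOverlay implementation
 * (gstnv3dsink.c) forwards the XID to set_handle() above, so frames are
 * rendered into the application's window instead of an internal one.
 */
#include <X11/Xlib.h>
#include <gst/video/videooverlay.h>

static gboolean
example_use_foreign_window (GstElement * nv3dsink, Display * dpy)
{
  int screen = DefaultScreen (dpy);
  Window xid = XCreateSimpleWindow (dpy, RootWindow (dpy, screen),
      0, 0, 1280, 720, 1, BlackPixel (dpy, screen), WhitePixel (dpy, screen));

  if (!xid)
    return FALSE;

  XMapRaised (dpy, xid);
  XSync (dpy, False);

  gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (nv3dsink),
      (guintptr) xid);
  return TRUE;
}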


@@ -0,0 +1,54 @@
/**
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
#ifndef __GST_NV_VIDEO_WINDOW_X11_H__
#define __GST_NV_VIDEO_WINDOW_X11_H__
#include "window.h"
G_BEGIN_DECLS
#define GST_TYPE_NV_VIDEO_WINDOW_X11 \
(gst_nv_video_window_x11_get_type())
#define GST_NV_VIDEO_WINDOW_X11(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV_VIDEO_WINDOW_X11, GstNvVideoWindowX11))
#define GST_NV_VIDEO_WINDOW_X11_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV_VIDEO_WINDOW_X11, GstNvVideoWindowX11Class))
#define GST_IS_NV_VIDEO_WINDOW_X11(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV_VIDEO_WINDOW_X11))
#define GST_IS_NV_VIDEO_WINDOW_X11_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV_VIDEO_WINDOW_X11))
#define GST_NV_VIDEO_WINDOW_X11_CAST(obj) \
((GstNvVideoWindowX11*)(obj))
typedef struct _GstNvVideoWindowX11 GstNvVideoWindowX11;
typedef struct _GstNvVideoWindowX11Class GstNvVideoWindowX11Class;
struct _GstNvVideoWindowX11
{
GstNvVideoWindow parent;
guintptr handle;
gboolean internal_window;
};
struct _GstNvVideoWindowX11Class
{
GstNvVideoWindowClass parent_class;
};
GST_EXPORT
GstNvVideoWindowX11 *gst_nv_video_window_x11_new (const gchar * name);
GType gst_nv_video_window_x11_get_type (void);
G_END_DECLS
#endif /* __GST_NV_VIDEO_WINDOW_X11_H__ */


@@ -0,0 +1,66 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#include <gst/gst.h>
#if NV_VIDEO_SINKS_HAS_NV3DSINK
#include "nv3dsink/gstnv3dsink.h"
#endif
#if NV_VIDEO_SINKS_HAS_X11
#include <X11/Xlib.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_nvvideosinks_debug);
#define GST_CAT_DEFAULT gst_nvvideosinks_debug
static gboolean
plugin_init (GstPlugin * plugin)
{
#if NV_VIDEO_SINKS_HAS_X11
XInitThreads ();
#endif
  /* debug category for filtering log messages */
GST_DEBUG_CATEGORY_INIT (gst_nvvideosinks_debug, "nvvideosinks", 0,
"Nvidia video sinks");
#if NV_VIDEO_SINKS_HAS_NV3DSINK
if (!gst_element_register (plugin, "nv3dsink", GST_RANK_SECONDARY,
GST_TYPE_NV3DSINK)) {
return FALSE;
}
#endif
return TRUE;
}
/* PACKAGE is usually set by autotools but we are not using autotools
* to compile this code, so set it ourselves. GST_PLUGIN_DEFINE needs
* PACKAGE to be defined.
*/
#ifndef PACKAGE
#define PACKAGE "gst-plugins-nv-video-sinks"
#endif
/* gstreamer looks for this structure to register plugins */
GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
GST_VERSION_MINOR,
nvvideosinks,
"Nvidia Video Sink Plugins",
plugin_init, "0.0.1", "Proprietary", "Nvidia Video Sink Plugins",
"http://nvidia.com/")


@@ -0,0 +1,579 @@
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#include "gstnv3dsink.h"
#include "display.h"
#include "context.h"
#include "window.h"
GST_DEBUG_CATEGORY (gst_debug_nv3dsink);
#define GST_CAT_DEFAULT gst_debug_nv3dsink
GST_DEBUG_CATEGORY_EXTERN (GST_CAT_PERFORMANCE);
#define GST_CAPS_FEATURE_MEMORY_NVMM "memory:NVMM"
static void gst_nv3dsink_videooverlay_init (GstVideoOverlayInterface * iface);
static void gst_nv3dsink_set_window_handle (GstVideoOverlay * overlay,
guintptr id);
static void gst_nv3dsink_expose (GstVideoOverlay * overlay);
static void gst_nv3dsink_handle_events (GstVideoOverlay * overlay,
gboolean handle_events);
static void gst_nv3dsink_set_render_rectangle (GstVideoOverlay * overlay,
gint x, gint y, gint width, gint height);
/* Input capabilities. */
static GstStaticPadTemplate gst_nv3dsink_sink_template_factory =
GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
GST_PAD_ALWAYS,
GST_STATIC_CAPS (
//Supported Software buffer caps
GST_VIDEO_CAPS_MAKE ("{ "
"RGBA, BGRA, ARGB, ABGR, " "RGBx, BGRx, xRGB, xBGR, "
"AYUV, Y444, I420, YV12, " "NV12, NV21, Y42B, Y41B, "
"RGB, BGR, RGB16 }")
";"
GST_VIDEO_CAPS_MAKE_WITH_FEATURES (
GST_CAPS_FEATURE_MEMORY_NVMM,
"{ RGBA, BGRA, ARGB, ABGR, RGBx, BGRx, xRGB, xBGR, "
"AYUV, Y444, I420, YV12, NV12, NV21, Y42B, Y41B, "
"RGB, BGR, RGB16 }")
));
#define parent_class gst_nv3dsink_parent_class
G_DEFINE_TYPE_WITH_CODE (GstNv3dSink, gst_nv3dsink, GST_TYPE_VIDEO_SINK,
G_IMPLEMENT_INTERFACE (GST_TYPE_VIDEO_OVERLAY,
gst_nv3dsink_videooverlay_init);
GST_DEBUG_CATEGORY_INIT (gst_debug_nv3dsink, "nv3dsink", 0,
"Nvidia 3D sink"));
enum
{
PROP_0,
PROP_WINDOW_X,
PROP_WINDOW_Y,
PROP_WINDOW_WIDTH,
PROP_WINDOW_HEIGHT
};
/* GObject vmethod implementations */
static void
gst_nv3dsink_videooverlay_init (GstVideoOverlayInterface * iface)
{
iface->set_window_handle = gst_nv3dsink_set_window_handle;
iface->expose = gst_nv3dsink_expose;
iface->handle_events = gst_nv3dsink_handle_events;
iface->set_render_rectangle = gst_nv3dsink_set_render_rectangle;
}
static void
gst_nv3dsink_set_window_handle (GstVideoOverlay * overlay, guintptr id)
{
GstNv3dSink *nv3dsink = GST_NV3DSINK (overlay);
gint width = 0;
gint height = 0;
g_return_if_fail (GST_IS_NV3DSINK (nv3dsink));
g_mutex_lock (&nv3dsink->win_handle_lock);
GST_DEBUG_OBJECT (nv3dsink, "set_window_handle %" G_GUINT64_FORMAT, id);
if (gst_nv_video_window_get_handle (nv3dsink->window) == id) {
g_mutex_unlock (&nv3dsink->win_handle_lock);
return;
}
if (id) {
gst_nv_video_window_set_handle (nv3dsink->window, id);
g_mutex_unlock (&nv3dsink->win_handle_lock);
return;
}
if (!GST_VIDEO_SINK_WIDTH (nv3dsink) || !GST_VIDEO_SINK_HEIGHT (nv3dsink)) {
// window will be created during caps negotiation
g_mutex_unlock (&nv3dsink->win_handle_lock);
return;
}
// create internal window
if (nv3dsink->window_width != 0 && nv3dsink->window_height != 0) {
width = nv3dsink->window_width;
height = nv3dsink->window_height;
} else {
width = GST_VIDEO_SINK_WIDTH (nv3dsink);
height = GST_VIDEO_SINK_HEIGHT (nv3dsink);
}
if (!gst_nv_video_window_create_window (nv3dsink->window,
nv3dsink->window_x, nv3dsink->window_y, width, height)) {
g_mutex_unlock (&nv3dsink->win_handle_lock);
return;
}
g_mutex_unlock (&nv3dsink->win_handle_lock);
}
static void
gst_nv3dsink_expose (GstVideoOverlay * overlay)
{
GstNv3dSink *nv3dsink = GST_NV3DSINK (overlay);
GST_DEBUG_OBJECT (nv3dsink, "expose unimplemented");
}
static void
gst_nv3dsink_handle_events (GstVideoOverlay * overlay, gboolean handle_events)
{
GstNv3dSink *nv3dsink = GST_NV3DSINK (overlay);
GST_DEBUG_OBJECT (nv3dsink, "handle_events unimplemented");
}
static void
gst_nv3dsink_set_render_rectangle (GstVideoOverlay * overlay, gint x, gint y,
gint width, gint height)
{
GstNv3dSink *nv3dsink = GST_NV3DSINK (overlay);
g_return_if_fail (GST_IS_NV3DSINK (nv3dsink));
GST_DEBUG_OBJECT (nv3dsink, "set_render_rectangle unimplemented");
return;
}
static void
gst_nv3dsink_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
{
GstNv3dSink *nv3dsink;
g_return_if_fail (GST_IS_NV3DSINK (object));
nv3dsink = GST_NV3DSINK (object);
switch (prop_id) {
case PROP_WINDOW_X:
nv3dsink->window_x = g_value_get_uint (value);
break;
case PROP_WINDOW_Y:
nv3dsink->window_y = g_value_get_uint (value);
break;
case PROP_WINDOW_WIDTH:
nv3dsink->window_width = g_value_get_uint (value);
break;
case PROP_WINDOW_HEIGHT:
nv3dsink->window_height = g_value_get_uint (value);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
gst_nv3dsink_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec)
{
GstNv3dSink *nv3dsink;
g_return_if_fail (GST_IS_NV3DSINK (object));
nv3dsink = GST_NV3DSINK (object);
switch (prop_id) {
case PROP_WINDOW_X:
g_value_set_uint (value, nv3dsink->window_x);
break;
case PROP_WINDOW_Y:
g_value_set_uint (value, nv3dsink->window_y);
break;
case PROP_WINDOW_WIDTH:
g_value_set_uint (value, nv3dsink->window_width);
break;
case PROP_WINDOW_HEIGHT:
g_value_set_uint (value, nv3dsink->window_height);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
gst_nv3dsink_finalize (GObject * object)
{
GstNv3dSink *nv3dsink;
g_return_if_fail (GST_IS_NV3DSINK (object));
nv3dsink = GST_NV3DSINK (object);
GST_TRACE_OBJECT (nv3dsink, "finalize");
g_mutex_clear (&nv3dsink->win_handle_lock);
G_OBJECT_CLASS (parent_class)->finalize (object);
}
static gboolean
gst_nv3dsink_start (GstBaseSink * bsink)
{
GstNv3dSink *nv3dsink = GST_NV3DSINK (bsink);
GstNvVideoWindow *window;
GST_TRACE_OBJECT (nv3dsink, "start");
// TODO: Query display from application/upstream elements if there
// is such use case.
if (!nv3dsink->display) {
if (!gst_nv_video_display_new (&nv3dsink->display)) {
GST_ERROR_OBJECT (nv3dsink, "failed to create new display");
return FALSE;
}
} else {
GST_DEBUG_OBJECT (nv3dsink, "using existing display (%p)",
nv3dsink->display);
}
if (!nv3dsink->context) {
if (!gst_nv_video_display_create_context (nv3dsink->display,
&nv3dsink->context)) {
GST_ERROR_OBJECT (nv3dsink, "failed to create new context");
return FALSE;
}
} else {
GST_DEBUG_OBJECT (nv3dsink, "using existing context (%p)",
nv3dsink->context);
}
if (!nv3dsink->window) {
window = gst_nv_video_display_create_window (nv3dsink->display);
if (window == NULL) {
GST_ERROR_OBJECT (nv3dsink, "failed to create new window");
return FALSE;
}
nv3dsink->window = gst_object_ref (window);
gst_object_unref (window);
gst_nv_video_context_set_window (nv3dsink->context, nv3dsink->window);
} else {
GST_DEBUG_OBJECT (nv3dsink, "using existing window (%p)", nv3dsink->window);
}
return TRUE;
}
static gboolean
gst_nv3dsink_stop (GstBaseSink * bsink)
{
GstNv3dSink *nv3dsink = GST_NV3DSINK (bsink);
GST_TRACE_OBJECT (nv3dsink, "stop");
if (nv3dsink->configured_caps) {
gst_caps_unref (nv3dsink->configured_caps);
nv3dsink->configured_caps = NULL;
}
if (nv3dsink->context) {
gst_object_unref (nv3dsink->context);
nv3dsink->context = NULL;
}
if (nv3dsink->window) {
g_object_unref (nv3dsink->window);
nv3dsink->window = NULL;
}
if (nv3dsink->display) {
g_object_unref (nv3dsink->display);
nv3dsink->display = NULL;
}
return TRUE;
}
static GstCaps *
gst_nv3dsink_get_caps (GstBaseSink * bsink, GstCaps * filter)
{
GstNv3dSink *nv3dsink;
GstCaps *tmp = NULL;
GstCaps *result = NULL;
GstCaps *caps = NULL;
nv3dsink = GST_NV3DSINK (bsink);
tmp = gst_pad_get_pad_template_caps (GST_BASE_SINK_PAD (bsink));
if (filter) {
GST_DEBUG_OBJECT (bsink, "intersecting with filter caps %" GST_PTR_FORMAT,
filter);
result = gst_caps_intersect_full (filter, tmp, GST_CAPS_INTERSECT_FIRST);
gst_caps_unref (tmp);
} else {
result = tmp;
}
caps = gst_nv_video_context_get_caps (nv3dsink->context);
if (caps) {
result = gst_caps_intersect (result, caps);
gst_caps_unref (caps);
}
GST_DEBUG_OBJECT (bsink, "returning caps: %" GST_PTR_FORMAT, result);
return result;
}
static gboolean
gst_nv3dsink_set_caps (GstBaseSink * bsink, GstCaps * caps)
{
GstNv3dSink *nv3dsink;
GstVideoInfo info;
GstCapsFeatures *features;
gint width = 0;
gint height = 0;
nv3dsink = GST_NV3DSINK (bsink);
if (!nv3dsink->context || !nv3dsink->display) {
return FALSE;
}
GST_DEBUG_OBJECT (bsink, "set caps with %" GST_PTR_FORMAT, caps);
if (nv3dsink->configured_caps) {
if (gst_caps_can_intersect (caps, nv3dsink->configured_caps)) {
return TRUE;
}
}
features = gst_caps_get_features (caps, 0);
if (gst_caps_features_contains (features, GST_CAPS_FEATURE_MEMORY_NVMM)) {
nv3dsink->context->using_NVMM = 1;
}
if (!gst_video_info_from_caps (&info, caps)) {
GST_ERROR_OBJECT (nv3dsink, "Invalid caps %" GST_PTR_FORMAT, caps);
return FALSE;
}
nv3dsink->context->configured_info = info;
  int is_res_changed = 0;
  if ((GST_VIDEO_SINK_WIDTH (nv3dsink) != 0 && GST_VIDEO_SINK_HEIGHT (nv3dsink) != 0) &&
      (GST_VIDEO_SINK_WIDTH (nv3dsink) != info.width ||
       GST_VIDEO_SINK_HEIGHT (nv3dsink) != info.height)) {
    is_res_changed = 1;
  }
  if (is_res_changed) {
    gst_nv_video_context_handle_tearing (nv3dsink->context);
  }
GST_VIDEO_SINK_WIDTH (nv3dsink) = info.width;
GST_VIDEO_SINK_HEIGHT (nv3dsink) = info.height;
g_mutex_lock (&nv3dsink->win_handle_lock);
if (!gst_nv_video_window_get_handle (nv3dsink->window)) {
g_mutex_unlock (&nv3dsink->win_handle_lock);
gst_video_overlay_prepare_window_handle (GST_VIDEO_OVERLAY (nv3dsink));
} else {
g_mutex_unlock (&nv3dsink->win_handle_lock);
}
if (GST_VIDEO_SINK_WIDTH (nv3dsink) <= 0
|| GST_VIDEO_SINK_HEIGHT (nv3dsink) <= 0) {
GST_ERROR_OBJECT (nv3dsink, "invalid size");
return FALSE;
}
g_mutex_lock (&nv3dsink->win_handle_lock);
if (!gst_nv_video_window_get_handle (nv3dsink->window)) {
if (nv3dsink->window_width != 0 && nv3dsink->window_height != 0) {
width = nv3dsink->window_width;
height = nv3dsink->window_height;
} else {
width = GST_VIDEO_SINK_WIDTH (nv3dsink);
height = GST_VIDEO_SINK_HEIGHT (nv3dsink);
}
if (!gst_nv_video_window_create_window (nv3dsink->window,
nv3dsink->window_x, nv3dsink->window_y, width, height)) {
g_mutex_unlock (&nv3dsink->win_handle_lock);
return FALSE;
}
}
g_mutex_unlock (&nv3dsink->win_handle_lock);
gst_caps_replace (&nv3dsink->configured_caps, caps);
return TRUE;
}
static gboolean
gst_nv3dsink_propose_allocation (GstBaseSink * bsink, GstQuery * query)
{
GstNv3dSink *nv3dsink = GST_NV3DSINK (bsink);
gst_nv_video_context_handle_drc (nv3dsink->context);
gst_query_add_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL);
gst_query_add_allocation_meta (query, GST_VIDEO_CROP_META_API_TYPE, NULL);
return TRUE;
}
static GstFlowReturn
gst_nv3dsink_show_frame (GstVideoSink * vsink, GstBuffer * buf)
{
GstNv3dSink *nv3dsink;
nv3dsink = GST_NV3DSINK (vsink);
GST_TRACE_OBJECT (nv3dsink, "show buffer %p, window size:%ux%u", buf,
GST_VIDEO_SINK_WIDTH (nv3dsink), GST_VIDEO_SINK_HEIGHT (nv3dsink));
if (!gst_nv_video_context_show_frame (nv3dsink->context, buf)) {
return GST_FLOW_FLUSHING;
}
return GST_FLOW_OK;
}
static gboolean
gst_nv3dsink_event (GstBaseSink * bsink, GstEvent * event)
{
GstNv3dSink *nv3dsink = GST_NV3DSINK (bsink);
switch (GST_EVENT_TYPE (event)) {
case GST_EVENT_EOS:
gst_nv_video_context_handle_eos (nv3dsink->context);
break;
default:
break;
}
if (GST_BASE_SINK_CLASS (parent_class)->event)
return GST_BASE_SINK_CLASS (parent_class)->event (bsink, event);
else
gst_event_unref (event);
return TRUE;
}
static GstStateChangeReturn
gst_nv3dsink_change_state (GstElement * element, GstStateChange transition)
{
GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS;
GstNv3dSink *nv3dsink = GST_NV3DSINK (element);
switch (transition) {
case GST_STATE_CHANGE_PAUSED_TO_READY:
/* call handle_eos to unref last buffer */
gst_nv_video_context_handle_eos (nv3dsink->context);
break;
default:
break;
}
ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
return ret;
}
/* initialize the plugin's class */
static void
gst_nv3dsink_class_init (GstNv3dSinkClass * klass)
{
GObjectClass *gobject_class;
GstElementClass *gstelement_class;
GstBaseSinkClass *gstbasesink_class;
GstVideoSinkClass *gstvideosink_class;
gobject_class = (GObjectClass *) klass;
gstelement_class = (GstElementClass *) klass;
gstbasesink_class = (GstBaseSinkClass *) klass;
gstvideosink_class = (GstVideoSinkClass *) klass;
gobject_class->set_property = gst_nv3dsink_set_property;
gobject_class->get_property = gst_nv3dsink_get_property;
gst_element_class_set_static_metadata (gstelement_class, "Nvidia 3D sink",
"Sink/Video", "A videosink based on 3D graphics rendering API",
"Yogish Kulkarni <yogishk@nvidia.com>");
gobject_class->finalize = gst_nv3dsink_finalize;
gst_element_class_add_static_pad_template (gstelement_class,
&gst_nv3dsink_sink_template_factory);
gstbasesink_class->start = GST_DEBUG_FUNCPTR (gst_nv3dsink_start);
gstbasesink_class->stop = GST_DEBUG_FUNCPTR (gst_nv3dsink_stop);
gstbasesink_class->set_caps = GST_DEBUG_FUNCPTR (gst_nv3dsink_set_caps);
gstbasesink_class->get_caps = GST_DEBUG_FUNCPTR (gst_nv3dsink_get_caps);
gstbasesink_class->propose_allocation =
GST_DEBUG_FUNCPTR (gst_nv3dsink_propose_allocation);
gstbasesink_class->event = gst_nv3dsink_event;
gstvideosink_class->show_frame = GST_DEBUG_FUNCPTR (gst_nv3dsink_show_frame);
gstelement_class->change_state = gst_nv3dsink_change_state;
g_object_class_install_property (gobject_class, PROP_WINDOW_X,
g_param_spec_uint ("window-x",
"Window x coordinate",
"X coordinate of window", 0, G_MAXINT, 10,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_WINDOW_Y,
g_param_spec_uint ("window-y",
"Window y coordinate",
"Y coordinate of window", 0, G_MAXINT, 10,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_WINDOW_WIDTH,
g_param_spec_uint ("window-width",
"Window width",
"Width of window", 0, G_MAXINT, 0,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_WINDOW_HEIGHT,
g_param_spec_uint ("window-height",
"Window height",
"Height of window", 0, G_MAXINT, 0,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
}
/* initialize the new element */
static void
gst_nv3dsink_init (GstNv3dSink * nv3dsink)
{
GST_TRACE_OBJECT (nv3dsink, "init");
nv3dsink->display = NULL;
nv3dsink->context = NULL;
nv3dsink->window = NULL;
nv3dsink->window_x = 0;
nv3dsink->window_y = 0;
nv3dsink->window_width = 0;
nv3dsink->window_height = 0;
nv3dsink->configured_caps = NULL;
/* mutex to serialize create, set and get window handle calls */
g_mutex_init (&nv3dsink->win_handle_lock);
}


@@ -0,0 +1,71 @@
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1, as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
#ifndef __GST_NV3DSINK_H__
#define __GST_NV3DSINK_H__
#include <gst/gst.h>
#include <gst/video/gstvideosink.h>
#include <gst/video/video.h>
#include "gstnvvideofwd.h"
G_BEGIN_DECLS
GST_DEBUG_CATEGORY_EXTERN (gst_debug_nv3dsink);
#define GST_TYPE_NV3DSINK \
(gst_nv3dsink_get_type())
#define GST_NV3DSINK(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj), GST_TYPE_NV3DSINK, GstNv3dSink))
#define GST_NV3DSINK_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass), GST_TYPE_NV3DSINK, GstNv3dSinkClass))
#define GST_IS_NV3DSINK(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj), GST_TYPE_NV3DSINK))
#define GST_IS_NV3DSINK_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass), GST_TYPE_NV3DSINK))
typedef struct _GstNv3dSink GstNv3dSink;
typedef struct _GstNv3dSinkClass GstNv3dSinkClass;
struct _GstNv3dSink
{
GstVideoSink parent;
GstNvVideoDisplay *display;
GstNvVideoContext *context;
GstNvVideoWindow *window;
gint window_x;
gint window_y;
gint window_width;
gint window_height;
GMutex win_handle_lock;
GstCaps *configured_caps;
};
struct _GstNv3dSinkClass
{
GstVideoSinkClass parent_class;
};
GType gst_nv3dsink_get_type (void);
G_END_DECLS
#endif /* __GST_NV3DSINK_H__ */

nvbuf_utils.h

@@ -0,0 +1,897 @@
/*
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA Multimedia Utilities: Buffering and Transform/Composition/Blending</b>
*
*/
/**
* @defgroup ee_nvbuffering_group Buffer Manager
* @ingroup common_utility_group
* NVIDIA buffering utility library for use by applications.
 * The utility also transforms, composites, and blends.
* @{
*/
#ifndef _NVBUF_UTILS_H_
#define _NVBUF_UTILS_H_
#ifdef __cplusplus
extern "C"
{
#endif
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <errno.h>
#include <stdbool.h>
/**
* Defines the maximum number of planes for a video frame.
*/
#define MAX_NUM_PLANES 4
/**
* Defines the maximum number of input video frames that can be used for composition.
*/
#define MAX_COMPOSITE_FRAME 16
/**
* Defines the default values for chroma subsampling.
* The default value matches JPEG/MPEG use cases.
*/
#define NVBUF_CHROMA_SUBSAMPLING_HORIZ_DEFAULT 0
#define NVBUF_CHROMA_SUBSAMPLING_VERT_DEFAULT 1
/**
* Defines the maximum number of sync object parameters.
*/
#define NVBUF_MAX_SYNCOBJ_PARAMS 5
/**
* Use this value to represent an infinite wait interval.
 * A value of zero should not be interpreted as infinite;
 * it means "time out immediately" and simply checks whether
 * the event has already happened.
*/
#define NVBUFFER_SYNCPOINT_WAIT_INFINITE 0xFFFFFFFF
/**
* Defines Payload types for NvBuffer.
*/
typedef enum
{
/** buffer payload with hardware memory handle for set of planes. */
NvBufferPayload_SurfArray,
/** buffer payload with hardware memory handle for specific memory size. */
NvBufferPayload_MemHandle,
} NvBufferPayloadType;
/**
* Defines display scan formats for NvBuffer video planes.
*/
typedef enum
{
  /** Progressive scan formats. */
NvBufferDisplayScanFormat_Progressive = 0,
/** Interlaced scan formats. */
NvBufferDisplayScanFormat_Interlaced,
} NvBufferDisplayScanFormat;
/**
* Defines Layout formats for NvBuffer video planes.
*/
typedef enum
{
/** Pitch Layout. */
NvBufferLayout_Pitch,
/** BlockLinear Layout. */
NvBufferLayout_BlockLinear,
} NvBufferLayout;
/**
* Defines memory access flags for NvBuffer.
*/
typedef enum
{
/** Memory read. */
NvBufferMem_Read,
/** Memory write. */
NvBufferMem_Write,
/** Memory read & write. */
NvBufferMem_Read_Write,
} NvBufferMemFlags;
/**
* Defines tags that identify the components requesting a memory allocation.
* The tags can be used later to identify the total memory allocated to
* particular types of components.
*/
typedef enum
{
/** tag None. */
NvBufferTag_NONE = 0x0,
/** tag for Camera. */
NvBufferTag_CAMERA = 0x200,
/** tag for Jpeg Encoder/Decoder. */
NvBufferTag_JPEG = 0x1500,
/** tag for VPR Buffers. */
NvBufferTag_PROTECTED = 0x1504,
/** tag for H264/H265 Video Encoder. */
NvBufferTag_VIDEO_ENC = 0x1200,
/** tag for H264/H265/VP9 Video Decoder. */
NvBufferTag_VIDEO_DEC = 0x1400,
/** tag for Video Transform/Composite. */
NvBufferTag_VIDEO_CONVERT = 0xf01,
} NvBufferTag;
/**
* Defines color formats for NvBuffer.
*/
typedef enum
{
/** BT.601 colorspace - YUV420 multi-planar. */
NvBufferColorFormat_YUV420,
  /** BT.601 colorspace - YVU420 multi-planar. */
NvBufferColorFormat_YVU420,
/** BT.601 colorspace - YUV422 multi-planar. */
NvBufferColorFormat_YUV422,
/** BT.601 colorspace - YUV420 ER multi-planar. */
NvBufferColorFormat_YUV420_ER,
/** BT.601 colorspace - YVU420 ER multi-planar. */
NvBufferColorFormat_YVU420_ER,
/** BT.601 colorspace - Y/CbCr 4:2:0 multi-planar. */
NvBufferColorFormat_NV12,
/** BT.601 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NvBufferColorFormat_NV12_ER,
/** BT.601 colorspace - Y/CbCr 4:2:0 multi-planar. */
NvBufferColorFormat_NV21,
/** BT.601 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NvBufferColorFormat_NV21_ER,
/** BT.601 colorspace - YUV 4:2:2 planar. */
NvBufferColorFormat_UYVY,
/** BT.601 colorspace - YUV ER 4:2:2 planar. */
NvBufferColorFormat_UYVY_ER,
/** BT.601 colorspace - YUV 4:2:2 planar. */
NvBufferColorFormat_VYUY,
/** BT.601 colorspace - YUV ER 4:2:2 planar. */
NvBufferColorFormat_VYUY_ER,
/** BT.601 colorspace - YUV 4:2:2 planar. */
NvBufferColorFormat_YUYV,
/** BT.601 colorspace - YUV ER 4:2:2 planar. */
NvBufferColorFormat_YUYV_ER,
/** BT.601 colorspace - YUV 4:2:2 planar. */
NvBufferColorFormat_YVYU,
/** BT.601 colorspace - YUV ER 4:2:2 planar. */
NvBufferColorFormat_YVYU_ER,
/** LegacyRGBA colorspace - BGRA-8-8-8-8 planar. */
NvBufferColorFormat_ABGR32,
/** LegacyRGBA colorspace - XRGB-8-8-8-8 planar. */
NvBufferColorFormat_XRGB32,
/** LegacyRGBA colorspace - ARGB-8-8-8-8 planar. */
NvBufferColorFormat_ARGB32,
/** BT.601 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV12_10LE,
/** BT.709 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV12_10LE_709,
/** BT.709_ER colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV12_10LE_709_ER,
/** BT.2020 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV12_10LE_2020,
/** BT.601 colorspace - Y/CrCb 4:2:0 10-bit multi-planar. */
NvBufferColorFormat_NV21_10LE,
/** BT.601 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
NvBufferColorFormat_NV12_12LE,
/** BT.2020 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
NvBufferColorFormat_NV12_12LE_2020,
/** BT.601 colorspace - Y/CrCb 4:2:0 12-bit multi-planar. */
NvBufferColorFormat_NV21_12LE,
/** BT.709 colorspace - YUV420 multi-planar. */
NvBufferColorFormat_YUV420_709,
/** BT.709 colorspace - YUV420 ER multi-planar. */
NvBufferColorFormat_YUV420_709_ER,
/** BT.709 colorspace - Y/CbCr 4:2:0 multi-planar. */
NvBufferColorFormat_NV12_709,
/** BT.709 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NvBufferColorFormat_NV12_709_ER,
/** BT.2020 colorspace - YUV420 multi-planar. */
NvBufferColorFormat_YUV420_2020,
/** BT.2020 colorspace - Y/CbCr 4:2:0 multi-planar. */
NvBufferColorFormat_NV12_2020,
/** BT.601 colorspace - YUV444 multi-planar. */
NvBufferColorFormat_YUV444,
/** Optical flow */
NvBufferColorFormat_SignedR16G16,
/** Optical flow SAD calculation Buffer format */
NvBufferColorFormat_A32,
/** 8-bit grayscale. */
NvBufferColorFormat_GRAY8,
/** BT.601 colorspace - Y/CbCr 4:2:2 multi-planar. */
NvBufferColorFormat_NV16,
/** BT.601 colorspace - Y/CbCr 4:2:2 10-bit semi-planar. */
NvBufferColorFormat_NV16_10LE,
/** BT.601 colorspace - Y/CbCr 4:4:4 multi-planar. */
NvBufferColorFormat_NV24,
/** BT.601 colorspace - Y/CrCb 4:4:4 10-bit multi-planar. */
NvBufferColorFormat_NV24_10LE,
/** BT.601_ER colorspace - Y/CbCr 4:2:2 multi-planar. */
NvBufferColorFormat_NV16_ER,
/** BT.601_ER colorspace - Y/CbCr 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_ER,
/** BT.709 colorspace - Y/CbCr 4:2:2 multi-planar. */
NvBufferColorFormat_NV16_709,
/** BT.709 colorspace - Y/CbCr 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_709,
/** BT.709_ER colorspace - Y/CbCr 4:2:2 multi-planar. */
NvBufferColorFormat_NV16_709_ER,
/** BT.709_ER colorspace - Y/CbCr 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_709_ER,
/** BT.709 colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_10LE_709,
/** BT.709 ER colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_10LE_709_ER,
/** BT.2020 colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_10LE_2020,
/** BT.2020 colorspace - Y/CbCr 12 bit 4:4:4 multi-planar. */
NvBufferColorFormat_NV24_12LE_2020,
/** Non-linear RGB BT.709 colorspace - RGBA-10-10-10-2 planar. */
NvBufferColorFormat_RGBA_10_10_10_2_709,
/** Non-linear RGB BT.2020 colorspace - RGBA-10-10-10-2 planar. */
NvBufferColorFormat_RGBA_10_10_10_2_2020,
/** Non-linear RGB BT.709 colorspace - BGRA-10-10-10-2 planar. */
NvBufferColorFormat_BGRA_10_10_10_2_709,
/** Non-linear RGB BT.2020 colorspace - BGRA-10-10-10-2 planar. */
NvBufferColorFormat_BGRA_10_10_10_2_2020,
/** Invalid color format. */
NvBufferColorFormat_Invalid,
} NvBufferColorFormat;
/**
* Defines video flip methods.
*/
typedef enum
{
/** Video flip none. */
NvBufferTransform_None,
/** Video flip rotate 90 degree counter-clockwise. */
NvBufferTransform_Rotate90,
/** Video flip rotate 180 degree. */
NvBufferTransform_Rotate180,
/** Video flip rotate 270 degree counter-clockwise. */
NvBufferTransform_Rotate270,
/** Video flip with respect to X-axis. */
NvBufferTransform_FlipX,
/** Video flip with respect to Y-axis. */
NvBufferTransform_FlipY,
/** Video flip transpose. */
NvBufferTransform_Transpose,
  /** Video flip inverse transpose. */
NvBufferTransform_InvTranspose,
} NvBufferTransform_Flip;
/**
* Defines transform video filter types.
*/
typedef enum
{
/** transform filter nearest. */
NvBufferTransform_Filter_Nearest,
/** transform filter bilinear. */
NvBufferTransform_Filter_Bilinear,
/** transform filter 5 tap. */
NvBufferTransform_Filter_5_Tap,
/** transform filter 10 tap. */
NvBufferTransform_Filter_10_Tap,
/** transform filter smart. */
NvBufferTransform_Filter_Smart,
/** transform filter nicest. */
NvBufferTransform_Filter_Nicest,
} NvBufferTransform_Filter;
/**
* Defines flags to indicate for valid transform.
*/
typedef enum {
/** transform flag to crop source rectangle. */
NVBUFFER_TRANSFORM_CROP_SRC = 1,
/** transform flag to crop destination rectangle. */
NVBUFFER_TRANSFORM_CROP_DST = 1 << 1,
/** transform flag to set filter type. */
NVBUFFER_TRANSFORM_FILTER = 1 << 2,
/** transform flag to set flip method. */
NVBUFFER_TRANSFORM_FLIP = 1 << 3,
} NvBufferTransform_Flag;
/**
* Defines flags that specify valid composition/blending operations.
*/
typedef enum {
/** flag to set for composition. */
NVBUFFER_COMPOSITE = 1,
/** flag to set for blending. */
NVBUFFER_BLEND = 1 << 1,
/** composition flag to set filter type. */
NVBUFFER_COMPOSITE_FILTER = 1 << 2,
} NvBufferComposite_Flag;
/**
* Holds parameters for buffer sync point object.
 * A sync object param is simply a data structure containing a [sync point ID, value] pair.
 * Clients can use it to describe an event that they might want to wait for.
*/
typedef struct _NvBufferSyncObjParams
{
uint32_t syncpointID;
uint32_t value;
}NvBufferSyncObjParams;
/**
* buffer sync point object.
*/
typedef struct _NvBufferSyncObjRec
{
NvBufferSyncObjParams insyncobj[NVBUF_MAX_SYNCOBJ_PARAMS];
uint32_t num_insyncobj;
NvBufferSyncObjParams outsyncobj;
uint32_t use_outsyncobj;
}NvBufferSyncObj;
/**
* Holds composition background r,g,b colors.
*/
typedef struct
{
/** background color value for r. */
float r;
/** background color value for g. */
float g;
/** background color value for b. */
float b;
}NvBufferCompositeBackground;
/**
* Holds coordinates for a rectangle.
*/
typedef struct
{
/** rectangle top. */
uint32_t top;
/** rectangle left. */
uint32_t left;
/** rectangle width. */
uint32_t width;
/** rectangle height. */
uint32_t height;
}NvBufferRect;
/**
* Holds an opaque NvBuffer session type required for parallel buffer
 * transformations and compositions. Operations using a single session are
* scheduled sequentially, after the previous operation finishes. Operations for
* multiple sessions are scheduled in parallel.
*/
typedef struct _NvBufferSession * NvBufferSession;
/**
* Holds Chroma Subsampling parameters.
*/
typedef struct _NvBufferChromaSubSamplingParams
{
/** location settings */
uint8_t chromaLocHoriz;
uint8_t chromaLocVert;
}NvBufferChromaSubsamplingParams;
#define NVBUF_CHROMA_SUBSAMPLING_PARAMS_DEFAULT \
{ \
NVBUF_CHROMA_SUBSAMPLING_HORIZ_DEFAULT, \
NVBUF_CHROMA_SUBSAMPLING_VERT_DEFAULT \
}
/**
* Holds the input parameters for hardware buffer creation.
*/
typedef struct _NvBufferCreateParams
{
/** width of the buffer. */
int32_t width;
/** height of the buffer. */
int32_t height;
/** payload type of the buffer. */
NvBufferPayloadType payloadType;
  /** size of the memory. (Applicable for NvBufferPayload_MemHandle) */
int32_t memsize;
/** layout of the buffer. */
NvBufferLayout layout;
/** colorformat of the buffer. */
NvBufferColorFormat colorFormat;
/** tag to associate with the buffer. */
NvBufferTag nvbuf_tag;
}NvBufferCreateParams;
/**
* Holds parameters for a hardware buffer.
*/
typedef struct _NvBufferParams
{
/** Holds the DMABUF FD of the hardware buffer. */
uint32_t dmabuf_fd;
/** pointer to hardware buffer memory. */
void *nv_buffer;
/** payload type of the buffer. */
NvBufferPayloadType payloadType;
  /** size of the memory. (Applicable for NvBufferPayload_MemHandle) */
int32_t memsize;
/** size of hardware buffer. */
uint32_t nv_buffer_size;
/** video format type of hardware buffer. */
NvBufferColorFormat pixel_format;
/** number of planes of hardware buffer. */
uint32_t num_planes;
  /** width of each plane of hardware buffer. */
  uint32_t width[MAX_NUM_PLANES];
  /** height of each plane of hardware buffer. */
  uint32_t height[MAX_NUM_PLANES];
  /** pitch of each plane of hardware buffer. */
  uint32_t pitch[MAX_NUM_PLANES];
  /** memory offset values of each video plane of hardware buffer. */
  uint32_t offset[MAX_NUM_PLANES];
  /** size of each video plane of hardware buffer. */
  uint32_t psize[MAX_NUM_PLANES];
  /** layout type of each plane of hardware buffer. */
  uint32_t layout[MAX_NUM_PLANES];
}NvBufferParams;
/**
* Holds extended parameters for a hardware buffer.
*/
typedef struct _NvBufferParamsEx
{
/** nvbuffer basic parameters. */
NvBufferParams params;
/** offset in bytes from the start of the buffer to the first valid byte.
      (Applicable for NvBufferPayload_MemHandle) */
int32_t startofvaliddata;
/** size of the valid data from the first to the last valid byte.
      (Applicable for NvBufferPayload_MemHandle) */
int32_t sizeofvaliddatainbytes;
/** display scan format - progressive/interlaced. */
NvBufferDisplayScanFormat scanformat[MAX_NUM_PLANES];
/** offset of the second field for interlaced buffer. */
uint32_t secondfieldoffset[MAX_NUM_PLANES];
/** block height of the planes for blockLinear layout hardware buffer. */
uint32_t blockheightlog2[MAX_NUM_PLANES];
/** physical address of allocated planes. */
uint32_t physicaladdress[MAX_NUM_PLANES];
/** flags associated with planes */
uint64_t flags[MAX_NUM_PLANES];
/** metadata associated with the hardware buffer. */
void *payloadmetaInfo;
/** chroma subsampling parameters */
NvBufferChromaSubsamplingParams chromaSubsampling;
/** get buffer vpr information. */
bool is_protected;
/** buffer sync point object parameters */
NvBufferSyncObj syncobj;
/** reserved field. */
void *reserved;
}NvBufferParamsEx;
/**
* Holds parameters related to compositing/blending.
*/
typedef struct _NvBufferCompositeParams
{
/** flag to indicate which of the composition/blending parameters are valid. */
uint32_t composite_flag;
/** number of the input buffers to be composited. */
uint32_t input_buf_count;
/** filters to use for composition. */
NvBufferTransform_Filter composite_filter[MAX_COMPOSITE_FRAME];
/** alpha values of input buffers for the blending. */
float dst_comp_rect_alpha[MAX_COMPOSITE_FRAME];
/** source rectangle coordinates of input buffers for composition. */
NvBufferRect src_comp_rect[MAX_COMPOSITE_FRAME];
/** destination rectangle coordinates of input buffers for composition. */
NvBufferRect dst_comp_rect[MAX_COMPOSITE_FRAME];
/** background color values for composition. */
NvBufferCompositeBackground composite_bgcolor;
/** NvBufferSession to be used for composition. If NULL, the default session
* is used. */
NvBufferSession session;
}NvBufferCompositeParams;
/**
* Holds parameters for buffer transform functions.
*/
typedef struct _NvBufferTransformParams
{
/** flag to indicate which of the transform parameters are valid. */
uint32_t transform_flag;
/** flip method. */
NvBufferTransform_Flip transform_flip;
/** transform filter. */
NvBufferTransform_Filter transform_filter;
  /** source rectangle coordinates for crop operation. */
NvBufferRect src_rect;
  /** destination rectangle coordinates for crop operation. */
NvBufferRect dst_rect;
/** NvBufferSession to be used for transform. If NULL, the default session
* is used. */
NvBufferSession session;
}NvBufferTransformParams;
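/*
 * Illustrative sketch: cropping and scaling one hardware buffer into another
 * using the parameters above. NvBufferTransform() itself is declared further
 * down in this header, beyond the portion shown here; the 1920x1080 crop is
 * only an example value.
 */
static int
example_crop_and_scale (int src_dmabuf_fd, int dst_dmabuf_fd)
{
  NvBufferTransformParams params = {0};

  params.transform_flag = NVBUFFER_TRANSFORM_CROP_SRC | NVBUFFER_TRANSFORM_FILTER;
  params.transform_filter = NvBufferTransform_Filter_Smart;
  params.src_rect.top = 0;
  params.src_rect.left = 0;
  params.src_rect.width = 1920;
  params.src_rect.height = 1080;
  params.session = NULL;        /* use the default session */

  return NvBufferTransform (src_dmabuf_fd, dst_dmabuf_fd, &params);
}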
/**
* This method can be used to wait on sync point ID.
*
* @param[in] syncobj_params sync point object parameters.
* @param[in] timeout sync point wait timeout value.
*
* @returns 0 for success, -1 for failure
*/
int NvBufferSyncObjWait (NvBufferSyncObjParams *syncobj_params, unsigned int timeout);
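/*
 * Illustrative sketch: blocking until the output sync point recorded in an
 * NvBufferSyncObj has been reached. The producer of the buffer is assumed to
 * have filled in outsyncobj and set use_outsyncobj.
 */
static int
example_wait_for_completion (NvBufferSyncObj *syncobj)
{
  if (!syncobj->use_outsyncobj)
    return 0;

  return NvBufferSyncObjWait (&syncobj->outsyncobj,
      NVBUFFER_SYNCPOINT_WAIT_INFINITE);
}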
/**
* This method can be used to get hardware Buffer struct size.
*
* @returns hardware Buffer struct size.
*/
int NvBufferGetSize (void);
/**
* Creates an instance of EGLImage from a DMABUF FD.
*
* @param[in] display An EGLDisplay object used during the creation
* of the EGLImage. If NULL, nvbuf_utils() uses
* its own instance of EGLDisplay.
* @param[in] dmabuf_fd DMABUF FD of the buffer from which the EGLImage
* is to be created.
*
* @returns `EGLImageKHR` for success, `NULL` for failure
*/
EGLImageKHR NvEGLImageFromFd (EGLDisplay display, int dmabuf_fd);
/**
* Destroys an EGLImage object.
*
* @param[in] display An EGLDisplay object used to destroy the EGLImage.
* If NULL, nvbuf_utils() uses its own instance of
* EGLDisplay.
* @param[in] eglImage The EGLImageKHR object to be destroyed.
*
* @returns 0 for success, -1 for failure
*/
int NvDestroyEGLImage (EGLDisplay display, EGLImageKHR eglImage);
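/*
 * Illustrative sketch: importing a hardware buffer into EGL and tearing it
 * down again. Passing NULL for the display lets nvbuf_utils use its own
 * EGLDisplay, as documented above.
 */
static int
example_wrap_as_eglimage (int dmabuf_fd)
{
  EGLImageKHR image = NvEGLImageFromFd (NULL, dmabuf_fd);

  if (image == NULL)
    return -1;

  /* ... bind the image to a GL_TEXTURE_EXTERNAL_OES texture and draw ... */

  return NvDestroyEGLImage (NULL, image);
}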
/**
* Allocates a hardware buffer (deprecated).
*
* @deprecated Use NvBufferCreateEx() instead.
* @param[out] dmabuf_fd Returns the DMABUF FD of the hardware buffer.
* @param[in] width Buffer width, in bytes.
* @param[in] height Buffer height, in bytes.
* @param[in] layout Layout of the buffer.
* @param[in] colorFormat Color format of the buffer.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufferCreate (int *dmabuf_fd, int width, int height,
NvBufferLayout layout, NvBufferColorFormat colorFormat);
/**
* Allocates a hardware buffer.
*
* @param[out] dmabuf_fd Returns the DMABUF FD of the hardware buffer.
* @param[in] input_params Input parameters for hardware buffer creation.
*
* @returns 0 for success, -1 for failure
*/
int NvBufferCreateEx (int *dmabuf_fd, NvBufferCreateParams *input_params);
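/*
 * Illustrative sketch: allocating a pitch-linear NV12 surface with
 * NvBufferCreateEx() and releasing it with NvBufferDestroy() (declared below).
 */
static int
example_alloc_nv12 (int width, int height)
{
  int dmabuf_fd = -1;
  NvBufferCreateParams params = {0};

  params.width = width;
  params.height = height;
  params.payloadType = NvBufferPayload_SurfArray;
  params.layout = NvBufferLayout_Pitch;
  params.colorFormat = NvBufferColorFormat_NV12;
  params.nvbuf_tag = NvBufferTag_NONE;

  if (NvBufferCreateEx (&dmabuf_fd, &params) != 0)
    return -1;

  /* ... use the DMABUF FD: transform, map, wrap as an EGLImage, ... */

  return NvBufferDestroy (dmabuf_fd);
}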
/**
* Allocates a hardware buffer for interlace scan format.
*
* @param[out] dmabuf_fd Returns the DMABUF FD of the hardware buffer.
* @param[in] input_params Input parameters for hardware buffer creation.
*
* @returns 0 for success, -1 for failure
*/
int NvBufferCreateInterlace (int *dmabuf_fd, NvBufferCreateParams *input_params);
/**
* Allocates a hardware buffer with a given chroma subsampling location.
*
 * @param[out] dmabuf_fd Returns the DMABUF FD of the hardware buffer.
* @param[in] input_params Input parameters for hardware buffer creation.
* @param[in] chromaSubsampling Chroma location parameters.
*
* @returns 0 for success, -1 for failure
*/
int NvBufferCreateWithChromaLoc (int *dmabuf_fd, NvBufferCreateParams *input_params, NvBufferChromaSubsamplingParams *chromaSubsampling);
/**
* Gets buffer parameters.
* @param[in] dmabuf_fd `DMABUF FD` of buffer.
* @param[out] params A pointer to the structure to fill with parameters.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferGetParams (int dmabuf_fd, NvBufferParams *params);
/**
* Gets buffer extended parameters.
* @param[in] dmabuf_fd `DMABUF FD` of buffer.
* @param[out] exparams A pointer to the structure to fill with extended parameters.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferGetParamsEx (int dmabuf_fd, NvBufferParamsEx *exparams);
/**
* Destroys a hardware buffer.
* @param[in] dmabuf_fd Specifies the `dmabuf_fd` of the `hw_buffer` to destroy.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferDestroy (int dmabuf_fd);
/**
* Extracts the `dmabuf_fd` from the hardware buffer.
* @param[in] nvbuf Specifies the `hw_buffer`.
* @param[out] dmabuf_fd Returns DMABUF FD of `hw_buffer`.
*
* @returns 0 for success, -1 for failure.
*/
int ExtractFdFromNvBuffer (void *nvbuf, int *dmabuf_fd);
/**
* Releases the `dmabuf_fd` buffer.
* @see ExtractFdFromNvBuffer()
* @param[in] dmabuf_fd Specifies the `dmabuf_fd` to release.
*
* @returns 0 for success, -1 for failure.
*/
int NvReleaseFd (int dmabuf_fd);
/**
* Syncs the hardware memory cache for the CPU.
*
* \sa NvBufferMemMap for the purpose of the function
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] plane video frame plane.
* @param[in] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemSyncForCpu (int dmabuf_fd, unsigned int plane, void **pVirtAddr);
/**
* Syncs the hardware memory cache for the CPU; for use with buffers shared from another process.
*
* \sa NvBufferMemMapEx for the purpose of the function
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] exparams extended parameters for a hardware buffer.
* @param[in] plane video frame plane.
* @param[in] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemSyncForCpuEx (int dmabuf_fd, NvBufferParamsEx *exparams, unsigned int plane, void **pVirtAddr);
/**
* Syncs the hardware memory cache for the device.
*
* \sa NvBufferMemMap for the purpose of the function
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] plane video frame plane.
* @param[in] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemSyncForDevice (int dmabuf_fd, unsigned int plane, void **pVirtAddr);
/**
* Syncs the hardware memory cache for the device; for use with buffers shared from another process.
*
* \sa NvBufferMemMapEx for the purpose of the function
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] exparams extended parameters for a hardware buffer.
* @param[in] plane video frame plane.
* @param[in] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemSyncForDeviceEx (int dmabuf_fd, NvBufferParamsEx *exparams, unsigned int plane, void **pVirtAddr);
/**
* Gets the memory-mapped virtual address of the plane.
*
* The client must call NvBufferMemSyncForCpu() with the virtual address returned
* by this function before accessing the mapped memory in CPU.
*
* After memory mapping is complete, mapped memory modification
* must be coordinated between the CPU and hardware device as
* follows:
* - CPU: If the CPU modifies any mapped memory, the client must call
* NvBufferMemSyncForDevice() before any hardware device accesses the memory.
* - Hardware device: If the mapped memory is modified by any hardware device,
* the client must call NvBufferMemSyncForCpu() before CPU accesses the memory.
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] plane video frame plane.(Applies to @ref NvBufferPayload_SurfArray.)
* @param[in] memflag NvBuffer memory flag.
* @param[out] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemMap (int dmabuf_fd, unsigned int plane, NvBufferMemFlags memflag, void **pVirtAddr);
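/*
* Illustrative usage sketch (not a normative part of this header): the
* map / sync / access / sync / unmap sequence described above, for plane 0 of a
* buffer. The flag NvBufferMem_Read_Write is assumed to be one of the
* NvBufferMemFlags enumerators defined earlier in this header.
*
*   void *vaddr = NULL;
*
*   if (NvBufferMemMap (dmabuf_fd, 0, NvBufferMem_Read_Write, &vaddr) == 0)
*   {
*     NvBufferMemSyncForCpu (dmabuf_fd, 0, &vaddr);    // make device writes visible to the CPU
*     // ... read or modify plane 0 through vaddr ...
*     NvBufferMemSyncForDevice (dmabuf_fd, 0, &vaddr); // flush CPU writes before device access
*     NvBufferMemUnMap (dmabuf_fd, 0, &vaddr);
*   }
*/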
/**
* Gets the memory-mapped virtual address of the plane; for use with buffers shared from another process.
*
* The client must call NvBufferMemSyncForCpuEx() with the virtual address returned
* by this function before accessing the mapped memory in CPU in another process.
*
* After memory mapping is complete, mapped memory modification
* must be coordinated between the CPU and hardware device as
* follows:
* - CPU: If the CPU modifies any mapped memory, the client must call
* NvBufferMemSyncForDeviceEx() before any hardware device accesses the memory.
* - Hardware device: If the mapped memory is modified by any hardware device,
* the client must call NvBufferMemSyncForCpuEx() before CPU accesses the memory.
*
* @param[in] dmabuf_fd DMABUF FD of buffer.
* @param[in] exparams extended parameters for a hardware buffer.
* @param[in] plane video frame plane.(Applies to @ref NvBufferPayload_SurfArray.)
* @param[in] memflag NvBuffer memory flag.
* @param[out] pVirtAddr Virtual Address pointer of the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemMapEx (int dmabuf_fd, NvBufferParamsEx *exparams, unsigned int plane, NvBufferMemFlags memflag, void **pVirtAddr);
/**
* Unmaps the mapped virtual address of the plane.
*
* If the following conditions are both true, the client must call
* NvBufferMemSyncForDevice() before unmapping the memory:
* - Mapped memory was modified by the CPU.
* - Mapped memory will be accessed by a hardware device.
*
* @param[in] dmabuf_fd DMABUF FD of the buffer.
* @param[in] plane Video frame plane. Applies to
* @ref NvBufferPayload_SurfArray.
* @param[in] pVirtAddr Virtual address pointer to the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemUnMap (int dmabuf_fd, unsigned int plane, void **pVirtAddr);
/**
* Unmaps the mapped virtual address of the plane; for use with buffers shared from another process.
*
* If the following conditions are both true, the client must call
* NvBufferMemSyncForDeviceEx() before unmapping the memory in another process:
* - Mapped memory was modified by the CPU.
* - Mapped memory will be accessed by a hardware device.
*
* @param[in] dmabuf_fd DMABUF FD of the buffer.
* @param[in] exparams extended parameters for a hardware buffer.
* @param[in] plane Video frame plane. Applies to
* @ref NvBufferPayload_SurfArray.
* @param[in] pVirtAddr Virtual address pointer to the memory-mapped plane.
*
* @returns 0 for success, -1 for failure.
*/
int NvBufferMemUnMapEx (int dmabuf_fd, NvBufferParamsEx *exparams, unsigned int plane, void **pVirtAddr);
/**
* Copies the NvBuffer plane contents to a raw buffer plane.
* @param[in] dmabuf_fd DMABUF FD of NvBuffer.
* @param[in] plane video frame plane.
* @param[in] out_width aligned width of the raw data plane.
* @param[in] out_height aligned height of the raw data plane.
* @param[in] ptr pointer to the output raw plane data.
*
* @returns 0 for success, -1 for failure.
*/
int NvBuffer2Raw (int dmabuf_fd, unsigned int plane, unsigned int out_width, unsigned int out_height, unsigned char *ptr);
/**
* Copies raw buffer plane contents to an NvBuffer plane.
* @param[in] ptr pointer to the input raw plane data.
* @param[in] plane video frame plane.
* @param[in] in_width aligned width of the raw data plane.
* @param[in] in_height aligned height of the raw data plane.
* @param[in] dmabuf_fd DMABUF FD of NvBuffer.
*
* @returns 0 for success, -1 for failure.
*/
int Raw2NvBuffer (unsigned char *ptr, unsigned int plane, unsigned int in_width, unsigned int in_height, int dmabuf_fd);
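/*
* Illustrative usage sketch (not a normative part of this header): round-tripping
* plane 0 between an NvBuffer and a CPU-side raw buffer. The width, height and
* pitch arrays are assumed to be members of NvBufferParams as defined earlier in
* this header; malloc()/free() come from <stdlib.h>.
*
*   NvBufferParams params = {0};
*
*   if (NvBufferGetParams (dmabuf_fd, &params) == 0)
*   {
*     unsigned char *raw = malloc (params.pitch[0] * params.height[0]);
*     if (raw != NULL)
*     {
*       NvBuffer2Raw (dmabuf_fd, 0, params.width[0], params.height[0], raw);
*       // ... process the raw plane on the CPU ...
*       Raw2NvBuffer (raw, 0, params.width[0], params.height[0], dmabuf_fd);
*       free (raw);
*     }
*   }
*/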
/**
* Creates a new NvBufferSession for parallel scheduling of
* buffer transformations and compositions.
*
* @returns A session pointer, NULL for failure.
*/
NvBufferSession NvBufferSessionCreate(void);
/**
* Destroys an existing \ref NvBufferSession.
* @param[in] session An existing NvBufferSession.
*/
void NvBufferSessionDestroy(NvBufferSession session);
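/*
* Illustrative usage sketch (not a normative part of this header): using a
* dedicated NvBufferSession so that transforms issued from one thread are
* scheduled independently of transforms that use the default session.
*
*   NvBufferSession session = NvBufferSessionCreate ();
*   if (session != NULL)
*   {
*     NvBufferTransformParams transform_params = {0};
*     transform_params.session = session;
*     // ... issue NvBufferTransform() calls that use this session ...
*     NvBufferSessionDestroy (session);
*   }
*/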
/**
* Transforms one DMA buffer to another DMA buffer.
* This function can support transforms for copying, scaling, flipping, rotating, and cropping.
* @param[in] src_dmabuf_fd DMABUF FD of source buffer
* @param[in] dst_dmabuf_fd DMABUF FD of destination buffer
* @param[in] transform_params transform parameters
*
* @return 0 for success, -1 for failure.
*/
int NvBufferTransform (int src_dmabuf_fd, int dst_dmabuf_fd, NvBufferTransformParams *transform_params);
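/*
* Illustrative usage sketch (not a normative part of this header): scaling a
* cropped region of a source buffer into a destination buffer. The
* transform_flag and transform_filter fields, the NVBUFFER_TRANSFORM_* flag
* names, the NvBufferTransform_Filter_Smart enumerator and the NvBufferRect
* members (top, left, width, height) are assumed from the definitions earlier
* in this header; verify the exact names against the release in use.
*
*   NvBufferTransformParams transform_params = {0};
*
*   transform_params.transform_flag = NVBUFFER_TRANSFORM_CROP_SRC | NVBUFFER_TRANSFORM_FILTER;
*   transform_params.transform_filter = NvBufferTransform_Filter_Smart;
*   transform_params.src_rect.top = 0;
*   transform_params.src_rect.left = 0;
*   transform_params.src_rect.width = 1280;
*   transform_params.src_rect.height = 720;
*   transform_params.session = NULL;   // use the default session
*
*   if (NvBufferTransform (src_dmabuf_fd, dst_dmabuf_fd, &transform_params) != 0)
*   {
*     // handle transform failure
*   }
*/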
/**
* Transforms one DMA buffer to another DMA buffer; for use with buffers shared from another process.
* This function can support transforms for copying, scaling, flipping, rotating, and cropping.
* @param[in] src_dmabuf_fd DMABUF FD of source buffer
* @param[in] input_params extended input parameters for a hardware buffer.
* @param[in] dst_dmabuf_fd DMABUF FD of destination buffer
* @param[in] output_params extended output parameters for a hardware buffer.
* @param[in] transform_params transform parameters
*
* @return 0 for success, -1 for failure.
*/
int NvBufferTransformEx (int src_dmabuf_fd, NvBufferParamsEx *input_params, int dst_dmabuf_fd, NvBufferParamsEx *output_params, NvBufferTransformParams *transform_params);
/**
* Transforms one DMA buffer to another DMA buffer asynchronously (non-blocking).
* This function can support transforms for copying, scaling, flipping, rotating, and cropping.
* @param[in] src_dmabuf_fd DMABUF FD of source buffer
* @param[in] dst_dmabuf_fd DMABUF FD of destination buffer
* @param[in] transform_params transform parameters
* @param[in] syncobj nvbuffer sync point object
*
* @return 0 for success, -1 for failure.
*/
int NvBufferTransformAsync (int src_dmabuf_fd, int dst_dmabuf_fd, NvBufferTransformParams *transform_params, NvBufferSyncObj *syncobj);
/**
* \brief Composites multiple input DMA buffers to one output DMA buffer.
*
* This function can composite multiple input frames to one output.
*
* @param[in] src_dmabuf_fds An array of DMABUF FDs of source buffers.
* These buffers are composited together. Output
* is copied to the output buffer referenced by
* @a dst_dmabuf_fd.
* @param[in] dst_dmabuf_fd DMABUF FD of the compositing destination buffer.
* @param[in] composite_params Compositing parameters.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufferComposite (int *src_dmabuf_fds, int dst_dmabuf_fd, NvBufferCompositeParams *composite_params);
#ifdef __cplusplus
}
#endif
/** @} */
#endif

825
nvbufsurface.h Normal file

@@ -0,0 +1,825 @@
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file nvbufsurface.h
* <b>NvBufSurface Interface </b>
*
* This file specifies the NvBufSurface management API.
*
* The NvBufSurface API provides methods to allocate / deallocate, map / unmap
* and copy batched buffers.
*/
/**
* @defgroup ds_nvbuf_api Buffer Management API module
*
* This section describes types and functions of NvBufSurface application
* programming interface.
*
*/
#ifndef NVBUFSURFACE_H_
#define NVBUFSURFACE_H_
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C"
{
#endif
/** @defgroup ds_aaa NvBufSurface Types and Functions
* Defines types and functions of \ref NvBufSurface application
* programming interface.
* @ingroup ds_nvbuf_api
* @{ */
/** Defines the default padding length for reserved fields of structures. */
#define STRUCTURE_PADDING 4
/** Defines the maximum number of planes. */
#define NVBUF_MAX_PLANES 4
/**
* Defines the default values for chroma subsampling.
* The default value matches JPEG/MPEG use cases.
*/
#define NVBUFSURFACE_CHROMA_SUBSAMPLING_HORIZ_DEFAULT 0
#define NVBUFSURFACE_CHROMA_SUBSAMPLING_VERT_DEFAULT 1
#define NVBUFSURFACE_CHROMA_SUBSAMPLING_PARAMS_DEFAULT \
{ \
NVBUFSURFACE_CHROMA_SUBSAMPLING_HORIZ_DEFAULT, \
NVBUFSURFACE_CHROMA_SUBSAMPLING_VERT_DEFAULT \
}
/**
* Defines mapping types of NvBufSurface.
*/
typedef enum
{
NVBUF_MAP_READ, /**< Specifies \ref NvBufSurface mapping type "read." */
NVBUF_MAP_WRITE, /**< Specifies \ref NvBufSurface mapping type
"write." */
NVBUF_MAP_READ_WRITE, /**< Specifies \ref NvBufSurface mapping type
"read/write." */
} NvBufSurfaceMemMapFlags;
/**
* Defines tags that identify the components requesting a memory allocation.
* The tags can be used later to identify the total memory allocated to
* particular types of components.
* TODO: Check if DeepStream requires more tags to be defined.
*/
typedef enum
{
/** tag None. */
NvBufSurfaceTag_NONE = 0x0,
/** tag for Camera. */
NvBufSurfaceTag_CAMERA = 0x200,
/** tag for Jpeg Encoder/Decoder. */
NvBufSurfaceTag_JPEG = 0x1500,
/** tag for VPR Buffers. */
NvBufSurfaceTag_PROTECTED = 0x1504,
/** tag for H264/H265 Video Encoder. */
NvBufSurfaceTag_VIDEO_ENC = 0x1200,
/** tag for H264/H265/VP9 Video Decoder. */
NvBufSurfaceTag_VIDEO_DEC = 0x1400,
/** tag for Video Transform/Composite/Blend. */
NvBufSurfaceTag_VIDEO_CONVERT = 0xf01,
} NvBufSurfaceTag;
/**
* Defines color formats for NvBufSurface.
*/
typedef enum
{
/** Specifies an invalid color format. */
NVBUF_COLOR_FORMAT_INVALID,
/** Specifies 8 bit GRAY scale - single plane */
NVBUF_COLOR_FORMAT_GRAY8,
/** Specifies BT.601 colorspace - YUV420 multi-planar. */
NVBUF_COLOR_FORMAT_YUV420,
/** Specifies BT.601 colorspace - YVU420 multi-planar. */
NVBUF_COLOR_FORMAT_YVU420,
/** Specifies BT.601 colorspace - YUV420 ER multi-planar. */
NVBUF_COLOR_FORMAT_YUV420_ER,
/** Specifies BT.601 colorspace - YVU420 ER multi-planar. */
NVBUF_COLOR_FORMAT_YVU420_ER,
/** Specifies BT.601 colorspace - Y/CbCr 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12_ER,
/** Specifies BT.601 colorspace - Y/CrCb 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV21,
/** Specifies BT.601 colorspace - Y/CrCb ER 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV21_ER,
/** Specifies BT.601 colorspace - YUV 4:2:2 planar. */
NVBUF_COLOR_FORMAT_UYVY,
/** Specifies BT.601 colorspace - YUV ER 4:2:2 planar. */
NVBUF_COLOR_FORMAT_UYVY_ER,
/** Specifies BT.601 colorspace - YUV 4:2:2 planar. */
NVBUF_COLOR_FORMAT_VYUY,
/** Specifies BT.601 colorspace - YUV ER 4:2:2 planar. */
NVBUF_COLOR_FORMAT_VYUY_ER,
/** Specifies BT.601 colorspace - YUV 4:2:2 planar. */
NVBUF_COLOR_FORMAT_YUYV,
/** Specifies BT.601 colorspace - YUV ER 4:2:2 planar. */
NVBUF_COLOR_FORMAT_YUYV_ER,
/** Specifies BT.601 colorspace - YUV 4:2:2 planar. */
NVBUF_COLOR_FORMAT_YVYU,
/** Specifies BT.601 colorspace - YUV ER 4:2:2 planar. */
NVBUF_COLOR_FORMAT_YVYU_ER,
/** Specifies BT.601 colorspace - YUV444 multi-planar. */
NVBUF_COLOR_FORMAT_YUV444,
/** Specifies RGBA-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_RGBA,
/** Specifies BGRA-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_BGRA,
/** Specifies ARGB-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_ARGB,
/** Specifies ABGR-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_ABGR,
/** Specifies RGBx-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_RGBx,
/** Specifies BGRx-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_BGRx,
/** Specifies xRGB-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_xRGB,
/** Specifies xBGR-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_xBGR,
/** Specifies RGB-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_RGB,
/** Specifies BGR-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_BGR,
/** Specifies BT.601 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE,
/** Specifies BT.601 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_12LE,
/** Specifies BT.709 colorspace - YUV420 multi-planar. */
NVBUF_COLOR_FORMAT_YUV420_709,
/** Specifies BT.709 colorspace - YUV420 ER multi-planar. */
NVBUF_COLOR_FORMAT_YUV420_709_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12_709_ER,
/** Specifies BT.2020 colorspace - YUV420 multi-planar. */
NVBUF_COLOR_FORMAT_YUV420_2020,
/** Specifies BT.2020 colorspace - Y/CbCr 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12_2020,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE_709_ER,
/** Specifies BT.2020 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE_2020,
/** Specifies color format for packed 2 signed shorts */
NVBUF_COLOR_FORMAT_SIGNED_R16G16,
/** Specifies RGB- unsigned 8 bit multiplanar plane. */
NVBUF_COLOR_FORMAT_R8_G8_B8,
/** Specifies BGR- unsigned 8 bit multiplanar plane. */
NVBUF_COLOR_FORMAT_B8_G8_R8,
/** Specifies RGB-32bit Floating point multiplanar plane. */
NVBUF_COLOR_FORMAT_R32F_G32F_B32F,
/** Specifies BGR-32bit Floating point multiplanar plane. */
NVBUF_COLOR_FORMAT_B32F_G32F_R32F,
/** Specifies BT.601 colorspace - YUV422 multi-planar. */
NVBUF_COLOR_FORMAT_YUV422,
/** Specifies BT.601 colorspace - Y/CrCb 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV21_10LE,
/** Specifies BT.601 colorspace - Y/CrCb 4:2:0 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV21_12LE,
/** Specifies BT.2020 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_12LE_2020,
/** Specifies BT.601 colorspace - Y/CbCr 4:2:2 multi-planar. */
NVBUF_COLOR_FORMAT_NV16,
/** Specifies BT.601 colorspace - Y/CbCr 4:2:2 10-bit semi-planar. */
NVBUF_COLOR_FORMAT_NV16_10LE,
/** Specifies BT.601 colorspace - Y/CbCr 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_NV24,
/** Specifies BT.601 colorspace - Y/CrCb 4:4:4 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV24_10LE,
/** Specifies BT.601_ER colorspace - Y/CbCr 4:2:2 multi-planar. */
NVBUF_COLOR_FORMAT_NV16_ER,
/** Specifies BT.601_ER colorspace - Y/CbCr 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_NV24_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:2:2 multi-planar. */
NVBUF_COLOR_FORMAT_NV16_709,
/** Specifies BT.709 colorspace - Y/CbCr 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_NV24_709,
/** Specifies BT.709_ER colorspace - Y/CbCr 4:2:2 multi-planar. */
NVBUF_COLOR_FORMAT_NV16_709_ER,
/** Specifies BT.709_ER colorspace - Y/CbCr 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_NV24_709_ER,
/** Specifies BT.709 colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_NV24_10LE_709,
/** Specifies BT.709 ER colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_NV24_10LE_709_ER,
/** Specifies BT.2020 colorspace - Y/CbCr 10 bit 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_NV24_10LE_2020,
/** Specifies BT.2020 colorspace - Y/CbCr 12 bit 4:4:4 multi-planar. */
NVBUF_COLOR_FORMAT_NV24_12LE_2020,
/** Specifies Non-linear RGB BT.709 colorspace - RGBA-10-10-10-2 planar. */
NVBUF_COLOR_FORMAT_RGBA_10_10_10_2_709,
/** Specifies Non-linear RGB BT.2020 colorspace - RGBA-10-10-10-2 planar. */
NVBUF_COLOR_FORMAT_RGBA_10_10_10_2_2020,
/** Specifies Non-linear RGB BT.709 colorspace - BGRA-10-10-10-2 planar. */
NVBUF_COLOR_FORMAT_BGRA_10_10_10_2_709,
/** Specifies Non-linear RGB BT.2020 colorspace - BGRA-10-10-10-2 planar. */
NVBUF_COLOR_FORMAT_BGRA_10_10_10_2_2020,
/** Specifies Optical flow SAD calculation Buffer format */
NVBUF_COLOR_FORMAT_A32,
/** Specifies BT.601 colorspace - 10 bit YUV 4:2:2 interleaved. */
NVBUF_COLOR_FORMAT_UYVP,
/** Specifies BT.601 colorspace - 10 bit YUV ER 4:2:2 interleaved. */
NVBUF_COLOR_FORMAT_UYVP_ER,
NVBUF_COLOR_FORMAT_LAST
} NvBufSurfaceColorFormat;
/**
* Specifies layout formats for \ref NvBufSurface video planes.
*/
typedef enum
{
/** Specifies pitch layout. */
NVBUF_LAYOUT_PITCH,
/** Specifies block linear layout. */
NVBUF_LAYOUT_BLOCK_LINEAR,
} NvBufSurfaceLayout;
/**
* Specifies memory types for \ref NvBufSurface.
*/
typedef enum
{
/** Specifies the default memory type, i.e. \ref NVBUF_MEM_CUDA_DEVICE
for dGPU, \ref NVBUF_MEM_SURFACE_ARRAY for Jetson. Use \ref NVBUF_MEM_DEFAULT
to allocate whichever type of memory is appropriate for the platform. */
NVBUF_MEM_DEFAULT,
/** Specifies CUDA Host memory type. */
NVBUF_MEM_CUDA_PINNED,
/** Specifies CUDA Device memory type. */
NVBUF_MEM_CUDA_DEVICE,
/** Specifies CUDA Unified memory type. */
NVBUF_MEM_CUDA_UNIFIED,
/** Specifies NVRM Surface Array type. Valid only for Jetson. */
NVBUF_MEM_SURFACE_ARRAY,
/** Specifies NVRM Handle type. Valid only for Jetson. */
NVBUF_MEM_HANDLE,
/** Specifies memory allocated by malloc(). */
NVBUF_MEM_SYSTEM,
} NvBufSurfaceMemType;
/**
* Defines display scan formats for NvBufSurface video planes.
*/
typedef enum
{
/** Progressive scan formats. */
NVBUF_DISPLAYSCANFORMAT_PROGRESSIVE,
/** Interlaced scan formats. */
NVBUF_DISPLAYSCANFORMAT_INTERLACED,
} NvBufSurfaceDisplayScanFormat;
/**
* Holds plane-wise (extended) parameters of a buffer.
*/
typedef struct NvBufSurfacePlaneParamsEx
{
/** display scan format - progressive/interlaced. */
NvBufSurfaceDisplayScanFormat scanformat[NVBUF_MAX_PLANES];
/** offset of the second field for interlaced buffer. */
uint32_t secondfieldoffset[NVBUF_MAX_PLANES];
/** block height of the planes for blockLinear layout buffer. */
uint32_t blockheightlog2[NVBUF_MAX_PLANES];
/** physical address of allocated planes. */
uint32_t physicaladdress[NVBUF_MAX_PLANES];
/** flags associated with planes */
uint64_t flags[NVBUF_MAX_PLANES];
void * _reserved[STRUCTURE_PADDING * NVBUF_MAX_PLANES];
} NvBufSurfacePlaneParamsEx;
/**
* Holds plane-wise parameters of a buffer.
*/
typedef struct NvBufSurfacePlaneParams
{
/** Holds the number of planes. */
uint32_t num_planes;
/** Holds the widths of planes. */
uint32_t width[NVBUF_MAX_PLANES];
/** Holds the heights of planes. */
uint32_t height[NVBUF_MAX_PLANES];
/** Holds the pitches of planes in bytes. */
uint32_t pitch[NVBUF_MAX_PLANES];
/** Holds the offsets of planes in bytes. */
uint32_t offset[NVBUF_MAX_PLANES];
/** Holds the sizes of planes in bytes. */
uint32_t psize[NVBUF_MAX_PLANES];
/** Holds the number of bytes occupied by a pixel in each plane. */
uint32_t bytesPerPix[NVBUF_MAX_PLANES];
void * _reserved[STRUCTURE_PADDING * NVBUF_MAX_PLANES];
} NvBufSurfacePlaneParams;
/**
* Holds Chroma Subsampling parameters for NvBufSurface allocation.
*/
typedef struct NvBufSurfaceChromaSubsamplingParams
{
/** Holds the horizontal chroma location setting. */
uint8_t chromaLocHoriz;
/** Holds the vertical chroma location setting. */
uint8_t chromaLocVert;
} NvBufSurfaceChromaSubsamplingParams;
/**
* Holds parameters required to allocate an \ref NvBufSurface.
*/
typedef struct NvBufSurfaceCreateParams {
/** Holds the GPU ID. Valid only for a multi-GPU system. */
uint32_t gpuId;
/** Holds the width of the buffer. */
uint32_t width;
/** Holds the height of the buffer. */
uint32_t height;
/** Holds the amount of memory to be allocated. Optional; if set, all other
parameters (width, height, etc.) are ignored. */
uint32_t size;
/** Holds a "contiguous memory" flag. If set, contiguous memory is allocated
for the batch. Valid only for CUDA memory types. */
bool isContiguous;
/** Holds the color format of the buffer. */
NvBufSurfaceColorFormat colorFormat;
/** Holds the surface layout. May be Block Linear (BL) or Pitch Linear (PL).
For a dGPU, only PL is valid. */
NvBufSurfaceLayout layout;
/** Holds the type of memory to be allocated. */
NvBufSurfaceMemType memType;
} NvBufSurfaceCreateParams;
/**
* Holds extended parameters required to allocate an NvBufSurface.
* (Applicable to the NvBufSurfaceAllocate API.)
*/
typedef struct NvBufSurfaceAllocateParams {
/** Holds the legacy NvBufSurface creation parameters. */
NvBufSurfaceCreateParams params;
/** Display scan format */
NvBufSurfaceDisplayScanFormat displayscanformat;
/** Chroma Subsampling parameters */
NvBufSurfaceChromaSubsamplingParams chromaSubsampling;
/** components tag to be used for memory allocation */
NvBufSurfaceTag memtag;
/** Disables pitch padding. Applicable only to CUDA and system memory allocations.
When set, the pitch equals width times bytes-per-pixel for the plane (rounded up
to a multiple of 2 for odd widths). Note that CUDA kernels may fail for some
non-standard video resolutions because of the resulting unaligned pitch.
*/
bool disablePitchPadding;
/** Used void* from custom param for 64 bit machine, using other uint32_t param */
uint32_t _reservedParam;
void * _reserved[STRUCTURE_PADDING-1];
} NvBufSurfaceAllocateParams;
/**
* Holds the pointers of a mapped buffer.
*/
typedef struct NvBufSurfaceMappedAddr {
/** Holds planewise pointers to a CPU mapped buffer. */
void * addr[NVBUF_MAX_PLANES];
/** Holds a pointer to a mapped EGLImage. */
void *eglImage;
void * _reserved[STRUCTURE_PADDING];
} NvBufSurfaceMappedAddr;
/**
* Holds extended information about a single buffer in the batch.
*/
typedef struct NvBufSurfaceParamsEx {
/** offset in bytes from the start of the buffer to the first valid byte.
(Applicable for NVBUF_MEM_HANDLE) */
int32_t startofvaliddata;
/** size of the valid data from the first to the last valid byte.
(Applicable for NVBUF_MEM_HANDLE) */
int32_t sizeofvaliddatainbytes;
/** chroma subsampling parameters.
(Applicable for NVBUF_MEM_SURFACE_ARRAY) */
NvBufSurfaceChromaSubsamplingParams chromaSubsampling;
/** indicates whether the buffer is a protected (VPR) buffer. */
bool is_protected;
/** plane-wise extended parameters */
NvBufSurfacePlaneParamsEx planeParamsex;
void * _reserved[STRUCTURE_PADDING];
} NvBufSurfaceParamsEx;
/**
* Holds information about a single buffer in the batch.
*/
typedef struct NvBufSurfaceParams {
/** Holds the width of the buffer. */
uint32_t width;
/** Holds the height of the buffer. */
uint32_t height;
/** Holds the pitch of the buffer. */
uint32_t pitch;
/** Holds the color format of the buffer. */
NvBufSurfaceColorFormat colorFormat;
/** Holds BL or PL. For dGPU, only PL is valid. */
NvBufSurfaceLayout layout;
/** Holds a DMABUF FD. Valid only for \ref NVBUF_MEM_SURFACE_ARRAY and
\ref NVBUF_MEM_HANDLE type memory. */
uint64_t bufferDesc;
/** Holds the amount of allocated memory. */
uint32_t dataSize;
/** Holds a pointer to allocated memory. Not valid for
\ref NVBUF_MEM_SURFACE_ARRAY or \ref NVBUF_MEM_HANDLE. */
void * dataPtr;
/** Holds planewise information (width, height, pitch, offset, etc.). */
NvBufSurfacePlaneParams planeParams;
/** Holds pointers to mapped buffers. Initialized to NULL
when the structure is created. */
NvBufSurfaceMappedAddr mappedAddr;
/** Holds a pointer to the extended parameters of a single buffer in the batch. */
NvBufSurfaceParamsEx *paramex;
void * _reserved[STRUCTURE_PADDING - 1];
} NvBufSurfaceParams;
/**
* Holds information about batched buffers.
*/
typedef struct NvBufSurface {
/** Holds a GPU ID. Valid only for a multi-GPU system. */
uint32_t gpuId;
/** Holds the batch size. */
uint32_t batchSize;
/** Holds the number of valid and filled buffers. Initialized to zero when
an instance of the structure is created. */
uint32_t numFilled;
/** Holds an "is contiguous" flag. If set, memory allocated for the batch
is contiguous. */
bool isContiguous;
/** Holds type of memory for buffers in the batch. */
NvBufSurfaceMemType memType;
/** Holds a pointer to an array of batched buffers. */
NvBufSurfaceParams *surfaceList;
void * _reserved[STRUCTURE_PADDING];
} NvBufSurface;
/**
* Holds plane parameters to map the buffer received from another process.
*/
typedef struct NvBufSurfaceMapPlaneParams
{
/** Holds the widths of planes */
uint32_t width;
/** Holds the heights of planes */
uint32_t height;
/** Holds the pitches of planes in bytes */
uint32_t pitch;
/** Holds the offsets of planes in bytes */
uint32_t offset;
/** Holds the sizes of planes in bytes */
uint32_t psize;
/** Holds offset of the second field for interlaced buffer */
uint32_t secondfieldoffset;
/** Holds block height of the planes for blockLinear layout buffer */
uint32_t blockheightlog2;
/** Holds flags associated with the planes */
uint64_t flags;
/** Reserved */
uint8_t reserved[64];
} NvBufSurfaceMapPlaneParams;
/**
* Holds buffer parameters to map the buffer received from another process.
*/
typedef struct NvBufSurfaceMapParams {
/** Holds the number of planes. */
uint32_t num_planes;
/** Holds a GPU ID */
uint32_t gpuId;
/** Holds a DMABUF FD */
uint64_t fd;
/** Holds the total size of allocated memory */
uint32_t totalSize;
/** Holds type of memory */
NvBufSurfaceMemType memType;
/** Holds BL or PL layout */
NvBufSurfaceLayout layout;
/** Holds display scan format */
NvBufSurfaceDisplayScanFormat scanformat;
/** Holds the color format */
NvBufSurfaceColorFormat colorFormat;
/** Holds chroma subsampling parameters */
NvBufSurfaceChromaSubsamplingParams chromaSubsampling;
/** Holds plane parameters */
NvBufSurfaceMapPlaneParams planes[NVBUF_MAX_PLANES];
/** Reserved */
uint8_t reserved[64];
} NvBufSurfaceMapParams;
/**
* \brief Allocates a batch of buffers.
*
* Allocates memory for \a batchSize buffers and returns a pointer to an
* allocated \ref NvBufSurface. The \a params structure must have
* the allocation parameters of a single buffer. If \a params.size
* is set, a buffer of that size is allocated, and all other
* parameters (width, height, color format, etc.) are ignored.
*
* Call NvBufSurfaceDestroy() to free resources allocated by this function.
*
* @param[out] surf An indirect pointer to the allocated batched
* buffers.
* @param[in] batchSize Batch size of buffers.
* @param[in] params A pointer to an \ref NvBufSurfaceCreateParams
* structure.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceCreate (NvBufSurface **surf, uint32_t batchSize,
NvBufSurfaceCreateParams *params);
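/*
* Illustrative usage sketch (not a normative part of this header): allocating a
* batch of four NV12 surfaces with the default memory type for the platform,
* then releasing them with NvBufSurfaceDestroy().
*
*   NvBufSurface *surf = NULL;
*   NvBufSurfaceCreateParams create_params = {0};
*
*   create_params.gpuId = 0;
*   create_params.width = 1920;
*   create_params.height = 1080;
*   create_params.colorFormat = NVBUF_COLOR_FORMAT_NV12;
*   create_params.layout = NVBUF_LAYOUT_PITCH;
*   create_params.memType = NVBUF_MEM_DEFAULT;
*
*   if (NvBufSurfaceCreate (&surf, 4, &create_params) == 0)
*   {
*     // ... use surf->surfaceList[0 .. surf->batchSize - 1] ...
*     NvBufSurfaceDestroy (surf);
*   }
*/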
/**
* \brief Allocates a batch of buffers using extended allocation parameters.
*
* Allocates memory for \a batchSize buffers and returns a pointer to the
* allocated \ref NvBufSurface in \a *surf. The \a paramsext structure must hold
* the allocation parameters of a single buffer. If the size field in
* \a paramsext is set, a buffer of that size is allocated, and all other
* parameters (width, height, color format, etc.) are ignored.
*
* Call NvBufSurfaceDestroy() to free all the resources.
*
* @param[out] surf pointer to allocated batched buffers.
* @param[in] batchSize batch size of buffers.
* @param[in] paramsext pointer to NvBufSurfaceAllocateParams structure.
*
* @return 0 for success, -1 for failure.
*/
int NvBufSurfaceAllocate (NvBufSurface **surf, uint32_t batchSize,
NvBufSurfaceAllocateParams *paramsext);
/**
* Frees the batched buffers previously allocated through NvBufSurfaceCreate() or NvBufSurfaceAllocate().
*
* @param[in] surf A pointer to an \ref NvBufSurface to be freed.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceDestroy (NvBufSurface *surf);
/**
* \brief Maps hardware batched buffers to the HOST or CPU address space.
*
* Valid for \ref NVBUF_MEM_CUDA_UNIFIED type memory for dGPU and
* \ref NVBUF_MEM_SURFACE_ARRAY and \ref NVBUF_MEM_HANDLE type memory for
* Jetson.
*
* This function fills an array of pointers at
* \a surf->surfaceList->mappedAddr->addr.
* \a surf is a pointer to an \ref NvBufSurface.
* \a surfaceList is a pointer to an \ref NvBufSurfaceParams.
* \a mappedAddr is a pointer to an \ref NvBufSurfaceMappedAddr.
* \a addr is declared as an array of pointers to void, and holds pointers
* to the buffers.
*
* The client must call NvBufSurfaceSyncForCpu() with the virtual address
* populated by this function before accessing mapped memory in the CPU.
*
* After memory mapping is complete, mapped memory modification
* must be coordinated between the CPU and the hardware device as
* follows:
* - CPU: If the CPU modifies mapped memory, the client must call
* NvBufSurfaceSyncForDevice() before any hardware device accesses the memory.
* - Hardware device: If a hardware device modifies mapped memory, the client
* must call NvBufSurfaceSyncForCpu() before the CPU accesses the memory.
*
* Use NvBufSurfaceUnMap() to unmap buffer(s) and release any resource.
*
* @param[in,out] surf A pointer to an NvBufSurface structure. The function
* stores pointers to the buffers in a descendant of this
* structure; see the notes above.
* @param[in] index Index of a buffer in the batch. -1 refers to all buffers
* in the batch.
* @param[in] plane Index of a plane in buffer. -1 refers to all planes
* in the buffer.
* @param[in] type A flag for mapping type.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceMap (NvBufSurface *surf, int index, int plane, NvBufSurfaceMemMapFlags type);
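/*
* Illustrative usage sketch (not a normative part of this header): CPU access to
* the first buffer in a batch (a surf allocated with NvBufSurfaceCreate()),
* following the map / sync / access / sync / unmap sequence described above.
*
*   if (NvBufSurfaceMap (surf, 0, -1, NVBUF_MAP_READ_WRITE) == 0)
*   {
*     NvBufSurfaceSyncForCpu (surf, 0, -1);
*     void *y_plane = surf->surfaceList[0].mappedAddr.addr[0];
*     // ... read or modify the planes through the mapped pointers ...
*     NvBufSurfaceSyncForDevice (surf, 0, -1);
*     NvBufSurfaceUnMap (surf, 0, -1);
*   }
*/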
/**
* \brief Unmaps previously mapped buffer(s).
*
* @param[in] surf A pointer to an \ref NvBufSurface structure.
* @param[in] index Index of a buffer in the batch. -1 indicates
* all buffers in the batch.
* @param[in] plane Index of a plane in the buffer. -1 indicates
* all planes in the buffer.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceUnMap (NvBufSurface *surf, int index, int plane);
/**
* \brief Copies the content of source batched buffer(s) to destination
* batched buffer(s).
*
* You can use this function to copy source buffer(s) of one memory type
* to destination buffer(s) of another memory type,
* e.g. CUDA host to CUDA device, malloc'ed memory to CUDA device, etc.
*
* The source and destination \ref NvBufSurface objects must have the same
* buffer size and batch size.
*
* @param[in] srcSurf A pointer to the source NvBufSurface structure.
* @param[in] dstSurf A pointer to the destination NvBufSurface structure.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceCopy (NvBufSurface *srcSurf, NvBufSurface *dstSurf);
/**
* \brief Copies the NvBufSurface plane memory content to a raw buffer plane for a specific
* batched buffer.
*
* This function can be used to copy plane memory content from a specific source
* batch buffer of a supported memory type to a destination raw buffer pointer.
*
* @param[in] Surf pointer to NvBufSurface structure.
* @param[in] index index of buffer in the batch.
* @param[in] plane index of plane in buffer.
* @param[in] out_width aligned width of the raw data plane.
* @param[in] out_height aligned height of the raw data plane.
* @param[in] ptr pointer to the output raw plane data.
*
* @return 0 for success, -1 for failure.
*/
int NvBufSurface2Raw (NvBufSurface *Surf, unsigned int index, unsigned int plane, unsigned int out_width, unsigned int out_height, unsigned char *ptr);
/**
* \brief Copies the raw buffer plane memory content to the NvBufSurface plane memory of a specific
* batched buffer.
*
* This function can be used to copy plane memory content from a source raw
* buffer pointer to a specific destination batch buffer of a supported memory type.
*
* @param[in] ptr pointer to the input raw plane data.
* @param[in] index index of buffer in the batch.
* @param[in] plane index of plane in buffer.
* @param[in] in_width aligned width of the raw data plane.
* @param[in] in_height aligned height of the raw data plane.
* @param[in] Surf pointer to NvBufSurface structure.
*
* @return 0 for success, -1 for failure.
*/
int Raw2NvBufSurface (unsigned char *ptr, unsigned int index, unsigned int plane, unsigned int in_width, unsigned int in_height, NvBufSurface *Surf);
/**
* \brief Syncs the hardware memory cache for the CPU.
*
* Valid only for memory types \ref NVBUF_MEM_SURFACE_ARRAY and
* \ref NVBUF_MEM_HANDLE.
*
* @param[in] surf A pointer to an \ref NvBufSurface structure.
* @param[in] index Index of the buffer in the batch. -1 refers to
* all buffers in the batch.
* @param[in] plane Index of a plane in the buffer. -1 refers to all planes
* in the buffer.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceSyncForCpu (NvBufSurface *surf, int index, int plane);
/**
* \brief Syncs the hardware memory cache for the device.
*
* Valid only for memory types \ref NVBUF_MEM_SURFACE_ARRAY and
* \ref NVBUF_MEM_HANDLE.
*
* @param[in] surf A pointer to an \ref NvBufSurface structure.
* @param[in] index Index of a buffer in the batch. -1 refers to all buffers
* in the batch.
* @param[in] plane Index of a plane in the buffer. -1 refers to all planes
* in the buffer.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceSyncForDevice (NvBufSurface *surf, int index, int plane);
/**
* \brief Gets the \ref NvBufSurface from the DMABUF FD.
*
* @param[in] dmabuf_fd DMABUF FD of the buffer.
* @param[out] buffer A pointer to the NvBufSurface.
*
* @return 0 for success, or -1 otherwise.
*/
int NvBufSurfaceFromFd (int dmabuf_fd, void **buffer);
/**
* \brief Fills each byte of the buffer(s) in an \ref NvBufSurface with a
* provided value.
*
* You can also use this function to reset the buffer(s) in the batch.
*
* @param[in] surf A pointer to the NvBufSurface structure.
* @param[in] index Index of a buffer in the batch. -1 refers to all buffers
* in the batch.
* @param[in] plane Index of a plane in the buffer. -1 refers to all planes
* in the buffer.
* @param[in] value The value to be used as fill.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceMemSet (NvBufSurface *surf, int index, int plane, uint8_t value);
/**
* \brief Creates an EGLImage from the memory of one or more
* \ref NvBufSurface buffers.
*
* Only memory type \ref NVBUF_MEM_SURFACE_ARRAY is supported.
*
* This function returns the created EGLImage by storing its address at
* \a surf->surfaceList->mappedAddr->eglImage. (\a surf is a pointer to
* an NvBufSurface. \a surfaceList is a pointer to an \ref NvBufSurfaceParams.
* \a mappedAddr is a pointer to an \ref NvBufSurfaceMappedAddr.
* \a eglImage is declared as a pointer to void, and holds an
* EGLImageKHR.)
*
* You can use this function in scenarios where a CUDA operation on Jetson
* hardware memory (identified by \ref NVBUF_MEM_SURFACE_ARRAY) is required.
* The EGLImageKHR struct provided by this function can then be registered
* with CUDA for further CUDA operations.
*
* @param[in,out] surf A pointer to an NvBufSurface structure. The function
* stores a pointer to the created EGLImage in
* a descendant of this structure; see the notes above.
* @param[in] index Index of a buffer in the batch. -1 specifies all buffers
* in the batch.
*
* @return 0 for success, or -1 otherwise.
*/
int NvBufSurfaceMapEglImage (NvBufSurface *surf, int index);
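/*
* Illustrative usage sketch (not a normative part of this header): obtaining an
* EGLImage for the first buffer of an NVBUF_MEM_SURFACE_ARRAY batch, e.g. so it
* can be registered with CUDA, and releasing it afterwards.
*
*   if (NvBufSurfaceMapEglImage (surf, 0) == 0)
*   {
*     void *egl_image = surf->surfaceList[0].mappedAddr.eglImage;
*     // ... register egl_image with CUDA / EGL and process the frame ...
*     NvBufSurfaceUnMapEglImage (surf, 0);
*   }
*/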
/**
* \brief Destroys the previously created EGLImage object(s).
*
* @param[in] surf A pointer to an \ref NvBufSurface structure.
* @param[in] index The index of a buffer in the batch. -1 specifies all
* buffers in the batch.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceUnMapEglImage (NvBufSurface *surf, int index);
/**
* \brief Imports parameters received from another process and creates a hardware buffer.
*
* The calling process must call NvBufferDestroy() to release the reference count
* on the hardware buffer handle of the imported DMA buffer.
*
* @param[out] out_nvbuf_surf Pointer to hardware buffer.
* @param[in] in_params Parameters to create hardware buffer.
*
* @return 0 for success, -1 for failure.
*/
int NvBufSurfaceImport (NvBufSurface **out_nvbuf_surf, const NvBufSurfaceMapParams *in_params);
/**
* \brief Gets the buffer information needed to map the buffer in another process.
*
* @param[in] surf Pointer to NvBufSurface structure.
* @param[in] index Index of a buffer in the batch.
* @param[out] params Pointer to NvBufSurfaceMapParams information of the buffer.
*
* @return 0 for success, -1 for failure.
*/
int NvBufSurfaceGetMapParams (const NvBufSurface *surf, int index, NvBufSurfaceMapParams *params);
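/*
* Illustrative usage sketch (not a normative part of this header): sharing a
* buffer between processes. The exporting process queries the map parameters and
* transfers them, together with the DMABUF FD, to the importing process (for
* example over a UNIX-domain socket); the importing process then reconstructs an
* NvBufSurface from those parameters.
*
*   // Exporting process:
*   NvBufSurfaceMapParams map_params = {0};
*   NvBufSurfaceGetMapParams (surf, 0, &map_params);
*   // ... send map_params and the DMABUF FD to the peer process ...
*
*   // Importing process:
*   NvBufSurface *imported = NULL;
*   if (NvBufSurfaceImport (&imported, &map_params) == 0)
*   {
*     // ... use the imported surface ...
*   }
*/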
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* NVBUFSURFACE_H_ */

1
push_info.txt Normal file

@@ -0,0 +1 @@
jetson_35.3.1