Files
nvsci_samples/event_sample_app/event_loop_threads.c
svcmobrel-release a3f1b7ea33 Updating prebuilts and/or headers
2eba699906039d6615aae4967f6ea79bfe44a40a - event_sample_app/block_pool.c
f3abb0a884f0647204ad32ff51255c4712e52120 - event_sample_app/Makefile
9ee49033e077ac5c8bf458a04c91dd3dbed9633d - event_sample_app/event_loop.h
b33adce6eb1bbc7af23f6c37b6a635479e18a66a - event_sample_app/block_returnsync.c
a56041c06b6bc1d3812b72b399d7d78dd7895485 - event_sample_app/block_limiter.c
ca34c957759f7a010f0cbbbf9bedc03a2c98092b - event_sample_app/block_c2c.c
8d6d0ec3aa8e374a1d2a5fedc9dd24ff7bbdb731 - event_sample_app/block_multicast.c
a76149a2531899e35843d939f60ad8979d8cf65f - event_sample_app/block_consumer_uc1.c
9da8763e4af4b4b7278507a3ebfe2c68a7a24585 - event_sample_app/util.h
2bf7e1383d6e8913c9b0a6a8bdd48fe63d8098d0 - event_sample_app/block_producer_uc1.c
a54abf82eaa2d888e379ab4596ba68ce264e80b5 - event_sample_app/block_info.h
080a6efe263be076c7046e70e31098c2bbed0f6d - event_sample_app/block_presentsync.c
7dd10e5ea71f0c4a09bbe1f9f148f67a13ee098c - event_sample_app/util.c
bc1a6f9017b28e5707c166a658a35e6b3986fdf4 - event_sample_app/usecase1.h
317f43efc59638bf1eae8303f0c79eafb059241a - event_sample_app/block_ipc.c
40361c8f0b68f7d5207db2466ce08c19c0bf1c90 - event_sample_app/event_loop_service.c
efad113d0107e5d8f90146f3102a7c0ed22f1a35 - event_sample_app/event_loop_threads.c
2908615cebcf36330b9850c57e8745bf324867b2 - event_sample_app/block_queue.c
36ed68eca1a7800cf0d94e763c9fc352ee8cda1e - event_sample_app/block_common.c
675f75d61bd0226625a8eaaf0e503c9e976c8d61 - event_sample_app/main.c
c3b26619dd07d221e953fc5dc29a50dcb95a8b97 - rawstream/Makefile
1fbb82e2281bb2e168c87fd20903bbed898ca160 - rawstream/rawstream_cuda.c
1d96498fe3c922f143f7e50e0a32b099714060ad - rawstream/rawstream_consumer.c
d077dafc9176686f6d081026225325c2a303a60e - rawstream/rawstream_producer.c
54ae655edddda7dcabe22fbf0b27c3f617978851 - rawstream/rawstream.h
d5ffeef3c7ad2af6f6f31385db7917b5ef9a7438 - rawstream/rawstream_ipc_linux.c
81e3d6f8ff5252797a7e9e170b74df6255f54f1b - rawstream/rawstream_main.c

Change-Id: I0f4e671693eb0addfe8d0e6532cc8f240cb6c778
2025-09-19 10:10:49 -07:00

/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
 *
 * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
 * property and proprietary rights in and to this material, related
 * documentation and any modifications thereto. Any use, reproduction,
 * disclosure or distribution of this material and related documentation
 * without an express license agreement from NVIDIA CORPORATION or
 * its affiliates is strictly prohibited.
 */
/*
 * NvSciStream Event Loop Driven Sample App - thread-based event handling
 *
 * This file implements the option to handle events for each block in
 * a separate thread. Each thread just has a loop that waits for a
 * block event to occur and handles it appropriately, until the block
 * has performed all required actions or receives notification that
 * the stream has disconnected.
 *
 * In practice, only a few block types (producer, consumer, and pool)
 * receive any events that need to be handled. So a more streamlined
 * application might choose to only monitor them, assuming that the
 * other blocks can be left alone until the time comes to tear them
 * down.
 *
 * Note: We use standard pthread functions here because it allows this
 * sample to run on all operating systems. QNX has its own thread
 * management functions which might be more efficient when using
 * this approach.
 */

#include <stdint.h>
#include <stdio.h>
#include <pthread.h>
#include "event_loop.h"

/* Structure to track block info */
typedef struct {
    NvSciStreamBlock handle;    /* Block handle */
    void*            data;      /* Block's private data */
    BlockFunc        func;      /* Block's event handling function */
    pthread_t        thread;    /* Thread monitoring the block */
} BlockEventData;

/* List of blocks */
#define MAX_BLOCKS 100U
static uint32_t numBlocks = 0U;
static BlockEventData blocks[MAX_BLOCKS];

/* Overall success flag, cleared by any thread that encounters a failure */
static uint32_t success = 1U;

/* The per-thread loop function for each block */
static void* eventThreadFunc(void* arg)
{
    /* Simple loop, waiting for events on the block until the block is done */
    BlockEventData* entry = (BlockEventData*)arg;
    while (1) {
        /* Wait for and handle the next event on this block */
        int32_t rv = entry->func(entry->data, 1);
        if (rv < 0) {
            /* A negative return indicates a failure handling the event */
            success = 0U;
            break;
        } else if (rv == 2) {
            /* A return of 2 indicates the block has finished its work */
            break;
        }
    }
    return NULL;
}

/* Initialize per-thread event handling */
static int32_t eventThreadInit(void)
{
    /* No special initialization required for this method */
    return 1;
}

/* Register a new block with the event management */
static int32_t eventThreadRegister(
    NvSciStreamBlock blockHandle,
    void*            blockData,
    BlockFunc        blockFunc)
{
    /* Sanity check to make sure we left room for enough blocks */
    if (numBlocks >= MAX_BLOCKS) {
        printf("Exceeded maximum number of blocks\n");
        return 0;
    }

    /* Grab the next entry in the list for the new block and fill it in */
    BlockEventData* entry = &blocks[numBlocks++];
    entry->handle = blockHandle;
    entry->data   = blockData;
    entry->func   = blockFunc;

    /* Spawn a thread */
    int32_t rv = pthread_create(&entry->thread,
                                NULL,
                                eventThreadFunc,
                                (void*)entry);
    if (rv != 0) {
        printf("Failed to spawn thread to monitor block\n");
        return 0;
    }

    return 1;
}
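
/*
 * Note: eventThreadRegister() is presumably called only from the main
 * setup thread while the stream is being constructed; numBlocks and
 * blocks[] are not protected by any lock.
 */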

/* Main per-thread event loop */
static int32_t eventThreadLoop(void)
{
    /*
     * Each block has its own thread loop. This main function just needs
     * to wait for all of them to exit and then report whether they all
     * succeeded. The threads can be joined in any order.
     */
    for (uint32_t i = 0; i < numBlocks; ++i) {
        (void)pthread_join(blocks[i].thread, NULL);
    }
    return success;
}

/* Table of functions for per-thread event handling */
EventFuncs const eventFuncs_Threads = {
    .init   = eventThreadInit,
    .reg    = eventThreadRegister,
    .regInt = NULL,
    .loop   = eventThreadLoop
};
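
/*
 * Illustrative sketch (not part of the original sample): how a driver such
 * as main.c might consume this table through the generic EventFuncs
 * interface. The function name and its parameters here are hypothetical;
 * the real block handles, data pointers, and event functions come from the
 * block modules (block_pool.c, block_producer_uc1.c, etc.), and the member
 * signatures are assumed to match the functions registered above.
 */
static int32_t exampleRunWithThreads(
    NvSciStreamBlock blockHandle,
    void*            blockData,
    BlockFunc        blockFunc)
{
    EventFuncs const* ev = &eventFuncs_Threads;

    /* One-time setup for this event-handling method (a no-op here) */
    if (1 != ev->init()) {
        return 0;
    }

    /* Register the block; this spawns the thread that services its events */
    if (1 != ev->reg(blockHandle, blockData, blockFunc)) {
        return 0;
    }

    /* Wait for every registered block's thread to finish */
    return ev->loop();
}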