mirror of
git://nv-tegra.nvidia.com/tegra/nv-sci-src/nvsci_samples.git
340 lines, 12 KiB, C
/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
 *
 * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
 * property and proprietary rights in and to this material, related
 * documentation and any modifications thereto. Any use, reproduction,
 * disclosure or distribution of this material and related documentation
 * without an express license agreement from NVIDIA CORPORATION or
 * its affiliates is strictly prohibited.
 */

/*
 * NvSciStream Event Loop Driven Sample App - service-based event handling
 *
 * This file implements the option to handle events for all blocks
 * through an event service. Each block adds an event notifier to
 * a list. That notifier will be signaled when an event is ready
 * on the block. A single main loop waits for one or more of the
 * notifiers to trigger, processes events on the corresponding
 * blocks, and goes back to waiting. When all blocks have been
 * destroyed either due to failure or all payloads being processed,
 * the loop exits and the function returns.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdbool.h>
#if (QNX == 1)
#include <sys/neutrino.h>
#endif
#include "nvscievent.h"
#include "block_info.h"
#include "event_loop.h"

/* Event service */
static NvSciEventLoopService* service = NULL;

/* List of blocks */
#define MAX_INTERNAL_NOTIFIERS 10
#define MAX_NOTIFIERS MAX_BLOCKS + MAX_INTERNAL_NOTIFIERS

int32_t numBlocks = 0U;
uint32_t numAlive = 0U;
static uint32_t numIntNotifiers = 0U;
static int32_t numNotifiers = 0U;

BlockEventData blocks[MAX_BLOCKS];
BlockEventData* blocksAlive[MAX_BLOCKS];
static NvSciEventNotifier* intNotifiers[MAX_INTERNAL_NOTIFIERS];

static uint32_t success = 1U;

/* Initialize service-based event handling */
static int32_t eventServiceInit(void)
{
    /*
     * The OS configuration should be NULL for Linux and should
     * have a valid configuration for QNX.
     * See NvSciEventLoopServiceCreateSafe API Specification for more
     * information.
     */
    void *osConfig = NULL;

#if (QNX == 1)
    struct nto_channel_config config = {0};

    /*
     * The number of pulses can be calculated from the number of
     * notifiers bound to the event service, the number of packets, and
     * the number of events handled by each block:
     *   (num_of_pulses = num_of_notifiers * 4 + \
     *    (num_packets + 5) * num_of_endpoints)
     * If a pulse pool shortage is encountered in normal operation,
     * increase the number of pulses.
     * If there are no available pulses in the pool, SIGKILL is delivered
     * by default. You may configure the sigevent that you want to be
     * delivered when a pulse can't be obtained from the pool.
     *
     * See NvSciEventLoopServiceCreateSafe API Specification for more
     * information.
     */

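    /*
     * Illustrative worked example of the formula above (the numbers are
     * hypothetical, not taken from this sample): with 4 notifiers bound to
     * the service, 3 packets, and 1 IPC endpoint,
     *   num_of_pulses = 4 * 4 + (3 + 5) * 1 = 24.
     * This sample instead just picks a generously large value below.
     */
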
    /* The num_pulses set below is just an example number and should be
     * adjusted depending on the use case.
     */
    config.num_pulses = 1024U;
    config.rearm_threshold = 0;
    osConfig = &config;
#endif

    /* Create event loop service */
    NvSciError err = NvSciEventLoopServiceCreateSafe(1U, osConfig, &service);
    if (NvSciError_Success != err) {
        printf("Failed (%x) to create event service\n", err);
        return 0;
    }

    return 1;
}

/* Register a new block with the event management */
static int32_t eventServiceRegister(
    NvSciStreamBlock blockHandle,
    void* blockData,
    BlockFunc blockFunc)
{
    /* Sanity check to make sure we left room for enough blocks */
    if (numBlocks >= MAX_BLOCKS) {
        printf("Exceeded maximum number of blocks\n");
        return 0;
    }

    /* Grab the next entry in the list for the new block and fill it in */
    BlockEventData* entry = &blocks[numBlocks++];
    entry->handle = blockHandle;
    entry->data = blockData;
    entry->func = blockFunc;
    entry->isAlive = true;
    entry->retry = false;

    /* Create a notifier for events on this block */
    NvSciError err =
        NvSciStreamBlockEventServiceSetup(entry->handle,
                                          &service->EventService,
                                          &entry->notifier);

    if (NvSciError_Success != err) {
        printf("Failed (%x) to create event notifier for block\n", err);
        return 0;
    }

    return 1;
}

/* Register a new block with the event management to handle internal events.
 *
 * This is currently only supported on IpcSrc/IpcDst blocks.
 *
 * Without a user-provided event service, each IpcSrc/IpcDst block creates
 * an internal event service and spawns a dispatch thread to handle the
 * internal I/O messages.
 *
 * With a user-provided event service, no internal thread is created.
 * The application needs to wait for events on these internal notifiers.
 * When there is a new notification on an internal notifier, it triggers
 * the NvSciStream callback function automatically.
 *
 * The application can bind the internal notifiers and the external
 * notifiers, which are used to monitor the NvSciStreamEvent on the block,
 * to the same event service or to different ones. In this sample app, we
 * bind them to the same event service and use one thread to handle all
 * the events.
 */
static int32_t eventServiceInternalRegister(
    NvSciStreamBlock blockHandle)
{
    /* Get notifiers for internal events on this block */
    numIntNotifiers = MAX_INTERNAL_NOTIFIERS;
    NvSciError err =
        NvSciStreamBlockInternalEventServiceSetup(
            blockHandle,
            &service->EventService,
            &numIntNotifiers,
            intNotifiers);

    if (NvSciError_Success != err) {
        printf("Failed (%x) to setup internal event service for block\n", err);
        return 0;
    }

    /* Sanity check to make sure we left room for enough internal notifiers */
    if (numIntNotifiers >= MAX_INTERNAL_NOTIFIERS) {
        printf("Exceeded maximum number of internal notifiers\n");
        return 0;
    }

    return 1;
}

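/*
 * For illustration only: block setup code elsewhere in the sample reaches
 * the two registration handlers above through the eventFuncs_Service table
 * defined at the end of this file. A typical sequence for an IpcSrc block
 * (the pointer name "eventFuncs" and the block/handler names are
 * assumptions, not taken from this file) might look roughly like:
 *
 *   eventFuncs->reg(ipcSrcBlock, ipcSrcData, handleIpcSrcEvents);
 *   eventFuncs->regInt(ipcSrcBlock);   // internal notifiers, IPC blocks only
 *
 * Non-IPC blocks only need the first call.
 */
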
/* Main service-based event loop */
static int32_t eventServiceLoop(void)
{
    int32_t i;
    int32_t k;

    /*
     * Notes on handling notifications:
     * If more than one signal occurs on a notifier in between calls
     * to check for events, then NvSciEvent will squash the notifications,
     * so only one is received. This means the application must drain
     * all pending events on a block after its notifier signals. It won't
     * receive new notifications for those pending events.
     * A simple implementation might process each block's events in a loop
     * until there are no more, and then move on to the next block. But
     * this poses a risk of starvation. Consider the case of a stream in
     * mailbox mode, where the mailbox already has a waiting payload.
     * If the producer receives a PacketReady event, it will obtain
     * the packet, fill it with data, and present it to the stream.
     * Because the mailbox is full, the packet will immediately be
     * returned, resulting in a new PacketReady event. The application
     * can go into an infinite loop, generating new payloads on the
     * producer without giving the consumer a chance to process them.
     * We therefore use an event loop that only processes one event
     * per block for each iteration, but keeps track of whether there
     * was an event on a block for the previous pass, and if so
     * retries it even if no new signal occurred. The event loop
     * waits for events only when there was no prior event. Otherwise
     * it only polls for new ones.
     * For internal notifiers, the handler is registered by NvSciStream
     * when the notifiers are created, so it is triggered automatically
     * whenever there is a new event. The application only needs to wait
     * for new events; it does not need to handle them itself.
     */

    /* Pack all notifiers into an array */
    NvSciEventNotifier* notifiers[MAX_NOTIFIERS];

    /* Initialize loop control parameters */
    int64_t timeout = 1000000;
    bool event[MAX_NOTIFIERS];
    uint32_t numAliveBlocks;

    numAlive = numBlocks;

    /* Main loop - Handle events until all blocks report completion or fail */
    while (numAlive && !atomic_load(&streamDone)) {

        numNotifiers = 0;
        numAliveBlocks = 0;

        /* Acquire the lock */
        pthread_mutex_lock(&mutex);
        /* Pack the external notifiers for the alive blocks */
        for (i=0; i<numBlocks; ++i) {
            if (blocks[i].isAlive) {
                blocksAlive[numAliveBlocks] = &blocks[i];
                notifiers[numAliveBlocks] = blocks[i].notifier;
                numAliveBlocks++;
            }
        }

        k = numAliveBlocks;
        /* Pack the internal notifiers */
        for (uint32_t j = 0; j < numIntNotifiers; ++j,++k) {
            notifiers[k] = intNotifiers[j];
        }

        numNotifiers = numAliveBlocks + numIntNotifiers;

        /* Release the lock */
        pthread_mutex_unlock(&mutex);

        /* Wait/poll for events, depending on current timeout */
        memset(event, 0, sizeof(event));

        NvSciError err = service->WaitForMultipleEventsExt(
            &service->EventService,
            notifiers,
            numNotifiers,
            timeout,
            event);

        if ((NvSciError_Success != err) && (NvSciError_Timeout != err)) {
            printf("Failure (%x) while waiting/polling event service\n", err);
            return 0;
        }

        /* Restore the default wait timeout for the next pass; it is
         * shortened to a poll below if any block needs a retry */
        timeout = 1000000;

        /*
         * Check for events on new blocks that signaled or old blocks that
         * had an event on the previous pass. This is done in reverse
         * of the order in which blocks were registered. This is because
         * producers are created before consumers, and for mailbox mode
         * we want to give the consumer a chance to use payloads before
         * the producer replaces them.
         */
        for (i=numAliveBlocks-1; ((i>=0) && (!atomic_load(&streamDone))); --i) {
            /* Get block info */
            BlockEventData* entry = blocksAlive[i];
            if (entry != NULL) {
                if (event[i] || entry->retry) {

                    /* Reset to no retry for next pass */
                    entry->retry = false;

                    /* Skip if this block is no longer in use */
                    if (entry->data) {

                        /* Call the block's event handler function */
                        int32_t rv = entry->func(entry->data, 0);
                        if (rv < 0) {
                            /* On failure, stop checking this block and
                             * mark the app as failed */
                            success = 0U;
                            entry->data = NULL;
                            numAlive--;
                        } else if (rv == 2) {
                            /* On completion, no longer check this block */
                            entry->isAlive = false;
                            entry->data = NULL;
                            numAlive--;
                        } else if (rv == 1) {
                            /* An event was handled, so retry this block
                             * on the next pass */
                            timeout = 0;
                            entry->retry = true;
                        }
                    }
                }
            }
        }
    }

    /* Delete internal notifiers */
    for (uint32_t j=0; j<numIntNotifiers; ++j) {
        intNotifiers[j]->Delete(intNotifiers[j]);
    }

    /* Delete notifiers */
    for (i=0; i<numBlocks; ++i) {
        blocks[i].notifier->Delete(blocks[i].notifier);
    }

    /* Delete service */
    service->EventService.Delete(&service->EventService);

    return success;
}

/* Table of functions for service-based event handling */
EventFuncs const eventFuncs_Service = {
    .init = eventServiceInit,
    .reg = eventServiceRegister,
    .regInt = eventServiceInternalRegister,
    .loop = eventServiceLoop
};
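
/*
 * Minimal usage sketch, for reference only: the calling code (e.g. main.c)
 * selects one EventFuncs table for the chosen event-handling mode and then
 * drives it. The pointer name, option handling, and return-code mapping
 * below are assumptions about that code, not taken from this file:
 *
 *   const EventFuncs* eventFuncs = &eventFuncs_Service;
 *   if (!eventFuncs->init()) {
 *       return 1;
 *   }
 *   // ... create stream blocks, calling eventFuncs->reg() for each one
 *   // ... and eventFuncs->regInt() for the IPC block ...
 *   return eventFuncs->loop() ? 0 : 1;
 */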