/* NvSciStream Event Loop Driven Sample App - service-based event handling
 *
 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
 * property and proprietary rights in and to this material, related
 * documentation and any modifications thereto. Any use, reproduction,
 * disclosure or distribution of this material and related documentation
 * without an express license agreement from NVIDIA CORPORATION or
 * its affiliates is strictly prohibited.
 */

/*
 * This file implements the option to handle events for all blocks
 * through an event service. Each block adds an event notifier to
 * a list. That notifier will be signaled when an event is ready
 * on the block. A single main loop waits for one or more of the
 * notifiers to trigger, processes events on the corresponding
 * blocks, and goes back to waiting. When all blocks have been
 * destroyed either due to failure or all payloads being processed,
 * the loop exits and the function returns.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdbool.h>
#if (QNX == 1)
#include <sys/neutrino.h>
#endif
#include "nvscievent.h"
#include "event_loop.h"

/* Event service */
static NvSciEventLoopService* service = NULL;

/* Structure to track block info */
typedef struct {
    NvSciStreamBlock    handle;
    void*               data;
    BlockFunc           func;
    NvSciEventNotifier* notifier;
} BlockEventData;

/* List of blocks */
#define MAX_BLOCKS 100
static int32_t numBlocks = 0;
static BlockEventData blocks[MAX_BLOCKS];
static uint32_t success = 1U;

/* Initialize service-based event handling */
static int32_t eventServiceInit(void)
{
    /*
     * The OS configuration should be NULL for Linux and should be
     * a valid configuration for QNX.
     * See the NvSciEventLoopServiceCreateSafe API specification for more
     * information.
     */
    void *osConfig = NULL;

#if (QNX == 1)
    struct nto_channel_config config = {0};

    /*
     * The number of pulses can be estimated from the number of
     * notifiers bound to the event service, the number of packets, and
     * the number of events handled by each block:
     *   (num_of_pulses = num_of_notifiers * 4 + \
     *                    (num_packets + 5) * num_of_endpoints)
     * If a pulse pool shortage is encountered during normal operation,
     * increase the number of pulses.
     * If there are no available pulses in the pool, SIGKILL is delivered
     * by default. You may configure the sigevent that you want to be
     * delivered when a pulse can't be obtained from the pool.
     *
     * See the NvSciEventLoopServiceCreateSafe API specification for more
     * information.
     */

    /* The num_pulses set below is just an example number and should be
     * adjusted depending on the use case.
     */
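    /*
     * As a purely illustrative example (the numbers are hypothetical, not
     * taken from this sample): with 4 notifiers, 8 packets, and 2 endpoints,
     * the formula above gives 4 * 4 + (8 + 5) * 2 = 42 pulses, so the value
     * of 100 used below leaves some headroom.
     */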
    config.num_pulses = 100U;
    config.rearm_threshold = 0;
    osConfig = &config;
#endif

    /* Create event loop service */
    NvSciError err = NvSciEventLoopServiceCreateSafe(1U, osConfig, &service);
    if (NvSciError_Success != err) {
        printf("Failed (%x) to create event service\n", err);
        return 0;
    }

    return 1;
}

/* Register a new block with the event management */
static int32_t eventServiceRegister(
    NvSciStreamBlock blockHandle,
    void*            blockData,
    BlockFunc        blockFunc)
{
    /* Sanity check to make sure we left room for enough blocks */
    if (numBlocks >= MAX_BLOCKS) {
        printf("Exceeded maximum number of blocks\n");
        return 0;
    }

    /* Grab the next entry in the list for the new block and fill it in */
    BlockEventData* entry = &blocks[numBlocks++];
    entry->handle = blockHandle;
    entry->data   = blockData;
    entry->func   = blockFunc;

    /* Create a notifier for events on this block */
    NvSciError err =
        NvSciStreamBlockEventServiceSetup(entry->handle,
                                          &service->EventService,
                                          &entry->notifier);
    if (NvSciError_Success != err) {
        printf("Failed (%x) to create event notifier for block\n", err);
        return 0;
    }

    return 1;
}

/* Main service-based event loop */
static int32_t eventServiceLoop(void)
{
    int32_t i;

    /*
     * Notes on handling notifications:
     * If more than one signal occurs on a notifier between calls
     * to check for events, NvSciEvent will squash the notifications,
     * so only one is received. This means the application must drain
     * all pending events on a block after its notifier signals. It won't
     * receive new notifications for those pending events.
     * A simple implementation might process each block's events in a loop
     * until there are no more, and then move on to the next block. But
     * this poses a risk of starvation. Consider the case of a stream in
     * mailbox mode, where the mailbox already has a waiting payload.
     * If the producer receives a PacketReady event, it will obtain
     * the packet, fill it with data, and present it to the stream.
     * Because the mailbox is full, the packet will immediately be
     * returned, resulting in a new PacketReady event. The application
     * can go into an infinite loop, generating new payloads on the
     * producer without giving the consumer a chance to process them.
     * We therefore use an event loop that only processes one event
     * per block on each iteration, but keeps track of whether there
     * was an event on a block on the previous pass, and if so
     * retries it even if no new signal occurred. The event loop
     * waits for events only when there was no prior event. Otherwise
     * it only polls for new ones.
     */

    /* Pack all notifiers into an array */
    NvSciEventNotifier* notifiers[MAX_BLOCKS];
    for (i=0; i<numBlocks; ++i) {
        notifiers[i] = blocks[i].notifier;
    }

    /* Initialize loop control parameters */
    uint32_t numAlive = numBlocks;
    int64_t timeout = -1;
    bool retry[MAX_BLOCKS];
    bool event[MAX_BLOCKS];
    memset(retry, 0, sizeof(retry));

    /* Main loop - Handle events until all blocks report completion or fail */
    while (numAlive) {

        /* Wait/poll for events, depending on current timeout */
        memset(event, 0, sizeof(event));
        NvSciError err = service->WaitForMultipleEventsExt(
                            &service->EventService,
                            notifiers,
                            numBlocks,
                            timeout,
                            event);
        if ((NvSciError_Success != err) && (NvSciError_Timeout != err)) {
            printf("Failure (%x) while waiting/polling event service\n", err);
            return 0;
        }

        /* Timeout for next pass will be infinite unless we need to retry */
        timeout = -1;

        /*
         * Check for events on new blocks that signaled or old blocks that
         * had an event on the previous pass. This is done in reverse
         * of the order in which blocks were registered. This is because
         * producers are created before consumers, and for mailbox mode
         * we want to give the consumer a chance to use payloads before
         * the producer replaces them.
         */
        for (i=numBlocks-1; i>=0; --i) {
            if (event[i] || retry[i]) {

                /* Get block info */
                BlockEventData* entry = &blocks[i];

                /* Reset to no retry for next pass */
                retry[i] = false;

                /* Skip if this block is no longer in use */
                if (entry->data) {

                    /* Call the block's event handler function */
                    int32_t rv = entry->func(entry->data, 0);
                    if (rv < 0) {
                        /* On failure, no longer check block and app failed */
                        success = 0U;
                        entry->data = NULL;
                        numAlive--;
                    } else if (rv == 2) {
                        /* On completion, no longer check block */
                        entry->data = NULL;
                        numAlive--;
                    } else if (rv == 1) {
                        /* If event found, retry next loop */
                        timeout = 0;
                        retry[i] = true;
                    }
                }
            }
        }
    }

    /* Delete notifiers */
    for (i=0; i<numBlocks; ++i) {
        notifiers[i]->Delete(notifiers[i]);
    }

    /* Delete service */
    service->EventService.Delete(&service->EventService);

    return success;
}

/* Table of functions for service-based event handling */
EventFuncs const eventFuncs_Service = {
    .init = eventServiceInit,
    .reg  = eventServiceRegister,
    .loop = eventServiceLoop
};
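
/*
 * Illustrative usage sketch (not part of the original file): the function
 * table above is assumed to be driven by the application roughly as shown
 * below, where blockHandle, blockData, and blockFunc stand for hypothetical
 * per-block values supplied by the rest of the sample.
 *
 *   EventFuncs const* ev = &eventFuncs_Service;
 *   if (!ev->init()) { ... }     // create the event loop service
 *   // For each block in the stream:
 *   //     if (!ev->reg(blockHandle, blockData, blockFunc)) { ... }
 *   if (!ev->loop()) { ... }     // run until all blocks complete or fail
 */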