mirror of
git://nv-tegra.nvidia.com/tegra/nv-sci-src/nvsci_samples.git
synced 2025-12-22 09:21:21 +03:00
2eba699906039d6615aae4967f6ea79bfe44a40a - event_sample_app/block_pool.c f3abb0a884f0647204ad32ff51255c4712e52120 - event_sample_app/Makefile 9ee49033e077ac5c8bf458a04c91dd3dbed9633d - event_sample_app/event_loop.h b33adce6eb1bbc7af23f6c37b6a635479e18a66a - event_sample_app/block_returnsync.c a56041c06b6bc1d3812b72b399d7d78dd7895485 - event_sample_app/block_limiter.c ca34c957759f7a010f0cbbbf9bedc03a2c98092b - event_sample_app/block_c2c.c 8d6d0ec3aa8e374a1d2a5fedc9dd24ff7bbdb731 - event_sample_app/block_multicast.c a76149a2531899e35843d939f60ad8979d8cf65f - event_sample_app/block_consumer_uc1.c 9da8763e4af4b4b7278507a3ebfe2c68a7a24585 - event_sample_app/util.h 2bf7e1383d6e8913c9b0a6a8bdd48fe63d8098d0 - event_sample_app/block_producer_uc1.c a54abf82eaa2d888e379ab4596ba68ce264e80b5 - event_sample_app/block_info.h 080a6efe263be076c7046e70e31098c2bbed0f6d - event_sample_app/block_presentsync.c 7dd10e5ea71f0c4a09bbe1f9f148f67a13ee098c - event_sample_app/util.c bc1a6f9017b28e5707c166a658a35e6b3986fdf4 - event_sample_app/usecase1.h 317f43efc59638bf1eae8303f0c79eafb059241a - event_sample_app/block_ipc.c 40361c8f0b68f7d5207db2466ce08c19c0bf1c90 - event_sample_app/event_loop_service.c efad113d0107e5d8f90146f3102a7c0ed22f1a35 - event_sample_app/event_loop_threads.c 2908615cebcf36330b9850c57e8745bf324867b2 - event_sample_app/block_queue.c 36ed68eca1a7800cf0d94e763c9fc352ee8cda1e - event_sample_app/block_common.c 675f75d61bd0226625a8eaaf0e503c9e976c8d61 - event_sample_app/main.c c3b26619dd07d221e953fc5dc29a50dcb95a8b97 - rawstream/Makefile 1fbb82e2281bb2e168c87fd20903bbed898ca160 - rawstream/rawstream_cuda.c 1d96498fe3c922f143f7e50e0a32b099714060ad - rawstream/rawstream_consumer.c d077dafc9176686f6d081026225325c2a303a60e - rawstream/rawstream_producer.c 54ae655edddda7dcabe22fbf0b27c3f617978851 - rawstream/rawstream.h d5ffeef3c7ad2af6f6f31385db7917b5ef9a7438 - rawstream/rawstream_ipc_linux.c 81e3d6f8ff5252797a7e9e170b74df6255f54f1b - rawstream/rawstream_main.c Change-Id: 
I66e33d0d23ed328c6299d72ca9eb42de2429a9aa
173 lines
5.1 KiB
C
173 lines
5.1 KiB
C
/*
|
|
* Copyright (c) 2020-2022 NVIDIA Corporation. All Rights Reserved.
|
|
*
|
|
* NVIDIA Corporation and its licensors retain all intellectual property and
|
|
* proprietary rights in and to this software and related documentation. Any
|
|
* use, reproduction, disclosure or distribution of this software and related
|
|
* documentation without an express license agreement from NVIDIA Corporation
|
|
* is strictly prohibited.
|
|
*/
|
|
|
|
#include "rawstream.h"
|
|
|
|
// Initialize one end of named communication channel
|
|
/*
 * Open and prepare one end of a named NvSciIpc channel.
 *
 * endpointName: name of the endpoint to open (must match system config)
 * ipcWrapper:   caller-owned wrapper filled in with the endpoint handle,
 *               its Linux event fd, and the endpoint info
 *
 * Returns NvSciError_Success on success. On failure, any endpoint that
 * was opened is closed again before returning, so the caller must NOT
 * call ipcDeinit() after a failed init.
 */
NvSciError ipcInit(const char* endpointName, IpcWrapper* ipcWrapper)
{
    NvSciError err = NvSciError_Success;

    // Open named endpoint
    err = NvSciIpcOpenEndpoint(endpointName, &ipcWrapper->endpoint);
    if (err != NvSciError_Success) {
        fprintf(stderr, "Unable to open endpoint %s (%x)\n",
                endpointName, err);
        // Nothing acquired yet, so nothing to clean up
        return err;
    }

    // Initialize IPC event notifier (fd used with select() in waitEvent)
    err = NvSciIpcGetLinuxEventFd(ipcWrapper->endpoint, &ipcWrapper->ipcEventFd);
    if (err != NvSciError_Success) {
        fprintf(stderr, "Unable to get Linux event fd (%x)\n", err);
        goto fail;
    }

    // Retrieve endpoint info
    err = NvSciIpcGetEndpointInfo(ipcWrapper->endpoint, &ipcWrapper->info);
    if (NvSciError_Success != err) {
        fprintf(stderr, "Unable to retrieve IPC endpoint info (%x)\n", err);
        goto fail;
    }

    // Reset channel state before first use
    err = NvSciIpcResetEndpointSafe(ipcWrapper->endpoint);
    if (NvSciError_Success != err) {
        fprintf(stderr, "Unable to reset IPC endpoint (%x)\n", err);
        goto fail;
    }

    return NvSciError_Success;

fail:
    // Don't leak the endpoint when a later init step fails.
    // Close error (if any) is ignored; the original error is reported.
    (void)NvSciIpcCloseEndpointSafe(ipcWrapper->endpoint, false);
    return err;
}
|
|
|
|
// Clean up IPC when done
|
|
/* Tear down an IPC channel end previously set up with ipcInit().
 * Close failures can only be reported, not propagated. */
void ipcDeinit(IpcWrapper* ipcWrapper)
{
    NvSciError closeErr = NvSciIpcCloseEndpointSafe(ipcWrapper->endpoint, false);
    if (closeErr != NvSciError_Success) {
        fprintf(stderr, "NvSciIpcCloseEndpointSafe failed (%x)\n", closeErr);
    }
}
|
|
|
|
// Wait for an event on IPC channel
|
|
/* Block until at least one of the requested IPC events (bitmask in
 * `value`) is pending on the channel. Polls NvSciIpc for pending
 * events and sleeps in select() on the endpoint's event fd between
 * polls. Returns NvSciError_Success once a matching event is seen,
 * or an error code if polling or waiting fails. */
static NvSciError waitEvent(IpcWrapper* ipcWrapper, uint32_t value)
{
    for (;;) {
        // Query the set of currently pending IPC events
        uint32_t pending = 0U;
        NvSciError err = NvSciIpcGetEventSafe(ipcWrapper->endpoint, &pending);
        if (NvSciError_Success != err) {
            fprintf(stderr, "NvSciIpcGetEventSafe failed (%x)\n", err);
            return err;
        }

        // Done as soon as any requested event bit is set
        if ((pending & value) != 0U) {
            return NvSciError_Success;
        }

        // Otherwise sleep until the endpoint signals new activity
        fd_set readFds;
        FD_ZERO(&readFds);
        FD_SET(ipcWrapper->ipcEventFd, &readFds);

        if (select(ipcWrapper->ipcEventFd + 1, &readFds, NULL, NULL, NULL) < 0) {
            // select failed
            return NvSciError_ResourceError;
        }
        if (!FD_ISSET(ipcWrapper->ipcEventFd, &readFds)) {
            return NvSciError_NvSciIpcUnknown;
        }
    }
}
|
|
|
|
// Send a message over IPC
|
|
/* Send one complete message of `size` bytes from `buf` over the IPC
 * channel. Waits for write space, then performs a single write; this
 * simple sample treats a partial write as a failure rather than
 * retrying the remainder. Returns NvSciError_Success only when the
 * full message was sent. */
NvSciError ipcSend(IpcWrapper* ipcWrapper, const void* buf, const size_t size)
{
    uint32_t bytesWritten = 0U;

    // Wait for room in channel to send a message
    NvSciError err = waitEvent(ipcWrapper, NV_SCI_IPC_EVENT_WRITE);
    if (NvSciError_Success != err) {
        return err;
    }

    // Message length must fit the 32-bit write API
    assert(size <= UINT32_MAX);

    // Send as much of the message as we can
    err = NvSciIpcWriteSafe(ipcWrapper->endpoint, buf, (uint32_t)size,
                            &bytesWritten);
    if (NvSciError_Success != err) {
        fprintf(stderr, "IPC write failed (%x)\n", err);
        return err;
    }

    // For this simple sample, we just fail if the entire message wasn't
    // sent. Could instead retry to send the rest.
    if ((size_t)bytesWritten != size) {
        fprintf(stderr, "Failed to send entire message (%u < %zu)\n",
                bytesWritten, size);
        return NvSciError_NvSciIpcUnknown;
    }

    return NvSciError_Success;
}
|
|
|
|
// Receive a message over IPC
|
|
/* Receive one complete message of exactly `size` bytes into `buf`
 * from the IPC channel. Waits for incoming data, then performs a
 * single read; this simple sample treats a partial read as a failure
 * rather than retrying the remainder. Returns NvSciError_Success only
 * when the full message was read. */
NvSciError ipcRecvFill(IpcWrapper* ipcWrapper, void* buf, const size_t size)
{
    uint32_t bytesRead = 0U;

    // Wait for incoming data
    NvSciError err = waitEvent(ipcWrapper, NV_SCI_IPC_EVENT_READ);
    if (NvSciError_Success != err) {
        return err;
    }

    // Message length must fit the 32-bit read API
    assert(size <= UINT32_MAX);

    // Read as much of the message as we can
    err = NvSciIpcReadSafe(ipcWrapper->endpoint, buf, (uint32_t)size,
                           &bytesRead);
    if (NvSciError_Success != err) {
        fprintf(stderr, "IPC read failed (%x)\n", err);
        return err;
    }

    // For this simple sample, we just fail if the entire message wasn't
    // read. Could instead retry to receive the rest.
    if ((size_t)bytesRead != size) {
        fprintf(stderr, "Failed to read entire message (%u < %zu)\n",
                bytesRead, size);
        return NvSciError_NvSciIpcUnknown;
    }

    return NvSciError_Success;
}
|