mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git
nvscic2c-pcie: Fix MISRA rule 10.4 violations
Fix a total of 65 violations of rule 10.4.

JIRA NVIPC-3121

Change-Id: I5a1bead886683cbe3ec4b0e68531ee6e2a149175
Signed-off-by: cyeddu <cyeddu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3246908
Reviewed-by: Janardhan Reddy AnnapuReddy <jreddya@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Tested-by: Janardhan Reddy AnnapuReddy <jreddya@nvidia.com>
Reviewed-by: Sumeet Gupta <sumeetg@nvidia.com>
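For reference, MISRA C:2012 Rule 10.4 requires that both operands of an operator to which the usual arithmetic conversions apply have the same essential type category, so mixing an unsigned variable with a plain (essentially signed) integer constant is a violation. The hunks below resolve this mostly by giving the constants a U (or UL) suffix. A minimal sketch of the pattern, using hypothetical names rather than code from this driver:

```c
#include <stdint.h>

/* 'pos' and 'nframes' stand in for the driver's u32 fifo counters. */
static uint32_t next_write_pos(uint32_t pos, uint32_t nframes)
{
	/*
	 * Non-compliant with Rule 10.4: '1' is an essentially signed constant
	 * while 'pos' is essentially unsigned, so the operands of '+' mix
	 * essential type categories:
	 *
	 *	pos = pos + 1;
	 */

	/* Compliant: the U suffix keeps both operands unsigned. */
	pos = pos + 1U;

	/* The same idea applies to comparisons, e.g. 'toread == 0' -> '== 0U'. */
	if (pos >= nframes)
		pos = 0U;

	return pos;
}
```

The (__s32) casts in the ioctl_export_obj() hunk appear to address the same rule from the other direction, keeping both operands of the comparison in the signed category to match the ioctl argument's type.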
@@ -42,7 +42,7 @@
  * after few retries.
  */
 #define COMM_CHANNEL_NFRAMES    (1024)
-#define COMM_CHANNEL_FRAME_SZ   (64)
+#define COMM_CHANNEL_FRAME_SZ   (64U)
 
 /* fifo header.*/
 struct header {
@@ -170,7 +170,7 @@ can_recv(struct fifo_t *fifo, int *ret)
         bool recv = false;
         u32 toread = (fifo->recv_hdr->wr_count - fifo->local_hdr->rd_count);
 
-        if (toread == 0) {
+        if (toread == 0U) {
                 /* no frame available to read.*/
                 recv = false;
                 *ret = -ENODATA;
@@ -231,7 +231,7 @@ send_msg(struct comm_channel_ctx_t *comm_ctx, struct comm_msg *msg)
                 writel(0x1, syncpt->peer_mem.pva);
         }
 
-        fifo->wr_pos = fifo->wr_pos + 1;
+        fifo->wr_pos = fifo->wr_pos + 1U;
         if (fifo->wr_pos >= fifo->nframes)
                 fifo->wr_pos = 0;
 
@@ -335,7 +335,7 @@ recv_taskfn(void *arg)
 
                 /* do not noifty peer for space availability. */
 
-                fifo->rd_pos = fifo->rd_pos + 1;
+                fifo->rd_pos = fifo->rd_pos + 1U;
                 if (fifo->rd_pos >= fifo->nframes)
                         fifo->rd_pos = 0;
         }
@@ -16,8 +16,8 @@
 #define DRIVER_NAME_EPC         "nvscic2c-pcie-epc"
 
 /* STREAM_OBJ_TYPE. */
-#define STREAM_OBJ_TYPE_MEM     (0)
-#define STREAM_OBJ_TYPE_SYNC    (1)
+#define STREAM_OBJ_TYPE_MEM     (0U)
+#define STREAM_OBJ_TYPE_SYNC    (1U)
 
 /*
  * This capped number shall be used to derive export descriptor, therefore any
@@ -38,8 +38,8 @@
  * change should be evaluated thoroughly.
  */
 #define MAX_BOARDS              (16)
-#define MAX_SOCS                (16)
-#define MAX_PCIE_CNTRLRS        (16)
+#define MAX_SOCS                (16U)
+#define MAX_PCIE_CNTRLRS        (16U)
 
 /*
  * Maximum NvSciIpc INTER_CHHIP(NvSciC2cPcie) endpoints that can be supported
@@ -50,7 +50,7 @@
  * This capped number shall be used to derive export descriptor, therefore any
  * change should be evaluated thoroughly.
  */
-#define MAX_ENDPOINTS           (16)
+#define MAX_ENDPOINTS           (16U)
 
 /*
  * Each NvSciIpc INTER_CHIP(NvSciC2cPcie) endpoint shall require at least one
@@ -66,7 +66,7 @@
 #define MIN_NUM_NOTIFY          (MAX_ENDPOINTS + (2))
 
 /* NvRmHost1xSyncpointShim have size: 64KB on Orin.*/
-#define SP_SIZE                 (0x10000)
+#define SP_SIZE                 (0x10000UL)
 
 /*
  * Represents SyncpointShimBase on all T234.
@@ -89,9 +89,9 @@
  * These are three PCI Function Device ID's to be configured in PCI header
  * when Tegra acting as PCI Function to peer Tegra acting as PCI RP.
  */
-#define PCI_DEVICE_ID_C2C_1     (0x22CB)
-#define PCI_DEVICE_ID_C2C_2     (0x22CC)
-#define PCI_DEVICE_ID_C2C_3     (0x22CD)
+#define PCI_DEVICE_ID_C2C_1     (0x22CBU)
+#define PCI_DEVICE_ID_C2C_2     (0x22CCU)
+#define PCI_DEVICE_ID_C2C_3     (0x22CDU)
 
 /*
  * For NvStreams extensions over NvSciC2cPcie, an endpoint is a producer on
@@ -1,5 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ */
 
 #ifndef __DESCRIPTOR_H__
 #define __DESCRIPTOR_H__
@@ -9,7 +12,7 @@
 #include "common.h"
 
 /* Magic code for descriptor.*/
-#define DESC_MAGIC_CODE_32BIT   (0x69152734)
+#define DESC_MAGIC_CODE_32BIT   (0x69152734UL)
 
 /*
  * Format of Export Descriptor (at the moment)
@@ -26,11 +26,11 @@
 #define SOC_ID_PROP_NAME        ("nvidia,soc-id")
 #define CNTRLR_ID_PROP_NAME     ("nvidia,cntrlr-id")
 #define ENDPOINT_DB_PROP_NAME   ("nvidia,endpoint-db")
-#define MAX_PROP_LEN            (1024)
-#define FRAME_SZ_ALIGN          (64)
+#define MAX_PROP_LEN            (1024U)
+#define FRAME_SZ_ALIGN          (64U)
 
 #define MAX_FRAME_SZ            (SZ_32K)
-#define MAX_NFRAMES             (64)
+#define MAX_NFRAMES             (64U)
 #define MIN_BAR_WIN_SZ          (SZ_64M)
 
 /*
@@ -435,7 +435,7 @@ parse_bar_win_size(struct driver_param_t *drv_param)
                        drv_param->bar_win_size);
                 goto err;
         }
-        if (drv_param->bar_win_size & (drv_param->bar_win_size - 1)) {
+        if (drv_param->bar_win_size & (drv_param->bar_win_size - 1U)) {
                 ret = -EINVAL;
                 pr_err("BAR window size: (%u) not a power of 2\n",
                        drv_param->bar_win_size);
@@ -467,13 +467,13 @@ validate_endpoint_prop(struct endpoint_prop_t *prop)
         if ((prop->name[0] == '\0')) {
                 ret = -EINVAL;
                 pr_err("Endpoint must have a name\n");
-        } else if (prop->nframes == 0) {
+        } else if (prop->nframes == 0U) {
                 ret = -EINVAL;
                 pr_err("(%s): Invalid number of frames\n", prop->name);
-        } else if (prop->frame_sz == 0) {
+        } else if (prop->frame_sz == 0U) {
                 ret = -EINVAL;
                 pr_err("(%s): Invalid frame size\n", prop->name);
-        } else if ((prop->frame_sz & (FRAME_SZ_ALIGN - 1)) != 0) {
+        } else if ((prop->frame_sz & (FRAME_SZ_ALIGN - 1U)) != 0U) {
                 ret = -EINVAL;
                 pr_err("(%s): Frame size unaligned to (%u)\n",
                        prop->name, FRAME_SZ_ALIGN);
@@ -511,7 +511,7 @@ parse_endpoint_db(struct driver_param_t *drv_param)
         }
         nr_endpoint = ret;
 
-        if (nr_endpoint == 0) {
+        if (nr_endpoint == 0U) {
                 ret = -EINVAL;
                 pr_err("No endpoint information in property: (%s)\n",
                        ENDPOINT_DB_PROP_NAME);
@@ -541,7 +541,7 @@ parse_endpoint_db(struct driver_param_t *drv_param)
                  * per endpoint entry in endpointdb is longer than
                  * expected.
                  */
-                if (strlen(entry) > (MAX_PROP_LEN - 1)) {
+                if (strlen(entry) > (MAX_PROP_LEN - 1U)) {
                         ret = -EINVAL;
                         pr_err("Endpoint entry invalid\n");
                         break;
@@ -556,7 +556,7 @@ parse_endpoint_db(struct driver_param_t *drv_param)
                         pr_err("Error parsing endpoint name\n");
                         break;
                 }
-                if (strlen(name) > (NAME_MAX - 1)) {
+                if (strlen(name) > (NAME_MAX - 1U)) {
                         ret = -EINVAL;
                         pr_err("Endpoint name: (%s) long, max char:(%u)\n",
                                name, (NAME_MAX - 1));
@@ -1053,11 +1053,11 @@ endpoints_setup(struct driver_ctx_t *drv_ctx, void **endpoints_h)
         if (WARN_ON(!drv_ctx || !endpoints_h || *endpoints_h))
                 return -EINVAL;
 
-        if (WARN_ON(drv_ctx->drv_param.nr_endpoint == 0 ||
+        if (WARN_ON(drv_ctx->drv_param.nr_endpoint == 0U ||
                     drv_ctx->drv_param.nr_endpoint > MAX_ENDPOINTS))
                 return -EINVAL;
 
-        if (WARN_ON(strlen(drv_ctx->drv_name) > (NAME_MAX - 1)))
+        if (WARN_ON(strlen(drv_ctx->drv_name) > (NAME_MAX - 1U)))
                 return -EINVAL;
 
         /* start by allocating the endpoint driver (global for all eps) ctx.*/
@@ -1114,7 +1114,7 @@ endpoints_setup(struct driver_ctx_t *drv_ctx, void **endpoints_h)
                 /* set index of the msi-x interruper vector
                  * where the first one is reserved for comm-channel
                  */
-                endpoint->msi_irq = i + 1;
+                endpoint->msi_irq = i + 1U;
                 stream_ext_params->local_node = &drv_ctx->drv_param.local_node;
                 stream_ext_params->peer_node = &drv_ctx->drv_param.peer_node;
                 stream_ext_params->host1x_pdev = drv_ctx->drv_param.host1x_pdev;
@@ -435,7 +435,7 @@ nvscic2c_pcie_epc_probe(struct pci_dev *pdev,
         if (ret)
                 goto err_request_region;
 
-        win_size = pci_resource_len(pdev, drv_ctx->bar);
+        win_size = pci_resource_len(pdev, 0U);
         ret = allocate_inbound_area(pdev, win_size, &drv_ctx->self_mem);
         if (ret)
                 goto err_alloc_inbound;
@@ -545,7 +545,7 @@ nvscic2c_pcie_epc_probe(struct pci_dev *pdev,
         timeout =
             wait_for_completion_timeout(&drv_ctx->epc_ctx->epf_ready_cmpl,
                                         msecs_to_jiffies(MAX_EPF_SETUP_TIMEOUT_MSEC));
-        if (timeout == 0) {
+        if (timeout == 0U) {
                 ret = -ENOLINK;
                 pr_err("(%s): Timed-out waiting for nvscic2c-pcie-epf\n",
                        drv_ctx->drv_name);
@@ -1,5 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
-// Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ */
 
 #define pr_fmt(fmt)     "nvscic2c-pcie: iova-alloc: " fmt
 #include <linux/iommu.h>
@@ -95,7 +98,7 @@ iova_alloc_init(struct device *dev, size_t size, dma_addr_t *dma_handle,
         iova_len = size >> shift;
 
         /* Recommendation is to allocate in power of 2.*/
-        if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
+        if (iova_len < (1U << (IOVA_RANGE_CACHE_MAX_SIZE - 1U)))
                 iova_len = roundup_pow_of_two(iova_len);
 
         if (*ivd_ctx->dev->dma_mask)
@@ -1,5 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ */
 
 #ifndef __IOVA_ALLOC_H__
 #define __IOVA_ALLOC_H__
@@ -37,7 +40,7 @@ iova_alloc_deinit(dma_addr_t dma_handle, size_t size,
                   struct iova_alloc_domain_t **ivd_h);
 
 #ifndef IOVA_RANGE_CACHE_MAX_SIZE
-#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
+#define IOVA_RANGE_CACHE_MAX_SIZE 6U /* log of max cached IOVA range size (in pages) */
 #endif
 
 #endif //__IOVA_ALLOC_H__
@@ -1,5 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
-// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ */
 
 #define pr_fmt(fmt)     "nvscic2c-pcie: iova-mgr: " fmt
 
@@ -308,7 +311,7 @@ iova_mngr_init(char *name, u64 base_address, size_t size, void **mngr_handle)
                 goto err;
         }
 
-        if (strlen(name) > (NAME_MAX - 1)) {
+        if (strlen(name) > (NAME_MAX - 1U)) {
                 ret = -EINVAL;
                 pr_err("name: (%s) long, max char:(%u)\n", name, (NAME_MAX - 1));
                 goto err;
@@ -677,7 +677,7 @@ pci_client_change_link_status(void *pci_client_h,
 
         /* interrupt registered users. */
         mutex_lock(&ctx->event_tbl_lock);
-        for (i = 0; i < MAX_LINK_EVENT_USERS; i++) {
+        for (i = 0U; i < MAX_LINK_EVENT_USERS; i++) {
                 event = &ctx->event_tbl[i];
                 if (atomic_read(&event->in_use)) {
                         ops = &event->cb_ops;
@@ -412,9 +412,9 @@ ioctl_export_obj(struct stream_ext_ctx_t *ctx,
                 return ret;
 
         /* only target/remote can be exported.*/
-        if (args->obj_type == NVSCIC2C_PCIE_OBJ_TYPE_TARGET_MEM)
+        if (args->obj_type == (__s32)NVSCIC2C_PCIE_OBJ_TYPE_TARGET_MEM)
                 export_type = STREAM_OBJ_TYPE_MEM;
-        else if (args->obj_type == NVSCIC2C_PCIE_OBJ_TYPE_REMOTE_SYNC)
+        else if (args->obj_type == (__s32)NVSCIC2C_PCIE_OBJ_TYPE_REMOTE_SYNC)
                 export_type = STREAM_OBJ_TYPE_SYNC;
         else
                 return -EINVAL;
@@ -646,7 +646,7 @@ ioctl_set_max_copy_requests(struct stream_ext_ctx_t *ctx,
         struct copy_request *cr = NULL;
         struct list_head *curr = NULL, *next = NULL;
 
-        if (ctx->aperture_limit == 0) {
+        if (ctx->aperture_limit == 0U) {
                 pr_err("Err: Streaming is not supported in this Endpoint: %s\n", ctx->ep_name);
                 return -EINVAL;
         }
@@ -1277,17 +1277,17 @@ validate_flush_range(struct stream_ext_ctx_t *ctx,
         struct file *filep = NULL;
         struct stream_ext_obj *stream_obj = NULL;
 
-        if (flush_range->size <= 0)
+        if (flush_range->size <= 0U)
                 return -EINVAL;
 
         /* eDMA expects u32 datatype.*/
         if (flush_range->size > U32_MAX)
                 return -EINVAL;
 
-        if (flush_range->size & 0x3)
+        if (flush_range->size & 0x3U)
                 return -EINVAL;
 
-        if (flush_range->offset & 0x3)
+        if (flush_range->offset & 0x3U)
                 return -EINVAL;
 
         ret = validate_handle(ctx, flush_range->src_handle,
@@ -1450,7 +1450,7 @@ allocate_copy_request(struct stream_ext_ctx_t *ctx,
 
         /* flush range has two handles: src, dst + all possible post_fences.*/
         cr->handles = kzalloc((sizeof(*cr->handles) *
-                               ((2 * ctx->cr_limits.max_flush_ranges) +
+                               ((2U * ctx->cr_limits.max_flush_ranges) +
                                 (ctx->cr_limits.max_post_fences))),
                               GFP_KERNEL);
         if (WARN_ON(!cr->handles)) {
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2024, NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
  */
 
 #ifndef __UAPI_NVSCIC2C_PCIE_IOCTL_H__
@@ -214,7 +215,7 @@ union nvscic2c_pcie_ioctl_arg_max_size {
 };
 
 /* IOCTL magic number - seen available in ioctl-number.txt*/
-#define NVSCIC2C_PCIE_IOCTL_MAGIC       0xC2
+#define NVSCIC2C_PCIE_IOCTL_MAGIC       0xC2U
 
 #define NVSCIC2C_PCIE_IOCTL_GET_INFO \
         _IOWR(NVSCIC2C_PCIE_IOCTL_MAGIC, 1,\
@@ -230,42 +231,42 @@ union nvscic2c_pcie_ioctl_arg_max_size {
  * Pin/Map Mem or Sync objects.
  */
 #define NVSCIC2C_PCIE_IOCTL_MAP \
-        _IOWR(NVSCIC2C_PCIE_IOCTL_MAGIC, 3,\
+        _IOWR(NVSCIC2C_PCIE_IOCTL_MAGIC, 3U,\
               struct nvscic2c_pcie_map_obj_args)
 
 /**
  * Get Export descriptor for Target/Remote Mem/Sync objects.
  */
 #define NVSCIC2C_PCIE_IOCTL_GET_AUTH_TOKEN \
-        _IOWR(NVSCIC2C_PCIE_IOCTL_MAGIC, 4,\
+        _IOWR(NVSCIC2C_PCIE_IOCTL_MAGIC, 4U,\
               struct nvscic2c_pcie_export_obj_args)
 
 /**
  * Get Handle from the imported export descriptor.
  */
 #define NVSCIC2C_PCIE_IOCTL_GET_HANDLE \
-        _IOWR(NVSCIC2C_PCIE_IOCTL_MAGIC, 5,\
+        _IOWR(NVSCIC2C_PCIE_IOCTL_MAGIC, 5U,\
               struct nvscic2c_pcie_import_obj_args)
 
 /**
  * Free the Mapped/Pinned Source, Target or Imported Mem or Sync object handle.
  */
 #define NVSCIC2C_PCIE_IOCTL_FREE \
-        _IOW(NVSCIC2C_PCIE_IOCTL_MAGIC, 6,\
+        _IOW(NVSCIC2C_PCIE_IOCTL_MAGIC, 6U,\
              struct nvscic2c_pcie_free_obj_args)
 
 /**
  * Submit a Copy request for transfer.
  */
 #define NVSCIC2C_PCIE_IOCTL_SUBMIT_COPY_REQUEST \
-        _IOW(NVSCIC2C_PCIE_IOCTL_MAGIC, 7,\
+        _IOW(NVSCIC2C_PCIE_IOCTL_MAGIC, 7U,\
              struct nvscic2c_pcie_submit_copy_args)
 
 /**
  * Set the maximum possible outstanding copy requests that can be submitted.
  */
 #define NVSCIC2C_PCIE_IOCTL_MAX_COPY_REQUESTS \
-        _IOW(NVSCIC2C_PCIE_IOCTL_MAGIC, 8,\
+        _IOW(NVSCIC2C_PCIE_IOCTL_MAGIC, 8U,\
              struct nvscic2c_pcie_max_copy_args)
 
 #define NVSCIC2C_PCIE_IOCTL_NUMBER_MAX  8