Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git
Synced 2025-12-22 17:25:35 +03:00
nvscic2c-pcie: Fix Top25 CWE CERT-C violations
Fix a total of 48 violations of the following CERT-C rules:

  CERT ARR30-C - 1
  CERT EXP34-C - 1
  CERT INT08-C - 20
  CERT INT30-C - 20
  CERT STR07-C - 4
  CERT STR31-C - 1
  FORWARD_NULL - 1

JIRA NVIPC-3120

Change-Id: I2b9b35e97fe6968ec4c656cdcdd01a764640033c
Signed-off-by: Anjanii <amohil@nvidia.com>
Signed-off-by: Janardhan Reddy <jreddya@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3263785
Reviewed-by: Deepak Kumar Badgaiyan <dbadgaiyan@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Sumeet Gupta <sumeetg@nvidia.com>
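All of the overflow fixes below follow the CERT INT30-C precondition pattern: test whether an unsigned operation would wrap before performing it, and publish the result only when it is safe. A minimal standalone sketch of that pattern (names are ours, not from the commit):

```c
#include <stdbool.h>
#include <stdint.h>

/* Succeeds (returns true) and writes a + b to *out only when the sum
 * cannot wrap modulo 2^64; mirrors the AddU64() helper this commit
 * adds to common.h. */
static bool add_u64_checked(uint64_t a, uint64_t b, uint64_t *out)
{
	if ((UINT64_MAX - a) < b)
		return false;
	*out = a + b;
	return true;
}
```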
@@ -1,7 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES.
- * All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: GPL-2.0-only
 */
 
 #ifndef __COMMON_H__
@@ -214,4 +213,38 @@ static inline u64 get_syncpt_shim_offset(u32 id, u8 chip_id)
 
 	return (base + ((u64)id * SP_SIZE));
 }
+
+/* Addition of uint64 variables with overflow detection */
+static inline bool
+AddU64(uint64_t op1, uint64_t op2, uint64_t *result)
+{
+	bool e = false;
+
+	if (((U64_MAX - op1) < op2) == false) {
+		*result = op1 + op2;
+		e = true;
+	}
+
+	return e;
+}
+
+/* Multiplication of uint64 variables with overflow detection */
+static inline bool
+MultU64(uint64_t op1, uint64_t op2, uint64_t *result)
+{
+	bool e = false;
+
+	if ((op1 == 0U) || (op2 == 0U))
+		*result = 0U;
+	else if ((op1 > (U64_MAX / op2)) == false)
+		*result = op1 * op2;
+	else
+		goto fail;
+
+	e = true;
+
+fail:
+	return e;
+}
+
 #endif //__COMMON_H__
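The two helpers return true on success and leave *result untouched on overflow, so callers must test the return value before using the output. A hypothetical caller mirroring how the rest of the commit uses them (nframes, frame_sz, and PAGE_SIZE stand in for the endpoint fields used in the diff):

```c
uint64_t bytes = 0, total = 0;

if (MultU64(nframes, frame_sz, &bytes) == false)
	return -EINVAL;		/* nframes * frame_sz would wrap */
if (AddU64(bytes, PAGE_SIZE, &total) == false)
	return -EINVAL;		/* adding the extra page would wrap */
```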
@@ -1,7 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0-only
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES.
- * All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: GPL-2.0-only
 */
 
 #define pr_fmt(fmt) "nvscic2c-pcie: endpoint: " fmt
@@ -864,6 +863,8 @@ allocate_memory(struct endpoint_drv_ctx_t *eps_ctx, struct endpoint_t *ep)
 	int ret = 0;
 	int prot = 0;
 	size_t offsetof = 0x0;
+	bool retval = 0;
+	uint64_t total_size = 0;
 
 	/*
 	 * memory size includes space for frames(aligned to PAGE_SIZE) plus
@@ -871,7 +872,12 @@ allocate_memory(struct endpoint_drv_ctx_t *eps_ctx, struct endpoint_t *ep)
 	 */
 	ep->self_mem.size = (ep->nframes * ep->frame_sz);
 	ep->self_mem.size = ALIGN(ep->self_mem.size, PAGE_SIZE);
-	ep->self_mem.size += PAGE_SIZE;
+	retval = AddU64(ep->self_mem.size, PAGE_SIZE, &total_size);
+	if (retval == false) {
+		pr_err("sum of ep->self_mem.size and PAGE_SIZE exceeding max U64 limit\n");
+		return -EINVAL;
+	}
+	ep->self_mem.size = total_size;
 	ep->self_mem.pva = alloc_pages_exact(ep->self_mem.size,
 					     (GFP_KERNEL | __GFP_ZERO));
 	if (!ep->self_mem.pva) {
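For reference, the same guard can be expressed with the kernel's generic helper from <linux/overflow.h>; this is a sketch of an alternative, not what the commit does — the commit deliberately uses its local AddU64() instead:

```c
#include <linux/overflow.h>

u64 total = 0;

/* check_add_overflow() returns true when the addition wrapped */
if (check_add_overflow(ep->self_mem.size, (u64)PAGE_SIZE, &total))
	return -EINVAL;
ep->self_mem.size = total;
```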
@@ -1,7 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0-only
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES.
- * All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: GPL-2.0-only
 */
 
 #include <nvidia/conftest.h>
@@ -178,6 +177,15 @@ assign_outbound_area(struct pci_dev *pdev, size_t win_size, int bar,
 {
 	int ret = 0;
 
+	/*
+	 * Added below check to fix CERT ARR30-C violation:
+	 * cert_arr30_c_violation: pdev->resource[bar] evaluates to an address
+	 * that could be at negative offset of an array.
+	 */
+	if (bar < 0) {
+		pr_err("Invalid BAR index : %d", bar);
+		return -EINVAL;
+	}
 	peer_mem->size = win_size;
 	peer_mem->aper = pci_resource_start(pdev, bar);
 
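A sketch of the ARR30-C guard with an upper bound added as well; PCI_STD_NUM_BARS is the kernel's standard BAR count, and the upper-bound check is our addition, not part of the diff:

```c
if (bar < 0 || bar >= PCI_STD_NUM_BARS) {
	pr_err("Invalid BAR index : %d", bar);
	return -EINVAL;
}
peer_mem->size = win_size;
peer_mem->aper = pci_resource_start(pdev, bar);
```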
@@ -1,7 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0-only
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES.
- * All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: GPL-2.0-only
 */
 
 #define pr_fmt(fmt) "nvscic2c-pcie: iova-mgr: " fmt
@@ -162,6 +161,9 @@ iova_mngr_block_release(void *mngr_handle, void **block_handle)
 	struct block_t *curr = NULL, *prev = NULL;
 	bool done = false;
 	int ret = 0;
+	bool retval = 0;
+	uint64_t last_address = 0;
+	uint64_t total_size = 0;
 
 	if (!ctx || !release)
 		return -EINVAL;
@@ -223,9 +225,19 @@ iova_mngr_block_release(void *mngr_handle, void **block_handle)
 		 */
 		struct block_t *last =
 			list_last_entry(ctx->free_list, struct block_t, node);
-		if ((last->address + last->size) == release->address) {
+		retval = AddU64(last->address, last->size, &last_address);
+		if (retval == false) {
+			pr_err("AddU64 overflow: last->address, last->size\n");
+			return -EINVAL;
+		}
+		if (last_address == release->address) {
 			/* can be merged with last node of list.*/
-			last->size += release->size;
+			retval = AddU64(last->size, release->size, &total_size);
+			if (retval == false) {
+				pr_err("AddU64 overflow: last->size, release->size\n");
+				return -EINVAL;
+			}
+			last->size = total_size;
 			list_del(&release->node);
 			kfree(release);
 		} else {
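The hunk above replaces a comparison whose left-hand side, (last->address + last->size), could itself wrap. Condensed, the checked merge reads as follows (a sketch reusing the diff's own names):

```c
uint64_t end = 0, merged = 0;

/* merge `release` into `last` only when the two blocks are contiguous */
if (AddU64(last->address, last->size, &end) == false)
	return -EINVAL;
if (end == release->address) {
	if (AddU64(last->size, release->size, &merged) == false)
		return -EINVAL;
	last->size = merged;
	list_del(&release->node);
	kfree(release);
}
```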
@@ -1,7 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0-only
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES.
- * All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: GPL-2.0-only
 */
 
 #define pr_fmt(fmt) "nvscic2c-pcie: stream-ext: " fmt
@@ -1012,6 +1011,8 @@ prepare_edma_desc(enum drv_mode_t drv_mode, struct copy_req_params *params,
 	struct file *filep = NULL;
 	struct stream_ext_obj *stream_obj = NULL;
 	struct nvscic2c_pcie_flush_range *flush_range = NULL;
+	bool retval = 0;
+	uint64_t dest_address = 0;
 
 	*num_desc = 0;
 	for (i = 0; i < params->num_flush_ranges; i++) {
@@ -1032,7 +1033,12 @@ prepare_edma_desc(enum drv_mode_t drv_mode, struct copy_req_params *params,
 			desc[iter].dst = stream_obj->aper;
 		else
 			desc[iter].dst = stream_obj->vmap.iova;
-		desc[iter].dst += flush_range->offset;
+		retval = AddU64(desc[iter].dst, flush_range->offset, &dest_address);
+		if (retval == false) {
+			pr_err("AddU64 overflow: flush_range dst address, offset\n");
+			return -EINVAL;
+		}
+		desc[iter].dst = dest_address;
 		fput(filep);
 
 		desc[iter].sz = flush_range->size;
@@ -1438,6 +1444,12 @@ allocate_copy_request(struct stream_ext_ctx_t *ctx,
 {
 	int ret = 0;
 	struct copy_request *cr = NULL;
+	uint64_t size_of_element = 0;
+	uint64_t num_elements = 0;
+	uint64_t max_flush_ranges = 0;
+	uint64_t maxcr_limits = 0;
+	uint64_t total_size = 0;
+	bool retval = 0;
 
 	/*worst-case allocation for each copy request.*/
 
@@ -1449,52 +1461,98 @@ allocate_copy_request(struct stream_ext_ctx_t *ctx,
 	cr->ctx = ctx;
 
 	/* flush range has two handles: src, dst + all possible post_fences.*/
-	cr->handles = kzalloc((sizeof(*cr->handles) *
-			       ((2U * ctx->cr_limits.max_flush_ranges) +
-				(ctx->cr_limits.max_post_fences))),
-			      GFP_KERNEL);
+	retval = MultU64(2, ctx->cr_limits.max_flush_ranges, &max_flush_ranges);
+	if (retval == false) {
+		pr_err("MultU64 overflow: 2, max_flush_ranges\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	retval = AddU64(max_flush_ranges, ctx->cr_limits.max_post_fences, &maxcr_limits);
+	if (retval == false) {
+		pr_err("AddU64 overflow: max_flush_ranges, max_post_fences\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	size_of_element = sizeof(*cr->handles);
+	retval = MultU64(size_of_element, maxcr_limits, &total_size);
+	if (retval == false) {
+		pr_err("MultU64 overflow: size_of_element, maxcr_limits\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	cr->handles = kzalloc(total_size, GFP_KERNEL);
 	if (WARN_ON(!cr->handles)) {
 		ret = -ENOMEM;
 		goto err;
 	}
 
 	/* edma_desc shall include flush_range.*/
-	cr->edma_desc = kzalloc((sizeof(*cr->edma_desc) *
-				 ctx->cr_limits.max_flush_ranges),
-				GFP_KERNEL);
+	size_of_element = sizeof(*cr->edma_desc);
+	num_elements = ctx->cr_limits.max_flush_ranges;
+	retval = MultU64(size_of_element, num_elements, &total_size);
+	if (retval == false) {
+		pr_err("MultU64 overflow: flush_ranges: size_of_element, num_elements\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	cr->edma_desc = kzalloc(total_size, GFP_KERNEL);
 	if (WARN_ON(!cr->edma_desc)) {
 		ret = -ENOMEM;
 		goto err;
 	}
 
 	/* OR all max_post_fences could be local_post_fence. */
-	cr->local_post_fences = kzalloc((sizeof(*cr->local_post_fences) *
-					 ctx->cr_limits.max_post_fences),
-					GFP_KERNEL);
+	size_of_element = sizeof(*cr->local_post_fences);
+	num_elements = ctx->cr_limits.max_post_fences;
+	retval = MultU64(size_of_element, num_elements, &total_size);
+	if (retval == false) {
+		pr_err("MultU64 overflow: local_post_fences: size_of_element, num_elements\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	cr->local_post_fences = kzalloc(total_size, GFP_KERNEL);
 	if (WARN_ON(!cr->local_post_fences)) {
 		ret = -ENOMEM;
 		goto err;
 	}
-	cr->remote_post_fences = kzalloc((sizeof(*cr->remote_post_fences) *
-					  ctx->cr_limits.max_post_fences),
-					 GFP_KERNEL);
 
+	size_of_element = sizeof(*cr->remote_post_fences);
+	num_elements = ctx->cr_limits.max_post_fences;
+	retval = MultU64(size_of_element, num_elements, &total_size);
+	if (retval == false) {
+		pr_err("MultU64 overflow: remote_post_fences: size_of_element, num_elements\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	cr->remote_post_fences = kzalloc(total_size, GFP_KERNEL);
 	if (WARN_ON(!cr->remote_post_fences)) {
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	cr->remote_buf_objs = kzalloc((sizeof(*cr->remote_buf_objs) *
-				       ctx->cr_limits.max_flush_ranges),
-				      GFP_KERNEL);
+	size_of_element = sizeof(*cr->remote_buf_objs);
+	num_elements = ctx->cr_limits.max_flush_ranges;
+	retval = MultU64(size_of_element, num_elements, &total_size);
+	if (retval == false) {
+		pr_err("MultU64 overflow: remote_buf_objs: size_of_element, num_elements\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	cr->remote_buf_objs = kzalloc(total_size, GFP_KERNEL);
 	if (WARN_ON(!cr->remote_buf_objs)) {
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	cr->remote_post_fence_values =
-		kzalloc((sizeof(*cr->remote_post_fence_values) *
-			 ctx->cr_limits.max_post_fences),
-			GFP_KERNEL);
+	size_of_element = sizeof(*cr->remote_post_fence_values);
+	num_elements = ctx->cr_limits.max_post_fences;
+	retval = MultU64(size_of_element, num_elements, &total_size);
+	if (retval == false) {
+		pr_err("MultU64 overflow: remote_post_fence_values: size_of_element, num_elements");
+		ret = -EINVAL;
+		goto err;
+	}
+	cr->remote_post_fence_values = kzalloc(total_size, GFP_KERNEL);
 	if (WARN_ON(!cr->remote_post_fence_values)) {
 		ret = -ENOMEM;
 		goto err;
@@ -1528,36 +1586,61 @@ allocate_copy_req_params(struct stream_ext_ctx_t *ctx,
 			 struct copy_req_params *params)
 {
 	int ret = 0;
+	uint64_t size_of_element = 0;
+	uint64_t num_elements = 0;
+	uint64_t total_size = 0;
+	bool retval = 0;
 
 	/*worst-case allocation for each.*/
 
-	params->flush_ranges = kzalloc((sizeof(*params->flush_ranges) *
-					ctx->cr_limits.max_flush_ranges),
-				       GFP_KERNEL);
+	size_of_element = sizeof(*params->flush_ranges);
+	num_elements = ctx->cr_limits.max_flush_ranges;
+	retval = MultU64(size_of_element, num_elements, &total_size);
+	if (retval == false)
+		return -EINVAL;
+	params->flush_ranges = kzalloc(total_size, GFP_KERNEL);
 	if (WARN_ON(!params->flush_ranges)) {
 		ret = -ENOMEM;
 		goto err;
 	}
-	params->local_post_fences =
-		kzalloc((sizeof(*params->local_post_fences) *
-			 ctx->cr_limits.max_post_fences),
-			GFP_KERNEL);
 
+	size_of_element = sizeof(*params->local_post_fences);
+	num_elements = ctx->cr_limits.max_post_fences;
+	retval = MultU64(size_of_element, num_elements, &total_size);
+	if (retval == false) {
+		pr_err("MultU64 overflow: local_post_fences: size_of_element, num_elements\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	params->local_post_fences = kzalloc(total_size, GFP_KERNEL);
 	if (WARN_ON(!params->local_post_fences)) {
 		ret = -ENOMEM;
 		goto err;
 	}
-	params->remote_post_fences =
-		kzalloc((sizeof(*params->remote_post_fences) *
-			 ctx->cr_limits.max_post_fences),
-			GFP_KERNEL);
 
+	size_of_element = sizeof(*params->remote_post_fences);
+	num_elements = ctx->cr_limits.max_post_fences;
+	retval = MultU64(size_of_element, num_elements, &total_size);
+	if (retval == false) {
+		pr_err("MultU64 overflow: remote_post_fences: size_of_element, num_elements\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	params->remote_post_fences = kzalloc(total_size, GFP_KERNEL);
 	if (WARN_ON(!params->remote_post_fences)) {
 		ret = -ENOMEM;
 		goto err;
 	}
-	params->remote_post_fence_values =
-		kzalloc((sizeof(*params->remote_post_fence_values) *
-			 ctx->cr_limits.max_post_fences),
-			GFP_KERNEL);
 
+	size_of_element = sizeof(*params->remote_post_fence_values);
+	num_elements = ctx->cr_limits.max_post_fences;
+	retval = MultU64(size_of_element, num_elements, &total_size);
+	if (retval == false) {
+		pr_err("MultU64 overflow: remote_post_fence_values: size_of_element, num_elements");
+		ret = -EINVAL;
+		goto err;
+	}
+	params->remote_post_fence_values = kzalloc(total_size, GFP_KERNEL);
 	if (WARN_ON(!params->remote_post_fence_values)) {
 		ret = -ENOMEM;
 		goto err;
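Each multiply-then-kzalloc() site above could alternatively rely on the kernel's own overflow-checked sizing; a sketch of one allocation rewritten with kcalloc(), which returns NULL rather than allocating a truncated size when n * size would overflow:

```c
cr->edma_desc = kcalloc(ctx->cr_limits.max_flush_ranges,
			sizeof(*cr->edma_desc), GFP_KERNEL);
if (WARN_ON(!cr->edma_desc)) {
	ret = -ENOMEM;
	goto err;
}
```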
@@ -1,7 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0-only
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES.
- * All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: GPL-2.0-only
 */
 
 #define pr_fmt(fmt) "nvscic2c-pcie: vmap: " fmt
@@ -48,8 +47,15 @@ match_dmabuf(int id, void *entry, void *data)
 static int dev_map_limit_check(uint64_t aperture_limit, uint64_t aperture_inuse, size_t map_size)
 {
 	int ret = 0;
+	bool retval = 0;
+	uint64_t total_size = 0;
 
-	if ((aperture_inuse + map_size) > aperture_limit) {
+	retval = AddU64(aperture_inuse, map_size, &total_size);
+	if (retval == false) {
+		pr_err("AddU64 overflow: aperture_inuse, map_size\n");
+		return -EINVAL;
+	}
+	if (total_size > aperture_limit) {
 		ret = -ENOMEM;
 		pr_err("per endpoint mapping limit exceeded, aperture_inuse: %lld, map_size: %zu\n",
 		       aperture_inuse, map_size);
@@ -78,6 +84,8 @@ static int dma_mem_get_size(struct vmap_ctx_t *vmap_ctx, struct memobj_pin_t *pi
 	int ret = 0;
 	u32 sg_index = 0;
 	struct scatterlist *sg = NULL;
+	bool retval = 0;
+	uint64_t total_size = 0;
 
 	/*
 	 * pin to dummy device (which has smmu disabled) to get scatter-list
@@ -97,8 +105,14 @@ static int dma_mem_get_size(struct vmap_ctx_t *vmap_ctx, struct memobj_pin_t *pi
 	}
 
 	*map_size = 0;
-	for_each_sg(pin->sgt->sgl, sg, pin->sgt->nents, sg_index)
-		*map_size += sg->length;
+	for_each_sg(pin->sgt->sgl, sg, pin->sgt->nents, sg_index) {
+		retval = AddU64(*map_size, sg->length, &total_size);
+		if (retval == false) {
+			pr_err("AddU64 overflow: map_size, sg->length\n");
+			return -EINVAL;
+		}
+		*map_size = total_size;
+	}
 
 fn_exit:
 	dma_mem_get_size_exit(pin);
@@ -117,6 +131,8 @@ memobj_map(struct vmap_ctx_t *vmap_ctx,
 	size_t map_size = 0;
 	struct memobj_map_ref *map = NULL;
 	struct dma_buf *dmabuf = NULL;
+	bool retval = 0;
+	uint64_t total_size = 0;
 
 	dmabuf = dma_buf_get(params->fd);
 	if (IS_ERR_OR_NULL(dmabuf)) {
@@ -196,8 +212,14 @@ memobj_map(struct vmap_ctx_t *vmap_ctx,
 		kfree(map);
 		goto err;
 	}
-	if ((map->pin.mngd == VMAP_MNGD_CLIENT) && (aperture_inuse != NULL))
-		*aperture_inuse += map->pin.attrib.size;
+	if ((map->pin.mngd == VMAP_MNGD_CLIENT) && (aperture_inuse != NULL)) {
+		retval = AddU64(*aperture_inuse, map->pin.attrib.size, &total_size);
+		if (retval == false) {
+			pr_err("AddU64 overflow: aperture_inuse, map->pin.attrib.size\n");
+			return -EINVAL;
+		}
+		*aperture_inuse = total_size;
+	}
 	}
 
 	attrib->type = VMAP_OBJ_TYPE_MEM;
@@ -293,6 +315,8 @@ syncobj_map(struct vmap_ctx_t *vmap_ctx,
 	s32 id_exist = 0;
 	u32 syncpt_id = 0;
 	struct syncobj_map_ref *map = NULL;
+	bool retval = 0;
+	uint64_t total_size = 0;
 
 	syncpt_id = params->id;
 	mutex_lock(&vmap_ctx->sync_idr_lock);
@@ -350,8 +374,14 @@ syncobj_map(struct vmap_ctx_t *vmap_ctx,
 		kfree(map);
 		goto err;
 	}
-	if ((params->mngd == VMAP_MNGD_CLIENT) && (aperture_inuse != NULL))
-		*aperture_inuse += map->pin.attrib.size;
+	if ((params->mngd == VMAP_MNGD_CLIENT) && (aperture_inuse != NULL)) {
+		retval = AddU64(*aperture_inuse, map->pin.attrib.size, &total_size);
+		if (retval == false) {
+			pr_err("AddU64 overflow: aperture_inuse, map->pin.attrib.size\n");
+			return -EINVAL;
+		}
+		*aperture_inuse = total_size;
+	}
 
 	attrib->type = VMAP_OBJ_TYPE_SYNC;
 	attrib->id = map->obj_id;
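The scatterlist accumulation in dma_mem_get_size() could likewise use check_add_overflow() in place of the explicit temporary; a sketch under that assumption, reusing the loop variables from the diff:

```c
#include <linux/overflow.h>

u64 total = 0;

for_each_sg(pin->sgt->sgl, sg, pin->sgt->nents, sg_index) {
	if (check_add_overflow(total, (u64)sg->length, &total))
		return -EINVAL;	/* running total would wrap */
}
*map_size = total;
```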