video: tegra: nvmap: Fix INT30-C overflow defects

Add checks for overflow in nvmap_pp.c and nvmap_ioctl.c where two unsigned integers are added, and return an error when the addition would overflow.
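For reference, check_add_overflow() and check_sub_overflow() come from include/linux/overflow.h and return true when the result does not fit the destination variable. A minimal userspace sketch of the same check, using the __builtin_add_overflow() intrinsic those helpers wrap (the values below are made up for illustration):

#include <stdio.h>
#include <limits.h>

int main(void)
{
	/* Mirrors the sys_addr/sys_stride advance in rw_handle(); values are illustrative. */
	unsigned long sys_addr = ULONG_MAX - 8;
	unsigned long sys_stride = 16;
	unsigned long sum;

	/* Returns nonzero when sys_addr + sys_stride wraps around. */
	if (__builtin_add_overflow(sys_addr, sys_stride, &sum)) {
		fprintf(stderr, "overflow detected, would return -EOVERFLOW\n");
		return 1;
	}
	printf("advanced address: %lu\n", sum);
	return 0;
}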

JIRA: TMM-5724
Bug 4479044

Change-Id: Icecbb45498672d5e55a66c60f2be59c3970bf75d
Signed-off-by: Surbhi Singh <surbhis@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3256309
Reviewed-by: Ketan Patil <ketanp@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Author: Surbhi Singh
Date: 2024-11-24 21:02:08 +00:00
Committed by: Jon Hunter
Parent: 2727334720
Commit: 1f2a35edf4
2 changed files with 30 additions and 6 deletions

nvmap_ioctl.c

@@ -435,6 +435,7 @@ static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
 	void *tmp = NULL;
 	void *addr;
 	int ret = 0;
+	unsigned long sum;
 
 	if ((h->heap_type & nvmap_dev->cpu_access_mask) == 0)
 		return -EPERM;
@@ -508,7 +509,12 @@ static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
 					false);
 		copied += elem_size;
-		sys_addr += sys_stride;
+		if (check_add_overflow(sys_addr, sys_stride, &sum)) {
+			ret = -EOVERFLOW;
+			break;
+		}
+		sys_addr = sum;
 		h_offs += h_stride;
 		addr += h_stride;
 	}
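A simplified, self-contained sketch of the loop pattern this hunk introduces: the unchecked sys_addr += sys_stride becomes a checked advance, and the loop breaks with -EOVERFLOW instead of wrapping the address. The names (count, elem_size) and the check_add_overflow() stand-in below are illustrative, not the exact rw_handle() code:

#include <errno.h>
#include <stddef.h>

/* Userspace stand-in for the kernel helper from <linux/overflow.h>. */
#define check_add_overflow(a, b, d) __builtin_add_overflow(a, b, d)

static int copy_elements(unsigned long sys_addr, unsigned long sys_stride,
			 size_t elem_size, int count)
{
	unsigned long sum;
	size_t copied = 0;
	int ret = 0;

	while (count--) {
		/* ... copy elem_size bytes at sys_addr ... */
		copied += elem_size;
		if (check_add_overflow(sys_addr, sys_stride, &sum)) {
			ret = -EOVERFLOW;	/* fail instead of wrapping the address */
			break;			/* elements already copied stay accounted */
		}
		sys_addr = sum;
	}
	return ret ? ret : (int)copied;
}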

nvmap_pp.c

@@ -246,11 +246,12 @@ static void nvmap_pp_zero_pages(struct page **pages, int nr)
 	trace_nvmap_pp_zero_pages(nr);
 }
 
-static void nvmap_pp_do_background_zero_pages(struct nvmap_page_pool *pool)
+static int nvmap_pp_do_background_zero_pages(struct nvmap_page_pool *pool)
 {
 	int i;
 	struct page *page;
 	int ret;
+	u32 difference, sum;
 	/*
 	 * Statically declared array of pages to be zeroed in a batch,
 	 * local to this thread but too big for the stack.
@@ -263,7 +264,12 @@ static void nvmap_pp_do_background_zero_pages(struct nvmap_page_pool *pool)
 		if (page == NULL)
 			break;
 		pending_zero_pages[i] = page;
-		pool->under_zero++;
+		if (check_add_overflow(pool->under_zero, 1U, &sum)) {
+			rt_mutex_unlock(&pool->lock);
+			return -EOVERFLOW;
+		}
+		pool->under_zero = sum;
 	}
 	rt_mutex_unlock(&pool->lock);
@@ -271,13 +277,20 @@ static void nvmap_pp_do_background_zero_pages(struct nvmap_page_pool *pool)
 	rt_mutex_lock(&pool->lock);
 	ret = __nvmap_page_pool_fill_lots_locked(pool, pending_zero_pages, i);
-	pool->under_zero -= i;
+	if (check_sub_overflow(pool->under_zero, (u32)i, &difference)) {
+		rt_mutex_unlock(&pool->lock);
+		return -EOVERFLOW;
+	}
+	pool->under_zero = difference;
 	rt_mutex_unlock(&pool->lock);
 
 	trace_nvmap_pp_do_background_zero_pages(ret, i);
 
 	for (; ret < i; ret++)
 		__free_page(pending_zero_pages[ret]);
+
+	return 0;
 }
 
 /*
@@ -291,6 +304,7 @@ static void nvmap_pp_do_background_zero_pages(struct nvmap_page_pool *pool)
 static int nvmap_background_zero_thread(void *arg)
 {
 	struct nvmap_page_pool *pool = nvmap_dev->pool;
+	int err;
 
 	pr_info("PP zeroing thread starting.\n");
@@ -298,8 +312,11 @@ static int nvmap_background_zero_thread(void *arg)
 	sched_set_normal(current, MAX_NICE);
 
 	while (!kthread_should_stop()) {
-		while (nvmap_bg_should_run(pool))
-			nvmap_pp_do_background_zero_pages(pool);
+		while (nvmap_bg_should_run(pool)) {
+			err = nvmap_pp_do_background_zero_pages(pool);
+			if (err != 0)
+				return -EOVERFLOW;
+		}
 
 		wait_event_freezable(nvmap_bg_wait,
 				nvmap_bg_should_run(pool) ||
@@ -493,6 +510,7 @@ u32 nvmap_page_pool_fill_lots(struct nvmap_page_pool *pool,
 			__free_page(pages[i]);
 		} else {
 			list_add_tail(&pages[i]->lru, &pool->zero_list);
+			BUG_ON(pool->to_zero == UINT_MAX);
 			pool->to_zero++;
 		}
 	}
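The pool counters follow the same idea: under_zero is only updated through checked add/sub operations, while to_zero keeps a plain increment guarded by a BUG_ON at UINT_MAX. A minimal sketch of that counter pattern, with userspace stand-ins for the kernel macros and a hypothetical struct in place of struct nvmap_page_pool:

#include <errno.h>
#include <limits.h>
#include <assert.h>

/* Userspace stand-ins for the kernel helpers/macros used in nvmap_pp.c. */
#define check_add_overflow(a, b, d) __builtin_add_overflow(a, b, d)
#define check_sub_overflow(a, b, d) __builtin_sub_overflow(a, b, d)
#define BUG_ON(cond) assert(!(cond))

/* Hypothetical stand-in for the relevant struct nvmap_page_pool counters. */
struct pool_counters {
	unsigned int under_zero;	/* pages currently being zeroed */
	unsigned int to_zero;		/* pages queued for zeroing */
};

/* Checked increment: refuse to wrap under_zero rather than corrupt the count. */
static int pool_begin_zeroing(struct pool_counters *p, unsigned int n)
{
	unsigned int sum;

	if (check_add_overflow(p->under_zero, n, &sum))
		return -EOVERFLOW;
	p->under_zero = sum;
	return 0;
}

/* Checked decrement: refuse to wrap below zero. */
static int pool_end_zeroing(struct pool_counters *p, unsigned int n)
{
	unsigned int difference;

	if (check_sub_overflow(p->under_zero, n, &difference))
		return -EOVERFLOW;
	p->under_zero = difference;
	return 0;
}

/* to_zero instead uses a hard cap: crash loudly at UINT_MAX rather than wrap. */
static void pool_queue_to_zero(struct pool_counters *p)
{
	BUG_ON(p->to_zero == UINT_MAX);
	p->to_zero++;
}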