gpu: nvgpu: mm: fix CERT-C bugs in pd_cache
Fix CERT-C INT-30 and INT-31 violations in nvgpu.common.mm.gmmu.pd_cache
by using safe ops.

JIRA NVGPU-3637

Change-Id: Iecc7769e46c5c9c7dabaa852067e8f4052a73ac5
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2146987
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Nitin Kumbhar <nkumbhar@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 5e75f90f1c
parent 1eafda5a62
committed by mobile promotions
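
The diff below routes raw arithmetic through checked helpers pulled in from <nvgpu/safe_ops.h>. Those helpers are declared elsewhere and are not part of this change, so the following is only a minimal sketch of the behavior the call sites appear to rely on, assuming assert-on-failure semantics; the "sketch_" names and the use of plain assert() are illustrative stand-ins, not the nvgpu implementation.

	#include <assert.h>
	#include <stdint.h>

	/*
	 * Illustrative stand-ins for the checked helpers from
	 * <nvgpu/safe_ops.h>.  They capture the intended CERT-C behavior:
	 * refuse to continue when unsigned arithmetic would wrap (INT30-C)
	 * or a narrowing cast would lose value (INT31-C).
	 */
	typedef uint32_t u32;
	typedef uint64_t u64;

	static inline u64 sketch_safe_add_u64(u64 a, u64 b)
	{
		assert(a <= UINT64_MAX - b);	/* would wrap otherwise */
		return a + b;
	}

	static inline u32 sketch_safe_add_u32(u32 a, u32 b)
	{
		assert(a <= UINT32_MAX - b);
		return a + b;
	}

	static inline u32 sketch_safe_sub_u32(u32 a, u32 b)
	{
		assert(a >= b);			/* would wrap below zero */
		return a - b;
	}

	static inline u32 sketch_safe_mult_u32(u32 a, u32 b)
	{
		assert(b == 0U || a <= UINT32_MAX / b);
		return a * b;
	}

	static inline u32 sketch_safe_cast_u64_to_u32(u64 v)
	{
		assert(v <= UINT32_MAX);	/* cast must preserve the value */
		return (u32)v;
	}

With helpers of this shape, the fix is largely mechanical: each addition, subtraction, multiplication, and narrowing cast flagged by the static analysis is replaced by the corresponding checked operation.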
@@ -29,6 +29,7 @@
 #include <nvgpu/log2.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/enabled.h>
+#include <nvgpu/safe_ops.h>
 
 #include "pd_cache_priv.h"
 
@@ -75,19 +76,22 @@ u64 nvgpu_pd_gpu_addr(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
 		page_addr = nvgpu_mem_get_addr(g, pd->mem);
 	}
 
-	return page_addr + pd->mem_offs;
+	return nvgpu_safe_add_u64(page_addr, U64(pd->mem_offs));
 }
 
 u32 nvgpu_pd_offset_from_index(const struct gk20a_mmu_level *l, u32 pd_idx)
 {
-	return (pd_idx * l->entry_size) / U32(sizeof(u32));
+	return nvgpu_safe_mult_u32(pd_idx, l->entry_size) / U32(sizeof(u32));
 }
 
 void nvgpu_pd_write(struct gk20a *g, struct nvgpu_gmmu_pd *pd,
 		    size_t w, u32 data)
 {
+	u64 tmp_offset = nvgpu_safe_add_u64((pd->mem_offs / sizeof(u32)), w);
+
 	nvgpu_mem_wr32(g, pd->mem,
-		       (u32)((pd->mem_offs / sizeof(u32)) + w), data);
+		       nvgpu_safe_cast_u64_to_u32(tmp_offset),
+		       data);
 }
 
 int nvgpu_pd_cache_init(struct gk20a *g)
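
The nvgpu_pd_write() change is the clearest INT-31 case in this hunk: the old code cast the 64-bit word index straight to u32, while the new code computes it with a checked add and narrows it with a checked cast. A stand-alone illustration of what the bare cast does once the index no longer fits; the values are hypothetical and not taken from nvgpu.

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Hypothetical values showing the truncation a bare (u32) cast
	 * allows and a checked cast would catch.
	 */
	int main(void)
	{
		uint64_t mem_offs = 0x500000000ULL;	/* hypothetical byte offset */
		size_t w = 2U;

		uint64_t word = mem_offs / sizeof(uint32_t) + (uint64_t)w;
		uint32_t bare_cast = (uint32_t)word;	/* high bits silently lost */

		printf("word index  = 0x%" PRIx64 "\n", word);		/* 0x140000002 */
		printf("after (u32) = 0x%" PRIx32 "\n", bare_cast);	/* 0x40000002  */
		return 0;
	}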
@@ -244,25 +248,25 @@ static int nvgpu_pd_cache_alloc_from_partial(struct gk20a *g,
 					     struct nvgpu_pd_mem_entry *pentry,
 					     struct nvgpu_gmmu_pd *pd)
 {
-	unsigned long bit_offs;
+	u32 bit_offs;
 	u32 mem_offs;
 	u32 nr_bits = nvgpu_pd_cache_get_nr_entries(pentry);
 
 	/*
 	 * Find and allocate an open PD.
 	 */
-	bit_offs = find_first_zero_bit(pentry->alloc_map, nr_bits);
-	nvgpu_assert(bit_offs <= U32_MAX);
-	mem_offs = (u32)bit_offs * pentry->pd_size;
+	bit_offs = nvgpu_safe_cast_u64_to_u32(
+		find_first_zero_bit(pentry->alloc_map, nr_bits));
+	mem_offs = nvgpu_safe_mult_u32(bit_offs, pentry->pd_size);
 
-	pd_dbg(g, "PD-Alloc [C] Partial: offs=%lu nr_bits=%d src=0x%p",
+	pd_dbg(g, "PD-Alloc [C] Partial: offs=%u nr_bits=%d src=0x%p",
 	       bit_offs, nr_bits, pentry);
 
 	/* Bit map full. Somethings wrong. */
 	nvgpu_assert(bit_offs < nr_bits);
 
-	nvgpu_set_bit((u32)bit_offs, pentry->alloc_map);
-	pentry->allocs += 1U;
+	nvgpu_set_bit(bit_offs, pentry->alloc_map);
+	pentry->allocs = nvgpu_safe_add_u32(pentry->allocs, 1U);
 
 	/*
 	 * First update the pd.
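
The partial-allocation path follows the same pattern: find_first_zero_bit() returns an unsigned long, which is now narrowed once through the checked cast instead of a separate assert plus a bare (u32) cast, and the byte offset is derived with a checked multiply. Below is a simplified model of that flow, assuming a plain 64-bit bitmap and explicit asserts in place of the nvgpu helpers; none of the names or values are nvgpu's own.

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Return the index of the first clear bit, or nr_bits if the map is full. */
	static uint32_t first_zero_bit(uint64_t map, uint32_t nr_bits)
	{
		uint32_t i;

		for (i = 0U; i < nr_bits; i++) {
			if ((map & (1ULL << i)) == 0ULL) {
				break;
			}
		}
		return i;
	}

	int main(void)
	{
		uint64_t alloc_map = 0x0FULL;	/* hypothetical: slots 0-3 in use */
		uint32_t pd_size = 4096U;	/* hypothetical sub-page PD size  */
		uint32_t nr_bits = 16U;

		uint32_t bit_offs = first_zero_bit(alloc_map, nr_bits);
		assert(bit_offs < nr_bits);			/* map not full     */
		assert(bit_offs <= UINT32_MAX / pd_size);	/* multiply is safe */

		uint32_t mem_offs = bit_offs * pd_size;
		printf("slot %u -> byte offset %u\n", bit_offs, mem_offs);
		return 0;
	}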
@@ -311,11 +315,15 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache,
 {
 	struct nvgpu_pd_mem_entry *pentry;
 	int err;
+	bool bytes_valid;
 
 	pd_dbg(g, "PD-Alloc [C] %u bytes", bytes);
 
-	if ((bytes & (bytes - 1U)) != 0U ||
-	    bytes < NVGPU_PD_CACHE_MIN) {
+	bytes_valid = bytes >= NVGPU_PD_CACHE_MIN;
+	if (bytes_valid) {
+		bytes_valid = (bytes & nvgpu_safe_sub_u32(bytes, 1U)) == 0U;
+	}
+	if (!bytes_valid) {
 		pd_dbg(g, "PD-Alloc [C] Invalid (bytes=%u)!", bytes);
 		return -EINVAL;
 	}
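
The size check in nvgpu_pd_cache_alloc() is the subtler fix: in the old short-circuit condition the power-of-two test ran first, so "bytes - 1U" wrapped to UINT32_MAX when bytes was 0. The overall condition still rejected 0, but the wraparound itself is what INT-30 flags. Reordering the checks makes bytes provably nonzero before the subtraction. A small model of the new flow, with a hypothetical MIN_BYTES standing in for NVGPU_PD_CACHE_MIN (whose value is not shown in the diff):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MIN_BYTES 256U	/* hypothetical minimum, for illustration only */

	static bool pd_size_valid(uint32_t bytes)
	{
		bool valid = bytes >= MIN_BYTES;

		if (valid) {
			/* bytes >= MIN_BYTES > 0, so the subtraction cannot wrap. */
			valid = (bytes & (bytes - 1U)) == 0U;
		}
		return valid;
	}

	int main(void)
	{
		printf("%d %d %d %d\n",
		       pd_size_valid(0U),	/* 0: below minimum            */
		       pd_size_valid(384U),	/* 0: not a power of two       */
		       pd_size_valid(4096U),	/* 1: power of two, >= minimum */
		       pd_size_valid(65536U));	/* 1                           */
		return 0;
	}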
@@ -408,7 +416,7 @@ static void nvgpu_pd_cache_do_free(struct gk20a *g,
 
 	/* Mark entry as free. */
 	nvgpu_clear_bit(bit, pentry->alloc_map);
-	pentry->allocs -= 1U;
+	pentry->allocs = nvgpu_safe_sub_u32(pentry->allocs, 1U);
 
 	if (pentry->allocs > 0U) {
 		/*