gpu: nvgpu: Fix MISRA 21.2 violations (pd_cache.c)

MISRA 21.2 states that we may not use reserved identifiers; since
identifiers beginning with an underscore are reserved for the
implementation (the compiler and the C library), using a '__' prefix
is disallowed.

Fix all of the pd_cache functions that use a '__' prefix. The fix is
trivial: the '__' prefix is simply deleted.
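
For example (taken from the diff below), the public declaration changes
from:

    int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes);

to:

    int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes);

No functional change; only the identifiers are renamed.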

JIRA NVGPU-1029

Change-Id: Ia91dabe3ef97fb17a2a85105935fb3a72d7c2c5e
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1813643
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Alex Waterman
Date: 2018-09-05 16:30:08 -07:00
Committed by: mobile promotions
Parent: 2c95becc9e
Commit: 7405f69ae2
4 changed files with 19 additions and 19 deletions


@@ -200,7 +200,7 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
 	 */
 	pdb_size = ALIGN(pd_size(&vm->mmu_levels[0], &attrs), PAGE_SIZE);
-	err = __nvgpu_pd_cache_alloc_direct(vm->mm->g, &vm->pdb, pdb_size);
+	err = nvgpu_pd_cache_alloc_direct(vm->mm->g, &vm->pdb, pdb_size);
 	if (WARN_ON(err)) {
 		return err;
 	}
@@ -277,7 +277,7 @@ static int pd_allocate(struct vm_gk20a *vm,
 		return 0;
 	}
-	err = __nvgpu_pd_alloc(vm, pd, pd_size(l, attrs));
+	err = nvgpu_pd_alloc(vm, pd, pd_size(l, attrs));
 	if (err) {
 		nvgpu_info(vm->mm->g, "error allocating page directory!");
 		return err;


@@ -64,10 +64,10 @@
  * lists. For a 4Kb page NVGPU_PD_CACHE_COUNT is 4. This is enough space for
  * 256, 512, 1024, and 2048 byte PDs.
  *
- * __nvgpu_pd_alloc() will allocate a PD for the GMMU. It will check if the PD
+ * nvgpu_pd_alloc() will allocate a PD for the GMMU. It will check if the PD
  * size is page size or larger and choose the correct allocation scheme - either
- * from the PD cache or directly. Similarly __nvgpu_pd_free() will free a PD
- * allocated by __nvgpu_pd_alloc().
+ * from the PD cache or directly. Similarly nvgpu_pd_free() will free a PD
+ * allocated by nvgpu_pd_alloc().
  *
  * Since the top level PD (the PDB) is a page aligned pointer but less than a
  * page size the direct functions must be used for allocating PDBs. Otherwise
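
As a rough usage sketch of the scheme described in the comment above (an
illustrative, hypothetical caller, not code from this change; error handling
is trimmed):

	/* Hypothetical caller: the page-aligned PDB goes through the direct
	 * allocator, while a sub-page lower-level PD goes through
	 * nvgpu_pd_alloc(), which picks the PD cache for sizes below
	 * PAGE_SIZE. */
	static int example_alloc_pds(struct vm_gk20a *vm, struct gk20a *g,
				     struct nvgpu_gmmu_pd *pdb,
				     struct nvgpu_gmmu_pd *pd)
	{
		int err;

		/* Top-level PDB: page sized and page aligned, direct path. */
		err = nvgpu_pd_cache_alloc_direct(g, pdb, PAGE_SIZE);
		if (err != 0) {
			return err;
		}

		/* Lower-level PD, e.g. 2048 bytes: served from the PD cache. */
		err = nvgpu_pd_alloc(vm, pd, 2048U);
		if (err != 0) {
			nvgpu_pd_cache_free_direct(g, pdb);
			return err;
		}

		/* ... program the page directories ... */

		nvgpu_pd_free(vm, pd);
		nvgpu_pd_cache_free_direct(g, pdb);
		return 0;
	}
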
@@ -150,8 +150,8 @@ void nvgpu_pd_cache_fini(struct gk20a *g)
  * Note: this does not need the cache lock since it does not modify any of the
  * PD cache data structures.
  */
-int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,
-				  struct nvgpu_gmmu_pd *pd, u32 bytes)
+int nvgpu_pd_cache_alloc_direct(struct gk20a *g,
+				struct nvgpu_gmmu_pd *pd, u32 bytes)
 {
 	int err;
 	unsigned long flags = 0;
@@ -339,7 +339,7 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache,
  * cache logistics. Since on Parker and later GPUs some of the page directories
  * are smaller than a page packing these PDs together saves a lot of memory.
  */
-int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
+int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	int err;
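
For a sense of the saving the comment above refers to (using the 4 KiB page
and 256-byte PD sizes mentioned in the header comment, and assuming each
stand-alone PD allocation would otherwise consume at least one page):

	16 x 256 B PDs, packed into the cache: 16 * 256 B = 4 KiB  (one page)
	16 x 256 B PDs, one allocation each:   16 * 4 KiB = 64 KiB (sixteen pages)
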
@@ -349,7 +349,7 @@ int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
 	 * alloc.
 	 */
 	if (bytes >= PAGE_SIZE) {
-		err = __nvgpu_pd_cache_alloc_direct(g, pd, bytes);
+		err = nvgpu_pd_cache_alloc_direct(g, pd, bytes);
 		if (err) {
 			return err;
 		}
@@ -368,7 +368,7 @@ int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
 	return err;
 }
-void __nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
+void nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
 {
 	pd_dbg(g, "PD-Free [D] 0x%p", pd->mem);
@@ -448,7 +448,7 @@ static void nvgpu_pd_cache_free(struct gk20a *g, struct nvgpu_pd_cache *cache,
 	nvgpu_pd_cache_do_free(g, cache, pentry, pd);
 }
-void __nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
+void nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
@@ -456,7 +456,7 @@ void __nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
 	 * Simple case: just DMA free.
 	 */
 	if (!pd->cached) {
-		return __nvgpu_pd_cache_free_direct(g, pd);
+		return nvgpu_pd_cache_free_direct(g, pd);
 	}
 	nvgpu_mutex_acquire(&g->mm.pd_cache->lock);


@@ -90,7 +90,7 @@ static void __nvgpu_vm_free_entries(struct vm_gk20a *vm,
 	int i;
 	if (pd->mem) {
-		__nvgpu_pd_free(vm, pd);
+		nvgpu_pd_free(vm, pd);
 		pd->mem = NULL;
 	}
@@ -110,7 +110,7 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
 	struct gk20a *g = vm->mm->g;
 	int i;
-	__nvgpu_pd_cache_free_direct(g, pdb);
+	nvgpu_pd_cache_free_direct(g, pdb);
 	if (!pdb->entries) {
 		return;
@@ -522,7 +522,7 @@ clean_up_allocators:
 	}
 clean_up_page_tables:
 	/* Cleans up nvgpu_gmmu_init_page_table() */
-	__nvgpu_pd_cache_free_direct(g, &vm->pdb);
+	nvgpu_pd_cache_free_direct(g, &vm->pdb);
 clean_up_vgpu_vm:
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
 	if (g->is_virtual)


@@ -251,11 +251,11 @@ void nvgpu_gmmu_unmap(struct vm_gk20a *vm,
 		      struct nvgpu_mem *mem,
 		      u64 gpu_va);
-int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes);
-void __nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd);
-int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,
+int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes);
+void nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd);
+int nvgpu_pd_cache_alloc_direct(struct gk20a *g,
 			  struct nvgpu_gmmu_pd *pd, u32 bytes);
-void __nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd);
+void nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd);
 int nvgpu_pd_cache_init(struct gk20a *g);
 void nvgpu_pd_cache_fini(struct gk20a *g);