mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git, synced 2025-12-22 17:36:20 +03:00
gpu: nvgpu: mm: fix misra 5.9 violation
Advisory Rule 5.9 states that identifiers that define objects or
functions with internal linkage should be unique. This change
eliminates an Advisory Rule 5.9 violation in mm code involving the
pd_size() function name by renaming it to pd_get_size().

Jira NVGPU-3178

Change-Id: I3a2e62908257da1c1dc10528f8fec623b5a30ee1
Signed-off-by: Scott Long <scottl@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2190085
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by Alex Waterman
parent 1a2de585d1
commit ca02105446
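For context, MISRA C:2012 Rule 5.9 (Advisory) flags an identifier with internal
linkage that is reused elsewhere in the program. A minimal sketch of the kind
of clash the rename removes follows; the file names, parameter lists, and the
second pd_size() are hypothetical, for illustration only:

/* file_a.c -- sketch of the pattern fixed by this change */
typedef unsigned int u32;

static u32 pd_size(u32 entries, u32 entry_size)
{
	return entries * entry_size;
}

/* file_b.c -- hypothetical second translation unit */
typedef unsigned int u32;

/*
 * This second pd_size() also has internal linkage, so the program
 * builds and links fine, but the duplicated name violates MISRA
 * C:2012 Rule 5.9. Renaming one of the functions (here, the mm copy
 * becomes pd_get_size) makes each internal-linkage identifier unique.
 */
static u32 pd_size(u32 num_pages)
{
	return num_pages * 4096U;
}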
@@ -60,7 +60,7 @@ static int pd_allocate(struct vm_gk20a *vm,
 		       struct nvgpu_gmmu_pd *pd,
 		       const struct gk20a_mmu_level *l,
 		       struct nvgpu_gmmu_attrs *attrs);
-static u32 pd_size(const struct gk20a_mmu_level *l,
+static u32 pd_get_size(const struct gk20a_mmu_level *l,
 		   struct nvgpu_gmmu_attrs *attrs);
 /*
  * Core GMMU map function for the kernel to use. If @addr is 0 then the GPU
@@ -195,7 +195,7 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
 	 * Currently PAGE_SIZE is used, even when 64K, to work around an issue
 	 * with the PDB TLB invalidate code not being pd_cache aware yet.
 	 */
-	pdb_size = ALIGN(pd_size(&vm->mmu_levels[0], &attrs), PAGE_SIZE);
+	pdb_size = ALIGN(pd_get_size(&vm->mmu_levels[0], &attrs), PAGE_SIZE);
 
 	err = nvgpu_pd_alloc(vm, &vm->pdb, pdb_size);
 	if (err != 0) {
@@ -243,7 +243,7 @@ static u32 pd_entries(const struct gk20a_mmu_level *l,
 /*
  * Computes the size of a PD table (in bytes).
  */
-static u32 pd_size(const struct gk20a_mmu_level *l,
+static u32 pd_get_size(const struct gk20a_mmu_level *l,
 		   struct nvgpu_gmmu_attrs *attrs)
 {
 	return nvgpu_safe_mult_u32(pd_entries(l, attrs), l->entry_size);
@@ -266,7 +266,7 @@ static int pd_allocate(struct vm_gk20a *vm,
 	 * the underlying DMA memory here.
 	 */
 	if (pd->mem != NULL) {
-		if (pd->pd_size >= pd_size(l, attrs)) {
+		if (pd->pd_size >= pd_get_size(l, attrs)) {
 			return 0;
 		}
 	}
@@ -276,7 +276,7 @@ static int pd_allocate(struct vm_gk20a *vm,
 		pd->mem = NULL;
 	}
 
-	err = nvgpu_pd_alloc(vm, pd, pd_size(l, attrs));
+	err = nvgpu_pd_alloc(vm, pd, pd_get_size(l, attrs));
 	if (err != 0) {
 		nvgpu_info(vm->mm->g, "error allocating page directory!");
 		return err;
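A side note on the body shown in the third hunk: pd_get_size() derives the
byte size as pd_entries(l, attrs) * l->entry_size through
nvgpu_safe_mult_u32(). Below is a minimal sketch of a checked u32 multiply in
that spirit; the name checked_mult_u32 and the trap-on-overflow policy are
assumptions for illustration, not nvgpu's actual implementation:

#include <stdint.h>

typedef uint32_t u32;

/*
 * Sketch of a checked multiply: returns a * b, trapping if the product
 * would not fit in 32 bits. The real nvgpu_safe_mult_u32() may handle
 * overflow differently.
 */
static u32 checked_mult_u32(u32 a, u32 b)
{
	if ((a != 0U) && (b > (UINT32_MAX / a))) {
		__builtin_trap(); /* overflow: a PD size must never wrap */
	}
	return a * b;
}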