From ca02105446b21e2225a5994492415a441a2ff75c Mon Sep 17 00:00:00 2001
From: Scott Long
Date: Wed, 4 Sep 2019 10:00:02 -0700
Subject: [PATCH] gpu: nvgpu: mm: fix misra 5.9 violation

Advisory Rule 5.9 states that identifiers that define objects or
functions with internal linkage should be unique. This change
eliminates an Advisory Rule 5.9 violation in mm code involving the
pd_size() function name by renaming it to pd_get_size().

Jira NVGPU-3178

Change-Id: I3a2e62908257da1c1dc10528f8fec623b5a30ee1
Signed-off-by: Scott Long
Reviewed-on: https://git-master.nvidia.com/r/2190085
Reviewed-by: svc-mobile-coverity
Reviewed-by: svc-mobile-misra
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/mm/gmmu/page_table.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
index 16e99e759..14593bd7d 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
@@ -60,8 +60,8 @@ static int pd_allocate(struct vm_gk20a *vm,
 		       struct nvgpu_gmmu_pd *pd,
 		       const struct gk20a_mmu_level *l,
 		       struct nvgpu_gmmu_attrs *attrs);
-static u32 pd_size(const struct gk20a_mmu_level *l,
-		   struct nvgpu_gmmu_attrs *attrs);
+static u32 pd_get_size(const struct gk20a_mmu_level *l,
+		       struct nvgpu_gmmu_attrs *attrs);
 /*
  * Core GMMU map function for the kernel to use. If @addr is 0 then the GPU
  * VA will be allocated for you. If addr is non-zero then the buffer will be
@@ -195,7 +195,7 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
 	 * Currently PAGE_SIZE is used, even when 64K, to work around an issue
 	 * with the PDB TLB invalidate code not being pd_cache aware yet.
 	 */
-	pdb_size = ALIGN(pd_size(&vm->mmu_levels[0], &attrs), PAGE_SIZE);
+	pdb_size = ALIGN(pd_get_size(&vm->mmu_levels[0], &attrs), PAGE_SIZE);

 	err = nvgpu_pd_alloc(vm, &vm->pdb, pdb_size);
 	if (err != 0) {
@@ -243,7 +243,7 @@ static u32 pd_entries(const struct gk20a_mmu_level *l,
 /*
  * Computes the size of a PD table (in bytes).
  */
-static u32 pd_size(const struct gk20a_mmu_level *l,
+static u32 pd_get_size(const struct gk20a_mmu_level *l,
 		   struct nvgpu_gmmu_attrs *attrs)
 {
 	return nvgpu_safe_mult_u32(pd_entries(l, attrs), l->entry_size);
@@ -266,7 +266,7 @@ static int pd_allocate(struct vm_gk20a *vm,
 	 * the underlying DMA memory here.
 	 */
 	if (pd->mem != NULL) {
-		if (pd->pd_size >= pd_size(l, attrs)) {
+		if (pd->pd_size >= pd_get_size(l, attrs)) {
 			return 0;
 		}
 	}
@@ -276,7 +276,7 @@ static int pd_allocate(struct vm_gk20a *vm,
 		pd->mem = NULL;
 	}

-	err = nvgpu_pd_alloc(vm, pd, pd_size(l, attrs));
+	err = nvgpu_pd_alloc(vm, pd, pd_get_size(l, attrs));
 	if (err != 0) {
 		nvgpu_info(vm->mm->g, "error allocating page directory!");
 		return err;
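
Note: for readers unfamiliar with the rule, the sketch below shows the
pattern MISRA C:2012 Rule 5.9 flags and the same rename-style fix this
patch applies. The two files and the buf_size()/buf_get_size()
identifiers are hypothetical stand-ins, not code from the nvgpu tree.

	/* file_a.c -- first hypothetical translation unit */
	static unsigned int buf_size(void)	/* internal linkage */
	{
		return 64U;
	}

	unsigned int a_bytes(void)
	{
		return buf_size() * 2U;
	}

	/* file_b.c -- second hypothetical translation unit */
	/*
	 * Reusing the internal-linkage name buf_size here is well-defined
	 * ISO C, but it violates MISRA C:2012 Advisory Rule 5.9. The fix
	 * mirrors this patch: rename one definition, e.g. buf_get_size.
	 */
	static unsigned int buf_size(void)
	{
		return 128U;
	}

	unsigned int b_bytes(void)
	{
		return buf_size() * 4U;
	}

Because each static definition is invisible outside its own translation
unit, this program compiles and links cleanly; the rule targets reader
and tooling confusion rather than a behavioral defect. Renaming one
definition, as this patch does with pd_size() -> pd_get_size(), makes
every internal-linkage identifier unique and clears the finding.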