Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: mm: fix MISRA 17.2 violation
MISRA Rule 17.2 prohibits recursion. Update the function
nvgpu_locate_pte() to remove recursion.

JIRA NVGPU-3340

Change-Id: I027887f45f334a5f9819cf2e620693f10ab4fa0b
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2110597
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 4222052a53
commit b96ac290c8
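For context: MISRA C:2012 Rule 17.2 ("functions shall not call themselves, either directly or indirectly") bans all recursion, because recursion depth, and therefore stack usage, cannot be statically bounded. The recursive call in nvgpu_locate_pte() was a tail call, so it can be replaced mechanically by a loop that updates the walk state (pd and lvl) in place, which is what the diff below does. A minimal standalone sketch of the same transformation follows; the list type and function names here are hypothetical, not nvgpu code:

#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *next;
	int key;
};

/* Recursive form: violates MISRA 17.2. */
static int find_recursive(const struct node *n, int key)
{
	if (n == NULL) {
		return -1;
	}
	if (n->key == key) {
		return 0;
	}
	return find_recursive(n->next, key); /* tail call */
}

/*
 * Iterative form: the tail call becomes a state update inside a loop,
 * using the same done-flag pattern as the patch below.
 */
static int find_iterative(const struct node *n, int key)
{
	bool done = false;
	int ret = -1;

	do {
		if (n == NULL) {
			done = true;		/* not found */
		} else if (n->key == key) {
			ret = 0;		/* found */
			done = true;
		} else {
			n = n->next;		/* replaces the recursive call */
		}
	} while (!done);

	return ret;
}

The done flag, rather than a break, gives the loop a single explicit termination condition; the patch uses the same device.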
@@ -934,13 +934,18 @@ static int nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 			    struct nvgpu_gmmu_pd **pd_out, u32 *pd_idx_out,
 			    u32 *pd_offs_out)
 {
-	const struct gk20a_mmu_level *l = &vm->mmu_levels[lvl];
-	const struct gk20a_mmu_level *next_l = &vm->mmu_levels[lvl + 1];
-	u32 pd_idx = pd_index(l, vaddr, attrs);
+	const struct gk20a_mmu_level *l;
+	const struct gk20a_mmu_level *next_l;
+	u32 pd_idx;
 	u32 pte_base;
 	u32 pte_size;
 	u32 i;
+	bool done = false;
 
+	do {
+		l = &vm->mmu_levels[lvl];
+		next_l = &vm->mmu_levels[lvl + 1];
+		pd_idx = pd_index(l, vaddr, attrs);
 	/*
 	 * If this isn't the final level (i.e there's a valid next level)
 	 * then find the next level PD and recurse.
@@ -959,19 +964,17 @@ static int nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 			return -EINVAL;
 		}
 
-		return nvgpu_locate_pte(g, vm, pd_next,
-					vaddr, lvl + 1, attrs,
-					data, pd_out, pd_idx_out,
-					pd_offs_out);
-	}
+		pd = pd_next;
+		lvl++;
+	} else {
 
 	if (pd->mem == NULL) {
 		return -EINVAL;
 	}
 
 	/*
-	 * Take into account the real offset into the nvgpu_mem since the PD
-	 * may be located at an offset other than 0 (due to PD packing).
+	 * Take into account the real offset into the nvgpu_mem
+	 * since the PD may be located at an offset other than 0
+	 * (due to PD packing).
 	 */
 	pte_base = (u32)(pd->mem_offs / sizeof(u32)) +
 		nvgpu_pd_offset_from_index(l, pd_idx);
@@ -979,7 +982,8 @@ static int nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 
 	if (data != NULL) {
 		for (i = 0; i < pte_size; i++) {
-			data[i] = nvgpu_mem_rd32(g, pd->mem, pte_base + i);
+			data[i] = nvgpu_mem_rd32(g, pd->mem,
+					pte_base + i);
 		}
 	}
 
@@ -992,8 +996,12 @@ static int nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 	}
 
 	if (pd_offs_out != NULL) {
-		*pd_offs_out = nvgpu_pd_offset_from_index(l, pd_idx);
+		*pd_offs_out = nvgpu_pd_offset_from_index(l,
+				pd_idx);
 	}
+		done = true;
+	}
+	} while (!done);
 
 	return 0;
 }
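Stitching the hunks together: the patched function now walks the fixed-depth page-table hierarchy one level per loop iteration instead of one level per self-call. A self-contained sketch of the resulting shape, with a hypothetical radix-tree layout standing in for nvgpu's struct nvgpu_gmmu_pd and mmu_levels (the real level-termination test and PD layout are not visible in these hunks and differ in detail):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MMU_LEVELS 4U

/* Hypothetical page directory: a fixed-depth radix tree. */
struct pd {
	struct pd *entries[512];
	uint32_t *mem;			/* non-NULL only at the final level */
};

/* Hypothetical index extraction: 9 bits of the address per level. */
static uint32_t pd_index(uint64_t vaddr, uint32_t lvl)
{
	return (uint32_t)(vaddr >> (39U - (9U * lvl))) & 511U;
}

/* Iterative walk in the shape of the patched nvgpu_locate_pte(). */
static int locate_pte(struct pd *pd, uint64_t vaddr, uint32_t *data_out)
{
	uint32_t lvl = 0U;
	bool done = false;

	do {
		uint32_t idx = pd_index(vaddr, lvl);

		if ((lvl + 1U) < MMU_LEVELS) {	/* not the final level */
			if (pd->entries[idx] == NULL) {
				return -1;	/* unmapped address */
			}
			pd = pd->entries[idx];	/* descend: was the recursion */
			lvl++;
		} else {
			if (pd->mem == NULL) {
				return -1;
			}
			*data_out = pd->mem[idx];
			done = true;		/* leaf reached: stop walking */
		}
	} while (!done);

	return 0;
}

Because each iteration descends exactly one level of a fixed-depth hierarchy, the loop is bounded by the number of MMU levels, which is precisely the static bound on stack and time that Rule 17.2 is after.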