Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: mm: whitelist MISRA 17.2 violations
Whitelist 2 MISRA Rule 17.2 violations in MM that were approved as
deviations in TID-278. These two violations are for recursive functions
that handle page table descriptors in the GMMU page table. In both cases
the recursion is tightly controlled by limiting its depth to the number
of possible page table levels in the hardware. For current hardware that
is a maximum recursion depth of 5, which is easily an acceptable depth
and should cause no stack issues.

JIRA NVGPU-3489
JIRA NVGPU-3492
JIRA TID-278

Change-Id: I5b801ff77f66bb8698f1d6adcd41ebbad3f86f92
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2230077
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by Alex Waterman
parent 72fc76c8ac
commit 6d0ef6473d
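The safety argument in the commit message is that the recursion can never go deeper than the number of page table levels defined by the hardware (at most 5 on current chips). A minimal standalone sketch of that bounding pattern follows; it is illustrative only, and every name in it (MAX_LEVELS, demo_mmu_level, demo_set_pd_level) is a hypothetical stand-in rather than the nvgpu implementation.

/*
 * Illustrative sketch only -- not the nvgpu code. It shows why the recursion
 * depth is naturally bounded: each recursive call descends one entry in a
 * fixed table of MMU levels, and that table ends after at most MAX_LEVELS
 * real entries (5 on current hardware, per the commit message).
 */
#include <stdio.h>

#define MAX_LEVELS 5U	/* maximum page-table depth on current hardware */

struct demo_mmu_level {
	int last;	/* non-zero marks the terminator entry */
};

static int demo_set_pd_level(const struct demo_mmu_level *levels, unsigned int lvl)
{
	/* The terminator entry stops the descent ... */
	if (levels[lvl].last != 0) {
		return 0;
	}

	printf("programming page directory entry at level %u\n", lvl);

	/*
	 * ... so the call depth can never exceed the number of real entries
	 * in the levels table, i.e. the hardware's page-table depth.
	 */
	return demo_set_pd_level(levels, lvl + 1U);
}

int main(void)
{
	/* MAX_LEVELS real levels followed by one terminator. */
	const struct demo_mmu_level levels[MAX_LEVELS + 1U] = {
		{0}, {0}, {0}, {0}, {0}, {1}
	};

	return demo_set_pd_level(levels, 0U);
}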
@@ -428,6 +428,7 @@ static int nvgpu_set_pd_level_is_next_level_pde(struct vm_gk20a *vm,
  * phys_addr will always point to a contiguous range - the discontiguous nature
  * of DMA buffers is taken care of at the layer above this.
  */
+NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 17_2), "TID-278")
 static int nvgpu_set_pd_level(struct vm_gk20a *vm,
 			      struct nvgpu_gmmu_pd *pd,
 			      u32 lvl,
@@ -543,6 +544,7 @@ static int nvgpu_set_pd_level(struct vm_gk20a *vm,
 
 	return 0;
 }
+NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 17_2))
 
 static int nvgpu_gmmu_do_update_page_table_sgl(struct vm_gk20a *vm,
 					       struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl,
@@ -181,6 +181,7 @@ u32 nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm)
 	return vm->mmu_levels[final_pde_level].lo_bit[0];
 }
 
+NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 17_2), "TID-278")
 static void nvgpu_vm_do_free_entries(struct vm_gk20a *vm,
 				     struct nvgpu_gmmu_pd *pd,
 				     u32 level)
@@ -206,6 +207,7 @@ static void nvgpu_vm_do_free_entries(struct vm_gk20a *vm,
 		pd->entries = NULL;
 	}
 }
+NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 17_2))
 
 static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
 				   struct nvgpu_gmmu_pd *pdb)
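Both files use the same bracketing pattern shown in the hunks above: NVGPU_COV_WHITELIST_BLOCK_BEGIN is placed immediately before the deviating function's definition and NVGPU_COV_WHITELIST_BLOCK_END immediately after its closing brace, so the approved deviation covers exactly one function. The sketch below shows that shape in isolation; the no-op macro definitions exist only so the snippet compiles standalone, and demo_pd / demo_do_free_entries are hypothetical stand-ins for the driver's page-directory types and recursive free routine.

/*
 * Schematic of the whitelist bracketing used in the diff above. The stand-in
 * macro definitions below exist only so this sketch compiles on its own; in
 * the driver the real macros come from the nvgpu headers.
 */
#include <stddef.h>

#define NVGPU_COV_WHITELIST_BLOCK_BEGIN(type, num, rule, jira)	/* stand-in */
#define NVGPU_COV_WHITELIST_BLOCK_END(rule)			/* stand-in */
#define NVGPU_MISRA(group, rule)				/* stand-in */

struct demo_pd {
	struct demo_pd *entries;	/* child directories, NULL at the leaves */
	unsigned int num_entries;
	unsigned int max_levels;	/* hardware bound on table depth */
};

/* The begin/end macros bracket exactly the one recursive function. */
NVGPU_COV_WHITELIST_BLOCK_BEGIN(deviate, 1, NVGPU_MISRA(Rule, 17_2), "TID-278")
static void demo_do_free_entries(struct demo_pd *pd, unsigned int level)
{
	unsigned int i;

	/* Leaves and the hardware depth bound both stop the recursion. */
	if (pd->entries == NULL || level >= pd->max_levels) {
		return;
	}

	for (i = 0U; i < pd->num_entries; i++) {
		demo_do_free_entries(&pd->entries[i], level + 1U);
	}

	pd->entries = NULL;
}
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 17_2))

int main(void)
{
	struct demo_pd leaf = { NULL, 0U, 5U };
	struct demo_pd root = { &leaf, 1U, 5U };

	demo_do_free_entries(&root, 0U);
	return 0;
}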