gpu: nvgpu: fix MISRA errors in nvgpu.hal.mm

Rule 8.6 requires each identifier with external linkage to have exactly
one external definition.
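
A minimal sketch of that pattern follows, using hypothetical names
(example_get_iommu_bit, CONFIG_EXAMPLE_NON_FUSA) rather than the real
nvgpu symbols: the declaration sits under the same config guard as its
single definition, so no build configuration sees a declaration without
exactly one matching definition.

  #include <stdint.h>

  struct gk20a;                                   /* opaque, as in the real headers */

  /* header: declaration guarded exactly like the definition below */
  #ifdef CONFIG_EXAMPLE_NON_FUSA
  uint32_t example_get_iommu_bit(struct gk20a *g);
  #endif

  /* source file: the one and only external definition */
  #ifdef CONFIG_EXAMPLE_NON_FUSA
  uint32_t example_get_iommu_bit(struct gk20a *g)
  {
          (void)g;
          return 34U;                             /* placeholder bit position */
  }
  #endif
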
The Rule 10.x series requires operands to have an appropriate essential
type; the left and right operands of an operator must have the same
width and type.
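
The gp10b hunks below follow this pattern; here is a minimal,
self-contained sketch with hypothetical names (pde_address(), with the
field mask and shift passed in as parameters): the 32-bit field mask is
first held in a u32 local and only then widened explicitly, so the |, &
and << operators each see operands of one width.

  #include <stdint.h>

  #define U64(x) ((uint64_t)(x))

  static uint64_t pde_address(uint32_t pde_lo, uint32_t pde_hi,
                              uint32_t addr_mask, uint32_t shift)
  {
          /* hold the 32-bit field mask in a local of its own width ... */
          uint32_t mask = addr_mask;

          /* ... then widen every operand explicitly before combining */
          return ((U64(pde_hi) << U64(32)) |
                  (U64(pde_lo) & U64(mask))) << U64(shift);
  }

This mirrors how the diff introduces new_pde_addr_big_sys before the
64-bit arithmetic.
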
Rule 14.3 does not allow controlling expressions to be invariant; every
controlling condition must be able to evaluate both true and false.
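
The gv11b hunk below applies this by compiling the replay update out
when the feature is configured out; a minimal sketch with hypothetical
names (CONFIG_EXAMPLE_REPLAYABLE_FAULT, replay_start_ack_all_val()):
the statement only exists in builds where its result can actually vary,
instead of leaving behind a condition that can never change.

  #include <stdint.h>

  static uint32_t replay_start_ack_all_val(void)
  {
          return 0x10U;                           /* placeholder value for the sketch */
  }

  static void update_replay_val(uint32_t *invalidate_replay_val)
  {
  #ifdef CONFIG_EXAMPLE_REPLAYABLE_FAULT
          /* present only in builds where the result can actually vary */
          *invalidate_replay_val |= replay_start_ack_all_val();
  #else
          (void)invalidate_replay_val;            /* feature compiled out entirely */
  #endif
  }
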

Jira NVGPU-3858

Change-Id: I043a3836c4a2cb9c5a52d3053516c517389f55a2
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2162295
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Vedashree Vidwans
Date:      2019-07-26 15:20:10 -07:00
Committer: mobile promotions
Commit:    afae2efc23
Parent:    01dc65fe32

4 changed files with 16 additions and 5 deletions


@@ -33,16 +33,19 @@ struct gk20a;
* know the actual type before it declares an array (even if the size is not
* technically known here).
*/
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
extern const struct gk20a_mmu_level gk20a_mm_levels_64k[];
extern const struct gk20a_mmu_level gk20a_mm_levels_128k[];
const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
u64 big_page_size);
u32 gk20a_mm_get_iommu_bit(struct gk20a *g);
#endif
u32 gk20a_get_pde_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
struct nvgpu_gmmu_pd *pd, u32 pd_idx);
u32 gk20a_get_pte_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
struct nvgpu_gmmu_pd *pd, u32 pd_idx);
u32 gk20a_mm_get_iommu_bit(struct gk20a *g);
#endif


@@ -29,8 +29,10 @@ struct gk20a;
struct nvgpu_gmmu_attrs;
u32 gm20b_mm_get_big_page_sizes(void);
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
u32 gm20b_mm_get_default_big_page_size(void);
u64 gm20b_gpu_phys_addr(struct gk20a *g,
struct nvgpu_gmmu_attrs *attrs, u64 phys);
#endif
#endif


@@ -334,8 +334,10 @@ static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
(gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f() |
gmmu_new_dual_pde_aperture_small_sys_mem_coh_f() |
gmmu_new_dual_pde_aperture_small_video_memory_f())) != 0U) {
u64 addr = ((U64(pde_v[3]) << U64(32)) | (U64(pde_v[2]) &
U64(gmmu_new_dual_pde_address_small_sys_f(~U32(0U))))) <<
u32 new_pde_addr_big_sys =
gmmu_new_dual_pde_address_small_sys_f(~U32(0U));
u64 addr = ((U64(pde_v[3]) << U64(32)) |
(U64(pde_v[2]) & U64(new_pde_addr_big_sys))) <<
U64(gmmu_new_dual_pde_address_shift_v());
if (addr != 0ULL) {
@@ -347,8 +349,10 @@ static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
(gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f() |
gmmu_new_dual_pde_aperture_big_sys_mem_coh_f() |
gmmu_new_dual_pde_aperture_big_video_memory_f())) != 0U) {
u64 addr = ((U64(pde_v[1]) << U64(32)) | (U64(pde_v[0]) &
U64(gmmu_new_dual_pde_address_big_sys_f(~U32(0U))))) <<
u32 new_pde_addr_big_sys =
gmmu_new_dual_pde_address_big_sys_f(~U32(0U));
u64 addr = ((U64(pde_v[1]) << U64(32)) |
(U64(pde_v[0]) & U64(new_pde_addr_big_sys))) <<
U64(gmmu_new_dual_pde_address_big_shift_v());
if (addr != 0ULL) {


@@ -425,8 +425,10 @@ void gv11b_mm_mmu_fault_handle_mmu_fault_common(struct gk20a *g,
*invalidate_replay_val |=
gv11b_fb_get_replay_cancel_global_val(g);
} else {
#ifdef CONFIG_NVGPU_REPLAYABLE_FAULT
*invalidate_replay_val |=
gv11b_fb_get_replay_start_ack_all(g);
#endif
}
} else {
/* cancel faults other than invalid pte */