gpu: nvgpu: fix MISRA errors in nvgpu.hal.gr.init

Rule 8.6 requires each identifier with external linkage to have exactly
one external definition.
Rule 10.x requires operands to have an appropriate essential type; the
left and right operands of an operator should be of the same width and
type.
This patch fixes the above-mentioned errors in hal/gr/init/gr_init_gm20b.h,
hal/gr/init/gr_init_gm20b_fusa.c and hal/gr/init/gr_init_gp10b.h.
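The Rule 10.x change amounts to giving integer constants an unsigned
suffix so both operands of a bitwise operator share the same essential
type and width. A minimal standalone sketch (not nvgpu code;
ctl_access_f() is a hypothetical stand-in for gr_gpfifo_ctl_access_f()):

#include <stdint.h>

typedef uint32_t u32;

/* Hypothetical field helper in the style of gr_gpfifo_ctl_access_f(). */
static u32 ctl_access_f(u32 v)
{
	return (v & 0x1U) << 16U;
}

static u32 disable_access(u32 val)
{
	/* Non-compliant: plain '1' has essentially signed type. */
	/* val &= ~ctl_access_f(1); */

	/* Compliant: '1U' matches the unsigned operand. */
	val &= ~ctl_access_f(1U);
	return val;
}

The Rule 8.6 side of the change is visible in the header diffs below:
declarations whose definitions are compiled only for non-FuSa builds
move under #ifdef CONFIG_NVGPU_HAL_NON_FUSA, so every identifier
declared in a given build configuration also has exactly one external
definition there.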

Jira NVGPU-3828

Change-Id: I915c837a05f62e7bfa543a08e488d118376b23b7
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2158379
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Vedashree Vidwans
Date: 2019-07-21 22:27:34 -07:00
Committed by: mobile promotions
Commit: f85baae91a (parent: 40460650de)

3 changed files with 56 additions and 44 deletions

hal/gr/init/gr_init_gm20b.h

@@ -38,21 +38,8 @@ struct nvgpu_gr_config;
 void gm20b_gr_init_lg_coalesce(struct gk20a *g, u32 data);
 void gm20b_gr_init_su_coalesce(struct gk20a *g, u32 data);
 void gm20b_gr_init_pes_vsc_stream(struct gk20a *g);
-void gm20b_gr_init_gpc_mmu(struct gk20a *g);
 void gm20b_gr_init_fifo_access(struct gk20a *g, bool enable);
-void gm20b_gr_init_get_access_map(struct gk20a *g,
-	u32 **whitelist, u32 *num_entries);
-void gm20b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
-	struct nvgpu_gr_config *gr_config);
-u32 gm20b_gr_init_get_sm_id_size(void);
-int gm20b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
-	struct nvgpu_gr_config *gr_config);
-void gm20b_gr_init_tpc_mask(struct gk20a *g, u32 gpc_index, u32 pes_tpc_mask);
-#ifdef CONFIG_NVGPU_GRAPHICS
-void gm20b_gr_init_rop_mapping(struct gk20a *g,
-	struct nvgpu_gr_config *gr_config);
-#endif
-int gm20b_gr_init_fs_state(struct gk20a *g);
 void gm20b_gr_init_pd_tpc_per_gpc(struct gk20a *g,
 	struct nvgpu_gr_config *gr_config);
 void gm20b_gr_init_pd_skip_table_gpc(struct gk20a *g,
@@ -69,6 +56,28 @@ void gm20b_gr_init_load_method_init(struct gk20a *g,
 	struct netlist_av_list *sw_method_init);
 int gm20b_gr_init_load_sw_bundle_init(struct gk20a *g,
 	struct netlist_av_list *sw_bundle_init);
+u32 gm20b_gr_init_get_global_ctx_cb_buffer_size(struct gk20a *g);
+u32 gm20b_gr_init_get_global_ctx_pagepool_buffer_size(struct gk20a *g);
+void gm20b_gr_init_commit_global_attrib_cb(struct gk20a *g,
+	struct nvgpu_gr_ctx *gr_ctx, u32 tpc_count, u32 max_tpc, u64 addr,
+	bool patch);
+u32 gm20b_gr_init_get_patch_slots(struct gk20a *g,
+	struct nvgpu_gr_config *config);
+#ifdef CONFIG_NVGPU_HAL_NON_FUSA
+void gm20b_gr_init_gpc_mmu(struct gk20a *g);
+void gm20b_gr_init_get_access_map(struct gk20a *g,
+	u32 **whitelist, u32 *num_entries);
+void gm20b_gr_init_sm_id_numbering(struct gk20a *g, u32 gpc, u32 tpc, u32 smid,
+	struct nvgpu_gr_config *gr_config);
+u32 gm20b_gr_init_get_sm_id_size(void);
+int gm20b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
+	struct nvgpu_gr_config *gr_config);
+void gm20b_gr_init_tpc_mask(struct gk20a *g, u32 gpc_index, u32 pes_tpc_mask);
+int gm20b_gr_init_fs_state(struct gk20a *g);
 void gm20b_gr_init_commit_global_timeslice(struct gk20a *g);
 u32 gm20b_gr_init_get_bundle_cb_default_size(struct gk20a *g);
@@ -80,8 +89,6 @@ u32 gm20b_gr_init_get_attrib_cb_size(struct gk20a *g, u32 tpc_count);
 u32 gm20b_gr_init_get_alpha_cb_size(struct gk20a *g, u32 tpc_count);
 u32 gm20b_gr_init_get_global_attr_cb_size(struct gk20a *g, u32 tpc_count,
 	u32 max_tpc);
-u32 gm20b_gr_init_get_global_ctx_cb_buffer_size(struct gk20a *g);
-u32 gm20b_gr_init_get_global_ctx_pagepool_buffer_size(struct gk20a *g);
 void gm20b_gr_init_commit_global_bundle_cb(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, u64 addr, u32 size, bool patch);
@@ -89,15 +96,10 @@ u32 gm20b_gr_init_pagepool_default_size(struct gk20a *g);
 void gm20b_gr_init_commit_global_pagepool(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, u64 addr, size_t size, bool patch,
 	bool global_ctx);
-void gm20b_gr_init_commit_global_attrib_cb(struct gk20a *g,
-	struct nvgpu_gr_ctx *gr_ctx, u32 tpc_count, u32 max_tpc, u64 addr,
-	bool patch);
 void gm20b_gr_init_commit_global_cb_manager(struct gk20a *g,
 	struct nvgpu_gr_config *config, struct nvgpu_gr_ctx *gr_ctx,
 	bool patch);
-u32 gm20b_gr_init_get_patch_slots(struct gk20a *g,
-	struct nvgpu_gr_config *config);
 void gm20b_gr_init_detect_sm_arch(struct gk20a *g);
 void gm20b_gr_init_get_supported_preemption_modes(
@@ -105,4 +107,10 @@ void gm20b_gr_init_get_supported_preemption_modes(
 void gm20b_gr_init_get_default_preemption_modes(
 	u32 *default_graphics_preempt_mode, u32 *default_compute_preempt_mode);
+#ifdef CONFIG_NVGPU_GRAPHICS
+void gm20b_gr_init_rop_mapping(struct gk20a *g,
+	struct nvgpu_gr_config *gr_config);
+#endif
+#endif /* CONFIG_NVGPU_HAL_NON_FUSA */
 #endif /* NVGPU_GR_INIT_GM20B_H */

hal/gr/init/gr_init_gm20b_fusa.c

@@ -84,15 +84,15 @@ void gm20b_gr_init_fifo_access(struct gk20a *g, bool enable)
 	u32 fifo_val;
 
 	fifo_val = nvgpu_readl(g, gr_gpfifo_ctl_r());
-	fifo_val &= ~gr_gpfifo_ctl_semaphore_access_f(1);
-	fifo_val &= ~gr_gpfifo_ctl_access_f(1);
+	fifo_val &= ~gr_gpfifo_ctl_semaphore_access_f(1U);
+	fifo_val &= ~gr_gpfifo_ctl_access_f(1U);
 
 	if (enable) {
 		fifo_val |= (gr_gpfifo_ctl_access_enabled_f() |
 			gr_gpfifo_ctl_semaphore_access_enabled_f());
 	} else {
-		fifo_val |= (gr_gpfifo_ctl_access_f(0) |
-			gr_gpfifo_ctl_semaphore_access_f(0));
+		fifo_val |= (gr_gpfifo_ctl_access_f(0U) |
+			gr_gpfifo_ctl_semaphore_access_f(0U));
 	}
 
 	nvgpu_writel(g, gr_gpfifo_ctl_r(), fifo_val);

hal/gr/init/gr_init_gp10b.h

@@ -29,12 +29,32 @@ struct gk20a;
 struct nvgpu_gr_ctx;
 struct nvgpu_gr_config;
+u32 gp10b_gr_init_get_sm_id_size(void);
+int gp10b_gr_init_wait_empty(struct gk20a *g);
+void gp10b_gr_init_commit_global_bundle_cb(struct gk20a *g,
+	struct nvgpu_gr_ctx *gr_ctx, u64 addr, u32 size, bool patch);
+u32 gp10b_gr_init_pagepool_default_size(struct gk20a *g);
+void gp10b_gr_init_commit_global_pagepool(struct gk20a *g,
+	struct nvgpu_gr_ctx *gr_ctx, u64 addr, size_t size, bool patch,
+	bool global_ctx);
+void gp10b_gr_init_commit_global_cb_manager(struct gk20a *g,
+	struct nvgpu_gr_config *config, struct nvgpu_gr_ctx *gr_ctx,
+	bool patch);
+u32 gp10b_gr_init_get_ctx_attrib_cb_size(struct gk20a *g, u32 betacb_size,
+	u32 tpc_count, u32 max_tpc);
+void gp10b_gr_init_get_supported_preemption_modes(
+	u32 *graphics_preemption_mode_flags, u32 *compute_preemption_mode_flags);
+void gp10b_gr_init_get_default_preemption_modes(
+	u32 *default_graphics_preempt_mode, u32 *default_compute_preempt_mode);
+#ifdef CONFIG_NVGPU_HAL_NON_FUSA
 void gp10b_gr_init_get_access_map(struct gk20a *g,
 	u32 **whitelist, u32 *num_entries);
-u32 gp10b_gr_init_get_sm_id_size(void);
 int gp10b_gr_init_sm_id_config(struct gk20a *g, u32 *tpc_sm_id,
 	struct nvgpu_gr_config *gr_config);
-int gp10b_gr_init_wait_empty(struct gk20a *g);
 int gp10b_gr_init_fs_state(struct gk20a *g);
 int gp10b_gr_init_preemption_state(struct gk20a *g);
@@ -45,30 +65,13 @@ u32 gp10b_gr_init_get_alpha_cb_size(struct gk20a *g, u32 tpc_count);
 u32 gp10b_gr_init_get_global_attr_cb_size(struct gk20a *g, u32 tpc_count,
 	u32 max_tpc);
-void gp10b_gr_init_commit_global_bundle_cb(struct gk20a *g,
-	struct nvgpu_gr_ctx *gr_ctx, u64 addr, u32 size, bool patch);
-u32 gp10b_gr_init_pagepool_default_size(struct gk20a *g);
-void gp10b_gr_init_commit_global_pagepool(struct gk20a *g,
-	struct nvgpu_gr_ctx *gr_ctx, u64 addr, size_t size, bool patch,
-	bool global_ctx);
 void gp10b_gr_init_commit_global_attrib_cb(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, u32 tpc_count, u32 max_tpc, u64 addr,
 	bool patch);
-void gp10b_gr_init_commit_global_cb_manager(struct gk20a *g,
-	struct nvgpu_gr_config *config, struct nvgpu_gr_ctx *gr_ctx,
-	bool patch);
-u32 gp10b_gr_init_get_ctx_attrib_cb_size(struct gk20a *g, u32 betacb_size,
-	u32 tpc_count, u32 max_tpc);
 void gp10b_gr_init_commit_cbes_reserve(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, bool patch);
-void gp10b_gr_init_get_supported_preemption_modes(
-	u32 *graphics_preemption_mode_flags, u32 *compute_preemption_mode_flags);
-void gp10b_gr_init_get_default_preemption_modes(
-	u32 *default_graphics_preempt_mode, u32 *default_compute_preempt_mode);
 #ifdef CONFIG_NVGPU_GRAPHICS
 u32 gp10b_gr_init_get_attrib_cb_gfxp_default_size(struct gk20a *g);
 u32 gp10b_gr_init_get_attrib_cb_gfxp_size(struct gk20a *g);
@@ -80,5 +83,6 @@ u32 gp10b_gr_init_get_ctx_betacb_size(struct gk20a *g);
 void gp10b_gr_init_commit_ctxsw_spill(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, u64 addr, u32 size, bool patch);
 #endif /* CONFIG_NVGPU_GRAPHICS */
+#endif /* CONFIG_NVGPU_HAL_NON_FUSA */
 #endif /* NVGPU_GR_INIT_GP10B_H */