gpu: nvgpu: compile out unused code in gr.falcon hal

gm20b_gr_falcon_submit_fecs_sideband_method_op is used only when
graphics support is enabled. Guard that function with a
CONFIG_NVGPU_GRAPHICS check so it is compiled out otherwise.

Jira NVGPU-3968

Change-Id: I858f9b27ec668ebbfa02abf89dd58d7496f5678d
Signed-off-by: vinodg <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2248365
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
vinodg
2019-11-26 17:26:26 -08:00
committed by Alex Waterman
parent 27cc81dafa
commit c50de751dd
2 changed files with 51 additions and 48 deletions

View File

@@ -39,21 +39,6 @@ u32 gm20b_gr_falcon_gpccs_base_addr(void);
void gm20b_gr_falcon_fecs_dump_stats(struct gk20a *g);
u32 gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id(struct gk20a *g);
u32 gm20b_gr_falcon_get_fecs_ctxsw_mailbox_size(void);
#ifdef CONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT
void gm20b_gr_falcon_load_gpccs_dmem(struct gk20a *g,
const u32 *ucode_u32_data, u32 ucode_u32_size);
void gm20b_gr_falcon_load_fecs_dmem(struct gk20a *g,
const u32 *ucode_u32_data, u32 ucode_u32_size);
void gm20b_gr_falcon_load_gpccs_imem(struct gk20a *g,
const u32 *ucode_u32_data, u32 ucode_u32_size);
void gm20b_gr_falcon_load_fecs_imem(struct gk20a *g,
const u32 *ucode_u32_data, u32 ucode_u32_size);
void gm20b_gr_falcon_start_ucode(struct gk20a *g);
void gm20b_gr_falcon_fecs_host_int_enable(struct gk20a *g);
#endif
#ifdef CONFIG_NVGPU_SIM
void gm20b_gr_falcon_configure_fmodel(struct gk20a *g);
#endif
void gm20b_gr_falcon_start_gpccs(struct gk20a *g);
void gm20b_gr_falcon_start_fecs(struct gk20a *g);
u32 gm20b_gr_falcon_get_gpccs_start_reg_offset(void);
@@ -69,8 +54,6 @@ int gm20b_gr_falcon_wait_mem_scrubbing(struct gk20a *g);
int gm20b_gr_falcon_wait_ctxsw_ready(struct gk20a *g);
int gm20b_gr_falcon_submit_fecs_method_op(struct gk20a *g,
struct nvgpu_fecs_method_op op, bool sleepduringwait);
int gm20b_gr_falcon_submit_fecs_sideband_method_op(struct gk20a *g,
struct nvgpu_fecs_method_op op);
int gm20b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
u32 data, u32 *ret_val);
void gm20b_gr_falcon_set_current_ctx_invalid(struct gk20a *g);
@@ -82,5 +65,23 @@ int gm20b_gr_falcon_init_ctx_state(struct gk20a *g,
struct nvgpu_gr_falcon_query_sizes *sizes);
u32 gm20b_gr_falcon_read_fecs_ctxsw_status0(struct gk20a *g);
u32 gm20b_gr_falcon_read_fecs_ctxsw_status1(struct gk20a *g);
#ifdef CONFIG_NVGPU_GRAPHICS
int gm20b_gr_falcon_submit_fecs_sideband_method_op(struct gk20a *g,
struct nvgpu_fecs_method_op op);
#endif
#ifdef CONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT
void gm20b_gr_falcon_load_gpccs_dmem(struct gk20a *g,
const u32 *ucode_u32_data, u32 ucode_u32_size);
void gm20b_gr_falcon_load_fecs_dmem(struct gk20a *g,
const u32 *ucode_u32_data, u32 ucode_u32_size);
void gm20b_gr_falcon_load_gpccs_imem(struct gk20a *g,
const u32 *ucode_u32_data, u32 ucode_u32_size);
void gm20b_gr_falcon_load_fecs_imem(struct gk20a *g,
const u32 *ucode_u32_data, u32 ucode_u32_size);
void gm20b_gr_falcon_start_ucode(struct gk20a *g);
void gm20b_gr_falcon_fecs_host_int_enable(struct gk20a *g);
#endif
#ifdef CONFIG_NVGPU_SIM
void gm20b_gr_falcon_configure_fmodel(struct gk20a *g);
#endif
#endif /* NVGPU_GR_FALCON_GM20B_H */

View File

@@ -786,36 +786,6 @@ int gm20b_gr_falcon_submit_fecs_method_op(struct gk20a *g,
return ret;
}
/* Sideband mailbox writes are done a bit differently */
/*
 * Submit a FECS method via the sideband mailbox path.
 *
 * @g:  GPU driver context.
 * @op: method descriptor — op.method.{addr,data} select the FECS method,
 *      op.mailbox.{id,clr,ret,ok,fail} and op.cond.{ok,fail} describe the
 *      mailbox used to clear state and poll for completion.
 *
 * Returns 0 on success, or the (negative) error from the ucode wait helper.
 *
 * The whole sequence is serialized under gr_falcon->fecs_mutex: clear the
 * selected ctxsw mailbox, program the method data, then push the method
 * address (the push write is what kicks off FECS execution — ordering of
 * these three writes matters). Completion is detected by polling the
 * mailbox via gm20b_gr_falcon_ctx_wait_ucode (semantics of the ok/fail
 * conditions live in that helper; the final "false" argument presumably
 * selects no-sleep polling — confirm against the helper's signature).
 */
int gm20b_gr_falcon_submit_fecs_sideband_method_op(struct gk20a *g,
struct nvgpu_fecs_method_op op)
{
int ret;
struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
/* One FECS method submission at a time. */
nvgpu_mutex_acquire(&gr_falcon->fecs_mutex);
/* Clear stale state in the target mailbox before submitting. */
nvgpu_writel(g, gr_fecs_ctxsw_mailbox_clear_r(op.mailbox.id),
gr_fecs_ctxsw_mailbox_clear_value_f(op.mailbox.clr));
/* Data must be programmed before the push register triggers the method. */
nvgpu_writel(g, gr_fecs_method_data_r(), op.method.data);
nvgpu_writel(g, gr_fecs_method_push_r(),
gr_fecs_method_push_adr_f(op.method.addr));
/* Poll the mailbox for the ok/fail completion values. */
ret = gm20b_gr_falcon_ctx_wait_ucode(g, op.mailbox.id, op.mailbox.ret,
op.cond.ok, op.mailbox.ok,
op.cond.fail, op.mailbox.fail,
false);
if (ret != 0) {
nvgpu_err(g, "fecs method: data=0x%08x push adr=0x%08x",
op.method.data, op.method.addr);
}
nvgpu_mutex_release(&gr_falcon->fecs_mutex);
return ret;
}
int gm20b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
u32 data, u32 *ret_val)
{
@@ -1022,3 +992,35 @@ u32 gm20b_gr_falcon_read_fecs_ctxsw_status1(struct gk20a *g)
{
return nvgpu_readl(g, gr_fecs_ctxsw_status_1_r());
}
#ifdef CONFIG_NVGPU_GRAPHICS
/* Sideband mailbox writes are done a bit differently */
/*
 * Submit a FECS method via the sideband mailbox path.
 *
 * Compiled only with CONFIG_NVGPU_GRAPHICS: per the commit message, this
 * entry point has graphics-only callers, so it is compiled out otherwise.
 *
 * @g:  GPU driver context.
 * @op: method descriptor — op.method.{addr,data} select the FECS method,
 *      op.mailbox.{id,clr,ret,ok,fail} and op.cond.{ok,fail} describe the
 *      mailbox used to clear state and poll for completion.
 *
 * Returns 0 on success, or the (negative) error from the ucode wait helper.
 *
 * Under gr_falcon->fecs_mutex: clear the selected ctxsw mailbox, program
 * the method data, then push the method address (the push write starts
 * FECS execution — the order of these three writes matters). Completion
 * is detected by polling via gm20b_gr_falcon_ctx_wait_ucode (the final
 * "false" argument presumably selects no-sleep polling — confirm against
 * the helper's signature).
 */
int gm20b_gr_falcon_submit_fecs_sideband_method_op(struct gk20a *g,
struct nvgpu_fecs_method_op op)
{
int ret;
struct nvgpu_gr_falcon *gr_falcon = nvgpu_gr_get_falcon_ptr(g);
/* One FECS method submission at a time. */
nvgpu_mutex_acquire(&gr_falcon->fecs_mutex);
/* Clear stale state in the target mailbox before submitting. */
nvgpu_writel(g, gr_fecs_ctxsw_mailbox_clear_r(op.mailbox.id),
gr_fecs_ctxsw_mailbox_clear_value_f(op.mailbox.clr));
/* Data must be programmed before the push register triggers the method. */
nvgpu_writel(g, gr_fecs_method_data_r(), op.method.data);
nvgpu_writel(g, gr_fecs_method_push_r(),
gr_fecs_method_push_adr_f(op.method.addr));
/* Poll the mailbox for the ok/fail completion values. */
ret = gm20b_gr_falcon_ctx_wait_ucode(g, op.mailbox.id, op.mailbox.ret,
op.cond.ok, op.mailbox.ok,
op.cond.fail, op.mailbox.fail,
false);
if (ret != 0) {
nvgpu_err(g, "fecs method: data=0x%08x push adr=0x%08x",
op.method.data, op.method.addr);
}
nvgpu_mutex_release(&gr_falcon->fecs_mutex);
return ret;
}
#endif