gpu: nvgpu: execute gr_init_prepare_hw() for each instance

Rename gr_init_reset_enable_hw() to gr_init_prepare_hw() since this
function does not actually do reset, but just prepares the HW
after reset for other SW/HW initialization.

Add a new function gr_init_prepare_hw_impl() that executes per-instance
sequence to prepare GR hardware. Execute this inside
nvgpu_gr_exec_with_ret_for_each_instance().

Note that enabling GR engine interrupts in MC is still expected to
be done in one shot; hence, keep that code outside of
gr_init_prepare_hw_impl().

Remove redundant calls to gops.gr.init.fifo_access() and
enable_gr_interrupts() from gr_init_setup_hw().
gr_init_prepare_hw() already does this, and it executes before
gr_init_setup_hw().

Jira NVGPU-5648

Change-Id: If0b7207f80c2fb00d894afebce04b06b7b61d432
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2405408
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Lakshmanan M <lm@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Deepak Nibade
2020-08-27 15:24:15 +05:30
committed by Alex Waterman
parent e579a708f7
commit 35fabed1e8

View File

@@ -209,16 +209,6 @@ int nvgpu_gr_suspend(struct gk20a *g)
return ret;
}
/*
 * Enable GR engine interrupts.
 *
 * First unmasks the GR unit in both the stalling and non-stalling
 * interrupt trees at the MC (master controller) level, then enables
 * interrupt generation within the GR unit itself via the per-chip
 * intr HAL.
 */
static void enable_gr_interrupts(struct gk20a *g)
{
/** Enable stalling and non-stalling GR interrupts at MC level */
nvgpu_mc_intr_stall_unit_config(g, MC_INTR_UNIT_GR, MC_INTR_ENABLE);
nvgpu_mc_intr_nonstall_unit_config(g, MC_INTR_UNIT_GR, MC_INTR_ENABLE);
/** Enable interrupts within the GR unit */
g->ops.gr.intr.enable_interrupts(g, true);
}
static int gr_init_setup_hw(struct gk20a *g)
{
struct nvgpu_gr *gr = g->gr;
@@ -248,13 +238,6 @@ static int gr_init_setup_hw(struct gk20a *g)
g->ops.priv_ring.set_ppriv_timeout_settings(g);
}
/* enable fifo access */
g->ops.gr.init.fifo_access(g, true);
/* TBD: reload gr ucode when needed */
enable_gr_interrupts(g);
/** Enable fecs error interrupts */
g->ops.gr.falcon.fecs_host_int_enable(g);
@@ -563,7 +546,7 @@ clean_up:
return err;
}
static int gr_init_reset_enable_hw(struct gk20a *g)
static int gr_init_prepare_hw_impl(struct gk20a *g)
{
struct netlist_av_list *sw_non_ctx_load =
nvgpu_netlist_get_sw_non_ctx_load_av_list(g);
@@ -572,11 +555,12 @@ static int gr_init_reset_enable_hw(struct gk20a *g)
nvgpu_log_fn(g, " ");
/** Enable interrupts */
g->ops.gr.intr.enable_interrupts(g, true);
/* enable fifo access */
g->ops.gr.init.fifo_access(g, true);
enable_gr_interrupts(g);
/* load non_ctx init */
nvgpu_log_info(g, "begin: netlist: sw_non_ctx_load: register writes");
for (i = 0; i < sw_non_ctx_load->count; i++) {
@@ -610,6 +594,18 @@ out:
return err;
}
/*
 * Prepare GR hardware after reset.
 *
 * GR interrupts are unmasked at the MC level once ("in one shot") for
 * all instances; the remaining per-instance preparation sequence is
 * delegated to gr_init_prepare_hw_impl(), executed for each GR
 * instance via nvgpu_gr_exec_with_ret_for_each_instance().
 *
 * Returns 0 on success, or the first non-zero error code reported by
 * gr_init_prepare_hw_impl() for any instance.
 */
static int gr_init_prepare_hw(struct gk20a *g)
{
nvgpu_log_fn(g, " ");
/** Enable interrupts at MC level (one shot, covers all GR instances) */
nvgpu_mc_intr_stall_unit_config(g, MC_INTR_UNIT_GR, MC_INTR_ENABLE);
nvgpu_mc_intr_nonstall_unit_config(g, MC_INTR_UNIT_GR, MC_INTR_ENABLE);
return nvgpu_gr_exec_with_ret_for_each_instance(g,
gr_init_prepare_hw_impl(g));
}
static int gr_reset_engine(struct gk20a *g)
{
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
@@ -682,7 +678,7 @@ int nvgpu_gr_enable_hw(struct gk20a *g)
return err;
}
err = gr_init_reset_enable_hw(g);
err = gr_init_prepare_hw(g);
if (err != 0) {
return err;
}