gpu: nvgpu: avoid gr_falcon dependency outside gr

Basic units such as fifo and rc currently depend on gr_falcon.
Avoid this dependency of non-gr units on gr_falcon by moving the
following functions to gr:

int nvgpu_gr_falcon_disable_ctxsw(struct gk20a *g,
		struct nvgpu_gr_falcon *falcon); ->
int nvgpu_gr_disable_ctxsw(struct gk20a *g);

int nvgpu_gr_falcon_enable_ctxsw(struct gk20a *g,
		struct nvgpu_gr_falcon *falcon); ->
int nvgpu_gr_enable_ctxsw(struct gk20a *g);

int nvgpu_gr_falcon_halt_pipe(struct gk20a *g); ->
int nvgpu_gr_halt_pipe(struct gk20a *g);

Move the corresponding HALs as well and update all call sites to match.
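
The new gr entry points hide the nvgpu_gr_falcon handle from callers.
A minimal sketch of the wrapper pattern (the body is illustrative, not
the literal implementation from this change):

int nvgpu_gr_halt_pipe(struct gk20a *g)
{
	/* gr owns the falcon state; outside units only pass g */
	return nvgpu_gr_falcon_halt_pipe(g);
}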

Also move the following data back from gr_falcon to gr:
struct nvgpu_mutex ctxsw_disable_mutex;
int ctxsw_disable_count;
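
Moving the mutex and count back lets gr arbitrate nested disable/enable
requests itself. A sketch of how the refcounting could work with these
fields (the body is an assumption based on the fields above, and
gr->falcon as the handle location is likewise assumed):

int nvgpu_gr_disable_ctxsw(struct gk20a *g)
{
	struct nvgpu_gr *gr = g->gr;
	int err = 0;

	nvgpu_mutex_acquire(&gr->ctxsw_disable_mutex);
	gr->ctxsw_disable_count++;
	if (gr->ctxsw_disable_count == 1) {
		/* first request actually stops ctxsw */
		err = nvgpu_gr_falcon_disable_ctxsw(g, gr->falcon);
	}
	nvgpu_mutex_release(&gr->ctxsw_disable_mutex);

	return err;
}

nvgpu_gr_enable_ctxsw() would mirror this, re-enabling ctxsw only when
the count drops back to zero.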

JIRA NVGPU-3168

Change-Id: I2bdd4a646b6f87df4c835638fc83c061acf4051e
Signed-off-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2100009
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -1927,7 +1927,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 	 * at that point the hardware state can be inspected to
 	 * determine if the context we're interested in is current.
 	 */
-	err = g->ops.gr.falcon.disable_ctxsw(g, g->gr->falcon);
+	err = g->ops.gr.disable_ctxsw(g);
 	if (err != 0) {
 		nvgpu_err(g, "unable to stop gr ctxsw");
 		/* this should probably be ctx-fatal... */
@@ -1944,7 +1944,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 	err = __gr_gk20a_exec_ctx_ops(ch, ctx_ops, num_ops, num_ctx_wr_ops,
 				num_ctx_rd_ops, ch_is_curr_ctx);
-	tmp_err = g->ops.gr.falcon.enable_ctxsw(g, g->gr->falcon);
+	tmp_err = g->ops.gr.enable_ctxsw(g);
 	if (tmp_err != 0) {
 		nvgpu_err(g, "unable to restart ctxsw!");
 		err = tmp_err;
@@ -2290,7 +2290,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-	err = g->ops.gr.falcon.disable_ctxsw(g, g->gr->falcon);
+	err = g->ops.gr.disable_ctxsw(g);
 	if (err != 0) {
 		nvgpu_err(g, "unable to stop gr ctxsw");
 		goto clean_up;
@@ -2310,7 +2310,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
 	nvgpu_mutex_release(&dbg_s->ch_list_lock);
-	err = g->ops.gr.falcon.enable_ctxsw(g, g->gr->falcon);
+	err = g->ops.gr.enable_ctxsw(g);
 	if (err != 0) {
 		nvgpu_err(g, "unable to restart ctxsw!");
 	}
@@ -2335,7 +2335,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-	err = g->ops.gr.falcon.disable_ctxsw(g, g->gr->falcon);
+	err = g->ops.gr.disable_ctxsw(g);
 	if (err != 0) {
 		nvgpu_err(g, "unable to stop gr ctxsw");
 		goto clean_up;
@@ -2351,7 +2351,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
 		}
 	}
-	err = g->ops.gr.falcon.enable_ctxsw(g, g->gr->falcon);
+	err = g->ops.gr.enable_ctxsw(g);
 	if (err != 0) {
 		nvgpu_err(g, "unable to restart ctxsw!");
 	}