gpu: nvgpu: avoid gr_falcon dependency outside gr

Basic units such as fifo and rc currently depend on
gr_falcon. Remove this dependency on gr_falcon from units
outside gr by moving the following functions into gr:

int nvgpu_gr_falcon_disable_ctxsw(struct gk20a *g,
			struct nvgpu_gr_falcon *falcon); ->
int nvgpu_gr_disable_ctxsw(struct gk20a *g);

int nvgpu_gr_falcon_enable_ctxsw(struct gk20a *g,
			struct nvgpu_gr_falcon *falcon); ->
int nvgpu_gr_enable_ctxsw(struct gk20a *g);
int nvgpu_gr_falcon_halt_pipe(struct gk20a *g); ->
		int nvgpu_gr_halt_pipe(struct gk20a *g);

The corresponding HALs were moved accordingly, and the code was updated to reflect this.

The following data was also moved back from gr_falcon into gr:
struct nvgpu_mutex ctxsw_disable_mutex;
int ctxsw_disable_count;

JIRA NVGPU-3168

Change-Id: I2bdd4a646b6f87df4c835638fc83c061acf4051e
Signed-off-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2100009
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Seshendra Gadagottu
2019-04-17 16:46:28 -07:00
committed by mobile promotions
parent 24af0d3330
commit a91535e3a3
22 changed files with 142 additions and 147 deletions

View File

@@ -2786,7 +2786,7 @@ int nvgpu_channel_deferred_reset_engines(struct gk20a *g,
return 0; return 0;
} }
err = g->ops.gr.falcon.disable_ctxsw(g, g->gr->falcon); err = g->ops.gr.disable_ctxsw(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "failed to disable ctxsw"); nvgpu_err(g, "failed to disable ctxsw");
goto fail; goto fail;
@@ -2821,7 +2821,7 @@ int nvgpu_channel_deferred_reset_engines(struct gk20a *g,
nvgpu_mutex_release(&f->deferred_reset_mutex); nvgpu_mutex_release(&f->deferred_reset_mutex);
clean_up: clean_up:
err = g->ops.gr.falcon.enable_ctxsw(g, g->gr->falcon); err = g->ops.gr.enable_ctxsw(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "failed to enable ctxsw"); nvgpu_err(g, "failed to enable ctxsw");
} }

View File

@@ -543,7 +543,7 @@ void nvgpu_engine_reset(struct gk20a *g, u32 engine_id)
#endif #endif
if (!nvgpu_platform_is_simulation(g)) { if (!nvgpu_platform_is_simulation(g)) {
/*HALT_PIPELINE method, halt GR engine*/ /*HALT_PIPELINE method, halt GR engine*/
if (g->ops.gr.falcon.halt_pipe(g) != 0) { if (g->ops.gr.halt_pipe(g) != 0) {
nvgpu_err(g, "failed to halt gr pipe"); nvgpu_err(g, "failed to halt gr pipe");
} }
/* /*

View File

@@ -35,6 +35,7 @@
#include <nvgpu/gr/fs_state.h> #include <nvgpu/gr/fs_state.h>
#include <nvgpu/gr/fecs_trace.h> #include <nvgpu/gr/fecs_trace.h>
#include <nvgpu/power_features/cg.h> #include <nvgpu/power_features/cg.h>
#include <nvgpu/power_features/pg.h>
#include "gr_priv.h" #include "gr_priv.h"
@@ -381,6 +382,13 @@ static int gr_init_setup_sw(struct gk20a *g)
gr->g = g; gr->g = g;
err = nvgpu_mutex_init(&gr->ctxsw_disable_mutex);
if (err != 0) {
nvgpu_err(g, "Error in ctxsw_disable_mutex init");
return err;
}
gr->ctxsw_disable_count = 0;
#if defined(CONFIG_GK20A_CYCLE_STATS) #if defined(CONFIG_GK20A_CYCLE_STATS)
err = nvgpu_mutex_init(&g->gr->cs_lock); err = nvgpu_mutex_init(&g->gr->cs_lock);
if (err != 0) { if (err != 0) {
@@ -696,3 +704,86 @@ void nvgpu_gr_free(struct gk20a *g)
} }
g->gr = NULL; g->gr = NULL;
} }
/**
 * Stop processing (stall) context switches at FECS:-
 * If fecs is sent stop_ctxsw method, elpg entry/exit cannot happen
 * and may timeout. It could manifest as different error signatures
 * depending on when stop_ctxsw fecs method gets sent with respect
 * to pmu elpg sequence. It could come as pmu halt or abort or
 * maybe ext error too.
 *
 * Reference-counted: only the transition from 0 -> 1 actually disables
 * elpg and sends the CTXSW_STOP method to FECS; nested callers just
 * bump the count. Returns 0 on success or a negative error code, in
 * which case the count is rolled back so a later retry starts clean.
 */
int nvgpu_gr_disable_ctxsw(struct gk20a *g)
{
struct nvgpu_gr *gr = g->gr;
int err = 0;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
/* Serialize against nvgpu_gr_enable_ctxsw and concurrent disables. */
nvgpu_mutex_acquire(&gr->ctxsw_disable_mutex);
gr->ctxsw_disable_count++;
if (gr->ctxsw_disable_count == 1) {
/* elpg must be off before stop_ctxsw (see header comment above). */
err = nvgpu_pg_elpg_disable(g);
if (err != 0) {
nvgpu_err(g,
"failed to disable elpg for stop_ctxsw");
/* stop ctxsw command is not sent */
gr->ctxsw_disable_count--;
} else {
err = g->ops.gr.falcon.ctrl_ctxsw(g,
NVGPU_GR_FALCON_METHOD_CTXSW_STOP, 0U, NULL);
if (err != 0) {
nvgpu_err(g, "failed to stop fecs ctxsw");
/* stop ctxsw failed */
gr->ctxsw_disable_count--;
}
}
} else {
/* Already stopped by an earlier caller; nothing to send. */
nvgpu_log_info(g, "ctxsw disabled, ctxsw_disable_count: %d",
gr->ctxsw_disable_count);
}
nvgpu_mutex_release(&gr->ctxsw_disable_mutex);
return err;
}
/*
 * Start processing (continue) context switches at FECS.
 *
 * Counterpart to nvgpu_gr_disable_ctxsw(): decrements the disable
 * refcount and only the final 1 -> 0 transition sends CTXSW_START to
 * FECS and re-enables elpg. Calling it with the count already at zero
 * is a no-op. Returns 0 on success or a negative error code.
 */
int nvgpu_gr_enable_ctxsw(struct gk20a *g)
{
struct nvgpu_gr *gr = g->gr;
int err = 0;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
nvgpu_mutex_acquire(&gr->ctxsw_disable_mutex);
if (gr->ctxsw_disable_count == 0) {
/* Unbalanced enable: ctxsw is already running. */
goto ctxsw_already_enabled;
}
gr->ctxsw_disable_count--;
/* A negative count indicates an enable/disable pairing bug. */
WARN_ON(gr->ctxsw_disable_count < 0);
if (gr->ctxsw_disable_count == 0) {
err = g->ops.gr.falcon.ctrl_ctxsw(g,
NVGPU_GR_FALCON_METHOD_CTXSW_START, 0U, NULL);
if (err != 0) {
nvgpu_err(g, "failed to start fecs ctxsw");
} else {
/* Restore elpg only after ctxsw has started (ordering). */
if (nvgpu_pg_elpg_enable(g) != 0) {
nvgpu_err(g,
"failed to enable elpg for start_ctxsw");
}
}
} else {
/* Other callers still hold the disable; keep ctxsw stopped. */
nvgpu_log_info(g, "ctxsw_disable_count: %d is not 0 yet",
gr->ctxsw_disable_count);
}
ctxsw_already_enabled:
nvgpu_mutex_release(&gr->ctxsw_disable_mutex);
return err;
}
/*
 * Halt the GR pipeline by issuing the HALT_PIPELINE method to the
 * FECS falcon. Returns the result of the ctrl_ctxsw HAL call.
 */
int nvgpu_gr_halt_pipe(struct gk20a *g)
{
	int err;

	err = g->ops.gr.falcon.ctrl_ctxsw(g,
			NVGPU_GR_FALCON_METHOD_HALT_PIPELINE, 0U, NULL);

	return err;
}

View File

@@ -30,7 +30,6 @@
#include <nvgpu/sizes.h> #include <nvgpu/sizes.h>
#include <nvgpu/mm.h> #include <nvgpu/mm.h>
#include <nvgpu/acr.h> #include <nvgpu/acr.h>
#include <nvgpu/power_features/pg.h>
#include <nvgpu/pmu/lsfm.h> #include <nvgpu/pmu/lsfm.h>
#include <nvgpu/sec2/lsfm.h> #include <nvgpu/sec2/lsfm.h>
#include <nvgpu/dma.h> #include <nvgpu/dma.h>
@@ -53,13 +52,6 @@ struct nvgpu_gr_falcon *nvgpu_gr_falcon_init_support(struct gk20a *g)
return falcon; return falcon;
} }
err = nvgpu_mutex_init(&falcon->ctxsw_disable_mutex);
if (err != 0) {
nvgpu_err(g, "Error in ctxsw_disable_mutex init");
goto done;
}
falcon->ctxsw_disable_count = 0;
err = nvgpu_mutex_init(&falcon->fecs_mutex); err = nvgpu_mutex_init(&falcon->fecs_mutex);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "Error in fecs_mutex init"); nvgpu_err(g, "Error in fecs_mutex init");
@@ -600,89 +592,6 @@ int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g,
return 0; return 0;
} }
/**
 * Stop processing (stall) context switches at FECS:-
 * If fecs is sent stop_ctxsw method, elpg entry/exit cannot happen
 * and may timeout. It could manifest as different error signatures
 * depending on when stop_ctxsw fecs method gets sent with respect
 * to pmu elpg sequence. It could come as pmu halt or abort or
 * maybe ext error too.
 *
 * NOTE(review): deletion side of this commit — this function is
 * superseded by nvgpu_gr_disable_ctxsw() in gr, which takes no
 * falcon argument; the refcount state moved to struct nvgpu_gr.
 */
int nvgpu_gr_falcon_disable_ctxsw(struct gk20a *g,
struct nvgpu_gr_falcon *falcon)
{
int err = 0;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
/* Serialize against enable_ctxsw and concurrent disables. */
nvgpu_mutex_acquire(&falcon->ctxsw_disable_mutex);
falcon->ctxsw_disable_count++;
if (falcon->ctxsw_disable_count == 1) {
/* elpg must be off before stop_ctxsw (see header comment). */
err = nvgpu_pg_elpg_disable(g);
if (err != 0) {
nvgpu_err(g,
"failed to disable elpg for stop_ctxsw");
/* stop ctxsw command is not sent */
falcon->ctxsw_disable_count--;
} else {
err = g->ops.gr.falcon.ctrl_ctxsw(g,
NVGPU_GR_FALCON_METHOD_CTXSW_STOP, 0U, NULL);
if (err != 0) {
nvgpu_err(g, "failed to stop fecs ctxsw");
/* stop ctxsw failed */
falcon->ctxsw_disable_count--;
}
}
} else {
/* Already stopped by an earlier caller; nothing to send. */
nvgpu_log_info(g, "ctxsw disabled, ctxsw_disable_count: %d",
falcon->ctxsw_disable_count);
}
nvgpu_mutex_release(&falcon->ctxsw_disable_mutex);
return err;
}
/*
 * Start processing (continue) context switches at FECS.
 *
 * NOTE(review): deletion side of this commit — superseded by
 * nvgpu_gr_enable_ctxsw() in gr. Only the final 1 -> 0 refcount
 * transition sends CTXSW_START and re-enables elpg.
 */
int nvgpu_gr_falcon_enable_ctxsw(struct gk20a *g,
struct nvgpu_gr_falcon *falcon)
{
int err = 0;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
nvgpu_mutex_acquire(&falcon->ctxsw_disable_mutex);
if (falcon->ctxsw_disable_count == 0) {
/* Unbalanced enable: ctxsw is already running. */
goto ctxsw_already_enabled;
}
falcon->ctxsw_disable_count--;
/* Negative count indicates an enable/disable pairing bug. */
WARN_ON(falcon->ctxsw_disable_count < 0);
if (falcon->ctxsw_disable_count == 0) {
err = g->ops.gr.falcon.ctrl_ctxsw(g,
NVGPU_GR_FALCON_METHOD_CTXSW_START, 0U, NULL);
if (err != 0) {
nvgpu_err(g, "failed to start fecs ctxsw");
} else {
/* Restore elpg only after ctxsw has started (ordering). */
if (nvgpu_pg_elpg_enable(g) != 0) {
nvgpu_err(g,
"failed to enable elpg for start_ctxsw");
}
}
} else {
/* Other callers still hold the disable; keep ctxsw stopped. */
nvgpu_log_info(g, "ctxsw_disable_count: %d is not 0 yet",
falcon->ctxsw_disable_count);
}
ctxsw_already_enabled:
nvgpu_mutex_release(&falcon->ctxsw_disable_mutex);
return err;
}
/*
 * Halt the GR pipeline via the FECS HALT_PIPELINE method.
 * NOTE(review): deletion side of this commit — superseded by
 * nvgpu_gr_halt_pipe() in gr.
 */
int nvgpu_gr_falcon_halt_pipe(struct gk20a *g)
{
return g->ops.gr.falcon.ctrl_ctxsw(g,
NVGPU_GR_FALCON_METHOD_HALT_PIPELINE, 0U, NULL);
}
struct nvgpu_mutex *nvgpu_gr_falcon_get_fecs_mutex( struct nvgpu_mutex *nvgpu_gr_falcon_get_fecs_mutex(
struct nvgpu_gr_falcon *falcon) struct nvgpu_gr_falcon *falcon)
{ {

View File

@@ -67,8 +67,6 @@ struct nvgpu_ctxsw_ucode_info {
struct nvgpu_gr_falcon { struct nvgpu_gr_falcon {
struct nvgpu_ctxsw_ucode_info ctxsw_ucode_info; struct nvgpu_ctxsw_ucode_info ctxsw_ucode_info;
struct nvgpu_mutex ctxsw_disable_mutex;
int ctxsw_disable_count;
struct nvgpu_mutex fecs_mutex; /* protect fecs method */ struct nvgpu_mutex fecs_mutex; /* protect fecs method */
bool skip_ucode_init; bool skip_ucode_init;
}; };

View File

@@ -99,6 +99,9 @@ struct nvgpu_gr {
#endif #endif
u32 max_css_buffer_size; u32 max_css_buffer_size;
u32 max_ctxsw_ring_buffer_size; u32 max_ctxsw_ring_buffer_size;
struct nvgpu_mutex ctxsw_disable_mutex;
int ctxsw_disable_count;
}; };
#endif /* NVGPU_GR_PRIV_H */ #endif /* NVGPU_GR_PRIV_H */

View File

@@ -191,7 +191,7 @@ void nvgpu_rc_tsg_and_related_engines(struct gk20a *g, struct tsg_gk20a *tsg,
* changing until engine status is checked to make sure tsg * changing until engine status is checked to make sure tsg
* being recovered is not loaded on the engines * being recovered is not loaded on the engines
*/ */
err = g->ops.gr.falcon.disable_ctxsw(g, g->gr->falcon); err = g->ops.gr.disable_ctxsw(g);
if (err != 0) { if (err != 0) {
/* if failed to disable ctxsw, just abort tsg */ /* if failed to disable ctxsw, just abort tsg */
@@ -208,7 +208,7 @@ void nvgpu_rc_tsg_and_related_engines(struct gk20a *g, struct tsg_gk20a *tsg,
* By that time if tsg is not on the engine, engine need not * By that time if tsg is not on the engine, engine need not
* be reset. * be reset.
*/ */
err = g->ops.gr.falcon.enable_ctxsw(g, g->gr->falcon); err = g->ops.gr.enable_ctxsw(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "failed to enable ctxsw"); nvgpu_err(g, "failed to enable ctxsw");
} }

View File

@@ -26,7 +26,7 @@
#include <nvgpu/vgpu/vgpu.h> #include <nvgpu/vgpu/vgpu.h>
#include <nvgpu/error_notifier.h> #include <nvgpu/error_notifier.h>
#include <nvgpu/channel.h> #include <nvgpu/channel.h>
#include <nvgpu/gr/gr_falcon.h> #include <nvgpu/gr/gr.h>
#include <nvgpu/vgpu/ce_vgpu.h> #include <nvgpu/vgpu/ce_vgpu.h>
#include <nvgpu/vgpu/vm_vgpu.h> #include <nvgpu/vgpu/vm_vgpu.h>
@@ -181,6 +181,9 @@ static const struct gpu_ops vgpu_gp10b_ops = {
gr_gk20a_get_offset_in_gpccs_segment, gr_gk20a_get_offset_in_gpccs_segment,
.set_debug_mode = gm20b_gr_set_debug_mode, .set_debug_mode = gm20b_gr_set_debug_mode,
.reset = NULL, .reset = NULL,
.halt_pipe = NULL,
.disable_ctxsw = nvgpu_gr_disable_ctxsw,
.enable_ctxsw = nvgpu_gr_enable_ctxsw,
.ctxsw_prog = { .ctxsw_prog = {
.hw_get_fecs_header_size = .hw_get_fecs_header_size =
gm20b_ctxsw_prog_hw_get_fecs_header_size, gm20b_ctxsw_prog_hw_get_fecs_header_size,
@@ -278,9 +281,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.falcon = { .falcon = {
.init_ctx_state = vgpu_gr_init_ctx_state, .init_ctx_state = vgpu_gr_init_ctx_state,
.load_ctxsw_ucode = NULL, .load_ctxsw_ucode = NULL,
.halt_pipe = NULL,
.disable_ctxsw = nvgpu_gr_falcon_disable_ctxsw,
.enable_ctxsw = nvgpu_gr_falcon_enable_ctxsw,
}, },
#ifdef CONFIG_GK20A_CTXSW_TRACE #ifdef CONFIG_GK20A_CTXSW_TRACE
.fecs_trace = { .fecs_trace = {

View File

@@ -71,6 +71,7 @@
#include "common/clk_arb/clk_arb_gp10b.h" #include "common/clk_arb/clk_arb_gp10b.h"
#include <nvgpu/gk20a.h> #include <nvgpu/gk20a.h>
#include <nvgpu/gr/gr.h>
#include <nvgpu/vgpu/vgpu.h> #include <nvgpu/vgpu/vgpu.h>
#include <nvgpu/error_notifier.h> #include <nvgpu/error_notifier.h>
@@ -106,7 +107,6 @@
#include <nvgpu/debugger.h> #include <nvgpu/debugger.h>
#include <nvgpu/enabled.h> #include <nvgpu/enabled.h>
#include <nvgpu/channel.h> #include <nvgpu/channel.h>
#include <nvgpu/gr/gr_falcon.h>
#include <nvgpu/vgpu/ce_vgpu.h> #include <nvgpu/vgpu/ce_vgpu.h>
#include <nvgpu/vgpu/vm_vgpu.h> #include <nvgpu/vgpu/vm_vgpu.h>
@@ -216,6 +216,9 @@ static const struct gpu_ops vgpu_gv11b_ops = {
gr_gk20a_get_offset_in_gpccs_segment, gr_gk20a_get_offset_in_gpccs_segment,
.set_debug_mode = gm20b_gr_set_debug_mode, .set_debug_mode = gm20b_gr_set_debug_mode,
.reset = NULL, .reset = NULL,
.halt_pipe = NULL,
.disable_ctxsw = nvgpu_gr_disable_ctxsw,
.enable_ctxsw = nvgpu_gr_enable_ctxsw,
.ctxsw_prog = { .ctxsw_prog = {
.hw_get_fecs_header_size = .hw_get_fecs_header_size =
gm20b_ctxsw_prog_hw_get_fecs_header_size, gm20b_ctxsw_prog_hw_get_fecs_header_size,
@@ -328,9 +331,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.falcon = { .falcon = {
.init_ctx_state = vgpu_gr_init_ctx_state, .init_ctx_state = vgpu_gr_init_ctx_state,
.load_ctxsw_ucode = NULL, .load_ctxsw_ucode = NULL,
.halt_pipe = NULL,
.disable_ctxsw = nvgpu_gr_falcon_disable_ctxsw,
.enable_ctxsw = nvgpu_gr_falcon_enable_ctxsw,
}, },
#ifdef CONFIG_GK20A_CTXSW_TRACE #ifdef CONFIG_GK20A_CTXSW_TRACE
.fecs_trace = { .fecs_trace = {

View File

@@ -1927,7 +1927,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
* at that point the hardware state can be inspected to * at that point the hardware state can be inspected to
* determine if the context we're interested in is current. * determine if the context we're interested in is current.
*/ */
err = g->ops.gr.falcon.disable_ctxsw(g, g->gr->falcon); err = g->ops.gr.disable_ctxsw(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "unable to stop gr ctxsw"); nvgpu_err(g, "unable to stop gr ctxsw");
/* this should probably be ctx-fatal... */ /* this should probably be ctx-fatal... */
@@ -1944,7 +1944,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
err = __gr_gk20a_exec_ctx_ops(ch, ctx_ops, num_ops, num_ctx_wr_ops, err = __gr_gk20a_exec_ctx_ops(ch, ctx_ops, num_ops, num_ctx_wr_ops,
num_ctx_rd_ops, ch_is_curr_ctx); num_ctx_rd_ops, ch_is_curr_ctx);
tmp_err = g->ops.gr.falcon.enable_ctxsw(g, g->gr->falcon); tmp_err = g->ops.gr.enable_ctxsw(g);
if (tmp_err != 0) { if (tmp_err != 0) {
nvgpu_err(g, "unable to restart ctxsw!"); nvgpu_err(g, "unable to restart ctxsw!");
err = tmp_err; err = tmp_err;
@@ -2290,7 +2290,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
nvgpu_mutex_acquire(&g->dbg_sessions_lock); nvgpu_mutex_acquire(&g->dbg_sessions_lock);
err = g->ops.gr.falcon.disable_ctxsw(g, g->gr->falcon); err = g->ops.gr.disable_ctxsw(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "unable to stop gr ctxsw"); nvgpu_err(g, "unable to stop gr ctxsw");
goto clean_up; goto clean_up;
@@ -2310,7 +2310,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
nvgpu_mutex_release(&dbg_s->ch_list_lock); nvgpu_mutex_release(&dbg_s->ch_list_lock);
err = g->ops.gr.falcon.enable_ctxsw(g, g->gr->falcon); err = g->ops.gr.enable_ctxsw(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "unable to restart ctxsw!"); nvgpu_err(g, "unable to restart ctxsw!");
} }
@@ -2335,7 +2335,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
nvgpu_mutex_acquire(&g->dbg_sessions_lock); nvgpu_mutex_acquire(&g->dbg_sessions_lock);
err = g->ops.gr.falcon.disable_ctxsw(g, g->gr->falcon); err = g->ops.gr.disable_ctxsw(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "unable to stop gr ctxsw"); nvgpu_err(g, "unable to stop gr ctxsw");
goto clean_up; goto clean_up;
@@ -2351,7 +2351,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
} }
} }
err = g->ops.gr.falcon.enable_ctxsw(g, g->gr->falcon); err = g->ops.gr.enable_ctxsw(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "unable to restart ctxsw!"); nvgpu_err(g, "unable to restart ctxsw!");
} }

View File

@@ -35,7 +35,6 @@
#include <nvgpu/gr/ctx.h> #include <nvgpu/gr/ctx.h>
#include <nvgpu/gr/config.h> #include <nvgpu/gr/config.h>
#include <nvgpu/gr/gr.h> #include <nvgpu/gr/gr.h>
#include <nvgpu/gr/gr_falcon.h>
#include <nvgpu/ltc.h> #include <nvgpu/ltc.h>
#include <nvgpu/engines.h> #include <nvgpu/engines.h>
#include <nvgpu/engine_status.h> #include <nvgpu/engine_status.h>
@@ -696,7 +695,7 @@ int gm20b_gr_clear_sm_error_state(struct gk20a *g,
(void) memset(&tsg->sm_error_states[sm_id], 0, (void) memset(&tsg->sm_error_states[sm_id], 0,
sizeof(*tsg->sm_error_states)); sizeof(*tsg->sm_error_states));
err = g->ops.gr.falcon.disable_ctxsw(g, g->gr->falcon); err = g->ops.gr.disable_ctxsw(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "unable to stop gr ctxsw"); nvgpu_err(g, "unable to stop gr ctxsw");
goto fail; goto fail;
@@ -717,7 +716,7 @@ int gm20b_gr_clear_sm_error_state(struct gk20a *g,
0); 0);
} }
err = g->ops.gr.falcon.enable_ctxsw(g, g->gr->falcon); err = g->ops.gr.enable_ctxsw(g);
fail: fail:
nvgpu_mutex_release(&g->dbg_sessions_lock); nvgpu_mutex_release(&g->dbg_sessions_lock);

View File

@@ -1052,7 +1052,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
nvgpu_mutex_acquire(&g->dbg_sessions_lock); nvgpu_mutex_acquire(&g->dbg_sessions_lock);
err = g->ops.gr.falcon.disable_ctxsw(g, g->gr->falcon); err = g->ops.gr.disable_ctxsw(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "unable to stop gr ctxsw"); nvgpu_err(g, "unable to stop gr ctxsw");
nvgpu_mutex_release(&g->dbg_sessions_lock); nvgpu_mutex_release(&g->dbg_sessions_lock);
@@ -1077,7 +1077,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
nvgpu_mutex_release(&dbg_s->ch_list_lock); nvgpu_mutex_release(&dbg_s->ch_list_lock);
err = g->ops.gr.falcon.enable_ctxsw(g, g->gr->falcon); err = g->ops.gr.enable_ctxsw(g);
if (err != 0) { if (err != 0) {
nvgpu_mutex_release(&g->dbg_sessions_lock); nvgpu_mutex_release(&g->dbg_sessions_lock);
goto clean_up; goto clean_up;

View File

@@ -43,7 +43,6 @@
#include <nvgpu/gr/ctx.h> #include <nvgpu/gr/ctx.h>
#include <nvgpu/gr/config.h> #include <nvgpu/gr/config.h>
#include <nvgpu/gr/gr.h> #include <nvgpu/gr/gr.h>
#include <nvgpu/gr/gr_falcon.h>
#include <nvgpu/channel.h> #include <nvgpu/channel.h>
#include <nvgpu/nvgpu_err.h> #include <nvgpu/nvgpu_err.h>
#include <nvgpu/engines.h> #include <nvgpu/engines.h>
@@ -3165,7 +3164,7 @@ int gv11b_gr_clear_sm_error_state(struct gk20a *g,
(void)memset(&tsg->sm_error_states[sm_id], 0, sizeof(*tsg->sm_error_states)); (void)memset(&tsg->sm_error_states[sm_id], 0, sizeof(*tsg->sm_error_states));
err = g->ops.gr.falcon.disable_ctxsw(g, g->gr->falcon); err = g->ops.gr.disable_ctxsw(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "unable to stop gr ctxsw"); nvgpu_err(g, "unable to stop gr ctxsw");
goto fail; goto fail;
@@ -3197,7 +3196,7 @@ int gv11b_gr_clear_sm_error_state(struct gk20a *g,
0); 0);
} }
err = g->ops.gr.falcon.enable_ctxsw(g, g->gr->falcon); err = g->ops.gr.enable_ctxsw(g);
fail: fail:
nvgpu_mutex_release(&g->dbg_sessions_lock); nvgpu_mutex_release(&g->dbg_sessions_lock);

View File

@@ -304,6 +304,9 @@ static const struct gpu_ops gm20b_ops = {
.log_mme_exception = NULL, .log_mme_exception = NULL,
.reset = nvgpu_gr_reset, .reset = nvgpu_gr_reset,
.esr_bpt_pending_events = gm20b_gr_esr_bpt_pending_events, .esr_bpt_pending_events = gm20b_gr_esr_bpt_pending_events,
.halt_pipe = nvgpu_gr_halt_pipe,
.disable_ctxsw = nvgpu_gr_disable_ctxsw,
.enable_ctxsw = nvgpu_gr_enable_ctxsw,
.ctxsw_prog = { .ctxsw_prog = {
.hw_get_fecs_header_size = .hw_get_fecs_header_size =
gm20b_ctxsw_prog_hw_get_fecs_header_size, gm20b_ctxsw_prog_hw_get_fecs_header_size,
@@ -568,9 +571,6 @@ static const struct gpu_ops gm20b_ops = {
.submit_fecs_sideband_method_op = .submit_fecs_sideband_method_op =
gm20b_gr_falcon_submit_fecs_sideband_method_op, gm20b_gr_falcon_submit_fecs_sideband_method_op,
.ctrl_ctxsw = gm20b_gr_falcon_ctrl_ctxsw, .ctrl_ctxsw = gm20b_gr_falcon_ctrl_ctxsw,
.halt_pipe = nvgpu_gr_falcon_halt_pipe,
.disable_ctxsw = nvgpu_gr_falcon_disable_ctxsw,
.enable_ctxsw = nvgpu_gr_falcon_enable_ctxsw,
.get_current_ctx = gm20b_gr_falcon_get_current_ctx, .get_current_ctx = gm20b_gr_falcon_get_current_ctx,
.get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr, .get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr,
.get_fecs_current_ctx_data = .get_fecs_current_ctx_data =

View File

@@ -338,6 +338,9 @@ static const struct gpu_ops gp10b_ops = {
.log_mme_exception = NULL, .log_mme_exception = NULL,
.reset = nvgpu_gr_reset, .reset = nvgpu_gr_reset,
.esr_bpt_pending_events = gm20b_gr_esr_bpt_pending_events, .esr_bpt_pending_events = gm20b_gr_esr_bpt_pending_events,
.halt_pipe = nvgpu_gr_halt_pipe,
.disable_ctxsw = nvgpu_gr_disable_ctxsw,
.enable_ctxsw = nvgpu_gr_enable_ctxsw,
.ecc = { .ecc = {
.detect = gp10b_ecc_detect_enabled_units, .detect = gp10b_ecc_detect_enabled_units,
.init = gp10b_ecc_init, .init = gp10b_ecc_init,
@@ -633,9 +636,6 @@ static const struct gpu_ops gp10b_ops = {
.submit_fecs_sideband_method_op = .submit_fecs_sideband_method_op =
gm20b_gr_falcon_submit_fecs_sideband_method_op, gm20b_gr_falcon_submit_fecs_sideband_method_op,
.ctrl_ctxsw = gp10b_gr_falcon_ctrl_ctxsw, .ctrl_ctxsw = gp10b_gr_falcon_ctrl_ctxsw,
.halt_pipe = nvgpu_gr_falcon_halt_pipe,
.disable_ctxsw = nvgpu_gr_falcon_disable_ctxsw,
.enable_ctxsw = nvgpu_gr_falcon_enable_ctxsw,
.get_current_ctx = gm20b_gr_falcon_get_current_ctx, .get_current_ctx = gm20b_gr_falcon_get_current_ctx,
.get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr, .get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr,
.get_fecs_current_ctx_data = .get_fecs_current_ctx_data =

View File

@@ -452,6 +452,9 @@ static const struct gpu_ops gv100_ops = {
.log_mme_exception = NULL, .log_mme_exception = NULL,
.reset = nvgpu_gr_reset, .reset = nvgpu_gr_reset,
.esr_bpt_pending_events = gv11b_gr_esr_bpt_pending_events, .esr_bpt_pending_events = gv11b_gr_esr_bpt_pending_events,
.halt_pipe = nvgpu_gr_halt_pipe,
.disable_ctxsw = nvgpu_gr_disable_ctxsw,
.enable_ctxsw = nvgpu_gr_enable_ctxsw,
.ctxsw_prog = { .ctxsw_prog = {
.hw_get_fecs_header_size = .hw_get_fecs_header_size =
gm20b_ctxsw_prog_hw_get_fecs_header_size, gm20b_ctxsw_prog_hw_get_fecs_header_size,
@@ -775,9 +778,6 @@ static const struct gpu_ops gv100_ops = {
.submit_fecs_sideband_method_op = .submit_fecs_sideband_method_op =
gm20b_gr_falcon_submit_fecs_sideband_method_op, gm20b_gr_falcon_submit_fecs_sideband_method_op,
.ctrl_ctxsw = gp10b_gr_falcon_ctrl_ctxsw, .ctrl_ctxsw = gp10b_gr_falcon_ctrl_ctxsw,
.halt_pipe = nvgpu_gr_falcon_halt_pipe,
.disable_ctxsw = nvgpu_gr_falcon_disable_ctxsw,
.enable_ctxsw = nvgpu_gr_falcon_enable_ctxsw,
.get_current_ctx = gm20b_gr_falcon_get_current_ctx, .get_current_ctx = gm20b_gr_falcon_get_current_ctx,
.get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr, .get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr,
.get_fecs_current_ctx_data = .get_fecs_current_ctx_data =

View File

@@ -427,6 +427,9 @@ static const struct gpu_ops gv11b_ops = {
gr_gv11b_ctxsw_checksum_mismatch_mailbox_val, gr_gv11b_ctxsw_checksum_mismatch_mailbox_val,
.reset = nvgpu_gr_reset, .reset = nvgpu_gr_reset,
.esr_bpt_pending_events = gv11b_gr_esr_bpt_pending_events, .esr_bpt_pending_events = gv11b_gr_esr_bpt_pending_events,
.halt_pipe = nvgpu_gr_halt_pipe,
.disable_ctxsw = nvgpu_gr_disable_ctxsw,
.enable_ctxsw = nvgpu_gr_enable_ctxsw,
.ecc = { .ecc = {
.detect = gv11b_ecc_detect_enabled_units, .detect = gv11b_ecc_detect_enabled_units,
.init = gv11b_ecc_init, .init = gv11b_ecc_init,
@@ -752,9 +755,6 @@ static const struct gpu_ops gv11b_ops = {
.submit_fecs_sideband_method_op = .submit_fecs_sideband_method_op =
gm20b_gr_falcon_submit_fecs_sideband_method_op, gm20b_gr_falcon_submit_fecs_sideband_method_op,
.ctrl_ctxsw = gp10b_gr_falcon_ctrl_ctxsw, .ctrl_ctxsw = gp10b_gr_falcon_ctrl_ctxsw,
.halt_pipe = nvgpu_gr_falcon_halt_pipe,
.disable_ctxsw = nvgpu_gr_falcon_disable_ctxsw,
.enable_ctxsw = nvgpu_gr_falcon_enable_ctxsw,
.get_current_ctx = gm20b_gr_falcon_get_current_ctx, .get_current_ctx = gm20b_gr_falcon_get_current_ctx,
.get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr, .get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr,
.get_fecs_current_ctx_data = .get_fecs_current_ctx_data =

View File

@@ -474,6 +474,9 @@ static const struct gpu_ops tu104_ops = {
.log_mme_exception = gr_tu104_log_mme_exception, .log_mme_exception = gr_tu104_log_mme_exception,
.reset = nvgpu_gr_reset, .reset = nvgpu_gr_reset,
.esr_bpt_pending_events = gv11b_gr_esr_bpt_pending_events, .esr_bpt_pending_events = gv11b_gr_esr_bpt_pending_events,
.halt_pipe = nvgpu_gr_halt_pipe,
.disable_ctxsw = nvgpu_gr_disable_ctxsw,
.enable_ctxsw = nvgpu_gr_enable_ctxsw,
.ecc = { .ecc = {
.detect = NULL, .detect = NULL,
.init = tu104_ecc_init, .init = tu104_ecc_init,
@@ -805,9 +808,6 @@ static const struct gpu_ops tu104_ops = {
.submit_fecs_sideband_method_op = .submit_fecs_sideband_method_op =
gm20b_gr_falcon_submit_fecs_sideband_method_op, gm20b_gr_falcon_submit_fecs_sideband_method_op,
.ctrl_ctxsw = gp10b_gr_falcon_ctrl_ctxsw, .ctrl_ctxsw = gp10b_gr_falcon_ctrl_ctxsw,
.halt_pipe = nvgpu_gr_falcon_halt_pipe,
.disable_ctxsw = nvgpu_gr_falcon_disable_ctxsw,
.enable_ctxsw = nvgpu_gr_falcon_enable_ctxsw,
.get_current_ctx = gm20b_gr_falcon_get_current_ctx, .get_current_ctx = gm20b_gr_falcon_get_current_ctx,
.get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr, .get_ctx_ptr = gm20b_gr_falcon_get_ctx_ptr,
.get_fecs_current_ctx_data = .get_fecs_current_ctx_data =

View File

@@ -422,6 +422,9 @@ struct gpu_ops {
void (*log_mme_exception)(struct gk20a *g); void (*log_mme_exception)(struct gk20a *g);
int (*reset)(struct gk20a *g); int (*reset)(struct gk20a *g);
bool (*esr_bpt_pending_events)(u32 global_esr, u32 bpt_event); bool (*esr_bpt_pending_events)(u32 global_esr, u32 bpt_event);
int (*halt_pipe)(struct gk20a *g);
int (*disable_ctxsw)(struct gk20a *g);
int (*enable_ctxsw)(struct gk20a *g);
struct { struct {
void (*detect)(struct gk20a *g); void (*detect)(struct gk20a *g);
int (*init)(struct gk20a *g); int (*init)(struct gk20a *g);
@@ -575,11 +578,6 @@ struct gpu_ops {
struct nvgpu_fecs_method_op op); struct nvgpu_fecs_method_op op);
int (*ctrl_ctxsw)(struct gk20a *g, u32 fecs_method, int (*ctrl_ctxsw)(struct gk20a *g, u32 fecs_method,
u32 fecs_data, u32 *ret_val); u32 fecs_data, u32 *ret_val);
int (*halt_pipe)(struct gk20a *g);
int (*disable_ctxsw)(struct gk20a *g,
struct nvgpu_gr_falcon *falcon);
int (*enable_ctxsw)(struct gk20a *g,
struct nvgpu_gr_falcon *falcon);
u32 (*get_current_ctx)(struct gk20a *g); u32 (*get_current_ctx)(struct gk20a *g);
u32 (*get_ctx_ptr)(u32 ctx); u32 (*get_ctx_ptr)(u32 ctx);
u32 (*get_fecs_current_ctx_data)(struct gk20a *g, u32 (*get_fecs_current_ctx_data)(struct gk20a *g,

View File

@@ -37,5 +37,8 @@ void nvgpu_gr_wait_initialized(struct gk20a *g);
void nvgpu_gr_init(struct gk20a *g); void nvgpu_gr_init(struct gk20a *g);
int nvgpu_gr_alloc(struct gk20a *g); int nvgpu_gr_alloc(struct gk20a *g);
void nvgpu_gr_free(struct gk20a *g); void nvgpu_gr_free(struct gk20a *g);
int nvgpu_gr_disable_ctxsw(struct gk20a *g);
int nvgpu_gr_enable_ctxsw(struct gk20a *g);
int nvgpu_gr_halt_pipe(struct gk20a *g);
#endif /* NVGPU_GR_H */ #endif /* NVGPU_GR_H */

View File

@@ -84,11 +84,6 @@ int nvgpu_gr_falcon_load_ctxsw_ucode(struct gk20a *g,
struct nvgpu_gr_falcon *falcon); struct nvgpu_gr_falcon *falcon);
int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g, int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g,
struct nvgpu_gr_falcon *falcon); struct nvgpu_gr_falcon *falcon);
int nvgpu_gr_falcon_disable_ctxsw(struct gk20a *g,
struct nvgpu_gr_falcon *falcon);
int nvgpu_gr_falcon_enable_ctxsw(struct gk20a *g,
struct nvgpu_gr_falcon *falcon);
int nvgpu_gr_falcon_halt_pipe(struct gk20a *g);
struct nvgpu_mutex *nvgpu_gr_falcon_get_fecs_mutex( struct nvgpu_mutex *nvgpu_gr_falcon_get_fecs_mutex(
struct nvgpu_gr_falcon *falcon); struct nvgpu_gr_falcon *falcon);

View File

@@ -1103,7 +1103,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
nvgpu_mutex_acquire(&g->dbg_sessions_lock); nvgpu_mutex_acquire(&g->dbg_sessions_lock);
/* Suspend GPU context switching */ /* Suspend GPU context switching */
err = g->ops.gr.falcon.disable_ctxsw(g, g->gr->falcon); err = g->ops.gr.disable_ctxsw(g);
if (err) { if (err) {
nvgpu_err(g, "unable to stop gr ctxsw"); nvgpu_err(g, "unable to stop gr ctxsw");
/* this should probably be ctx-fatal... */ /* this should probably be ctx-fatal... */
@@ -1121,7 +1121,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
break; break;
} }
err = g->ops.gr.falcon.enable_ctxsw(g, g->gr->falcon); err = g->ops.gr.enable_ctxsw(g);
if (err) if (err)
nvgpu_err(g, "unable to restart ctxsw!"); nvgpu_err(g, "unable to restart ctxsw!");