Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 02:22:34 +03:00)
gpu: nvgpu: remove channel.check_ctxsw_timeout
nvgpu_channel_check_ctxsw_timeout is removed, since ctxsw timeout is not
checked for a channel that is not bound to a TSG.

JIRA NVGPU-1312

Change-Id: I8d12251e478a959d150b736206396c338575b2ec
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2079513
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: dfafddcc21
Commit: 434931799a
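For context, a minimal sketch of the rationale, not code from this commit: ctxsw timeout is only ever evaluated for channels bound to a TSG, so the channel-level hook removed below had no live path. The lookup helper tsg_from_ch() is hypothetical, and the TSG op's trailing parameters are assumed to mirror the removed channel variant.

/* Sketch only: tsg_from_ch() is a hypothetical lookup helper, and the
 * (bool *verbose, u32 *ms) tail of the tsg op is an assumption based on
 * the removed channel op's signature. */
static bool check_ctxsw_timeout_via_tsg(struct gk20a *g,
		struct channel_gk20a *ch, bool *verbose, u32 *ms)
{
	struct tsg_gk20a *tsg = tsg_from_ch(ch); /* hypothetical helper */

	if (tsg == NULL) {
		/* Not bound to a TSG: ctxsw timeout is never checked here,
		 * which is why the per-channel op is being removed. */
		return false;
	}

	return g->ops.tsg.check_ctxsw_timeout(tsg, verbose, ms);
}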
@@ -1453,26 +1453,6 @@ bool nvgpu_channel_update_and_check_ctxsw_timeout(struct channel_gk20a *ch,
 		ch->ctxsw_timeout_accumulated_ms > ch->ctxsw_timeout_max_ms;
 }
 
-bool nvgpu_channel_check_ctxsw_timeout(struct channel_gk20a *ch,
-		bool *verbose, u32 *ms)
-{
-	bool recover = false;
-	bool progress = false;
-	struct gk20a *g = ch->g;
-
-	recover = nvgpu_channel_update_and_check_ctxsw_timeout(ch,
-			g->ctxsw_timeout_period_ms, &progress);
-	*verbose = ch->ctxsw_timeout_debug_dump;
-	*ms = ch->ctxsw_timeout_accumulated_ms;
-	if (recover) {
-		nvgpu_channel_set_error_notifier(g, ch,
-				NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
-	}
-
-	return recover;
-}
-
-
 void nvgpu_channel_recover(struct gk20a *g, struct channel_gk20a *ch,
 		bool verbose, u32 rc_type)
 {
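The accumulation helper nvgpu_channel_update_and_check_ctxsw_timeout stays. A minimal caller fragment, mirroring how the removed function drove it (illustration only, not part of this diff):

	bool progress = false;
	bool recover = nvgpu_channel_update_and_check_ctxsw_timeout(ch,
			g->ctxsw_timeout_period_ms, &progress);
	if (recover) {
		nvgpu_channel_set_error_notifier(g, ch,
				NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
	}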
@@ -507,7 +507,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 		.enable = vgpu_channel_enable,
 		.disable = vgpu_channel_disable,
 		.count = vgpu_channel_count,
-		.check_ctxsw_timeout = nvgpu_channel_check_ctxsw_timeout,
 	},
 	.tsg = {
 		.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
@@ -587,7 +587,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 		.enable = vgpu_channel_enable,
 		.disable = vgpu_channel_disable,
 		.count = vgpu_channel_count,
-		.check_ctxsw_timeout = nvgpu_channel_check_ctxsw_timeout,
 	},
 	.tsg = {
 		.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
@@ -666,7 +666,6 @@ static const struct gpu_ops gm20b_ops = {
 		.count = gm20b_channel_count,
 		.read_state = gk20a_channel_read_state,
 		.force_ctx_reload = gm20b_channel_force_ctx_reload,
-		.check_ctxsw_timeout = nvgpu_channel_check_ctxsw_timeout,
 	},
 	.tsg = {
 		.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
@@ -747,7 +747,6 @@ static const struct gpu_ops gp10b_ops = {
 		.count = gm20b_channel_count,
 		.read_state = gk20a_channel_read_state,
 		.force_ctx_reload = gm20b_channel_force_ctx_reload,
-		.check_ctxsw_timeout = nvgpu_channel_check_ctxsw_timeout,
 	},
 	.tsg = {
 		.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
@@ -929,7 +929,6 @@ static const struct gpu_ops gv100_ops = {
 		.read_state = gv11b_channel_read_state,
 		.force_ctx_reload = gm20b_channel_force_ctx_reload,
 		.reset_faulted = gv11b_channel_reset_faulted,
-		.check_ctxsw_timeout = nvgpu_channel_check_ctxsw_timeout,
 	},
 	.tsg = {
 		.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
@@ -886,7 +886,6 @@ static const struct gpu_ops gv11b_ops = {
 		.read_state = gv11b_channel_read_state,
 		.force_ctx_reload = gm20b_channel_force_ctx_reload,
 		.reset_faulted = gv11b_channel_reset_faulted,
-		.check_ctxsw_timeout = nvgpu_channel_check_ctxsw_timeout,
 	},
 	.tsg = {
 		.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,
@@ -375,8 +375,6 @@ bool nvgpu_channel_mark_error(struct gk20a *g, struct channel_gk20a *ch);
 
 bool nvgpu_channel_update_and_check_ctxsw_timeout(struct channel_gk20a *ch,
 		u32 timeout_delta_ms, bool *progress);
-bool nvgpu_channel_check_ctxsw_timeout(struct channel_gk20a *ch,
-		bool *verbose, u32 *ms);
 
 void nvgpu_channel_recover(struct gk20a *g, struct channel_gk20a *ch,
 		bool verbose, u32 rc_type);
@@ -1042,8 +1042,6 @@ struct gpu_ops {
 		void (*force_ctx_reload)(struct channel_gk20a *ch);
 		void (*reset_faulted)(struct gk20a *g, struct channel_gk20a *ch,
 				bool eng, bool pbdma);
-		bool (*check_ctxsw_timeout)(struct channel_gk20a *ch,
-				bool *verbose, u32 *ms);
 	} channel;
 	struct {
 		bool (*check_ctxsw_timeout)(struct tsg_gk20a *tsg,
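After this hunk, ctxsw-timeout checking lives only in the tsg sub-struct of gpu_ops. A self-contained sketch of the resulting layout (the tsg op's trailing parameters are assumed, since the diff truncates them; u32 is typedef'd here only so the sketch stands alone):

#include <stdbool.h>

typedef unsigned int u32;	/* stand-in for the kernel typedef */
struct gk20a;
struct channel_gk20a;
struct tsg_gk20a;

struct sketch_gpu_ops {
	struct {
		void (*force_ctx_reload)(struct channel_gk20a *ch);
		void (*reset_faulted)(struct gk20a *g, struct channel_gk20a *ch,
				bool eng, bool pbdma);
		/* check_ctxsw_timeout removed from channel ops */
	} channel;
	struct {
		bool (*check_ctxsw_timeout)(struct tsg_gk20a *tsg,
				bool *verbose, u32 *ms);	/* tail assumed */
	} tsg;
};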
@@ -966,7 +966,6 @@ static const struct gpu_ops tu104_ops = {
 		.read_state = gv11b_channel_read_state,
 		.force_ctx_reload = gm20b_channel_force_ctx_reload,
 		.reset_faulted = gv11b_channel_reset_faulted,
-		.check_ctxsw_timeout = nvgpu_channel_check_ctxsw_timeout,
 	},
 	.tsg = {
 		.check_ctxsw_timeout = nvgpu_tsg_check_ctxsw_timeout,