Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: remove code for ch not bound to tsg
- Remove handling for channels that are no more bound to tsg as channel
  could be referenceable but no more part of a tsg
- Use tsg_gk20a_from_ch to get pointer to tsg for a given channel
- Clear unhandled gr interrupts

Bug 2429295
JIRA NVGPU-1580

Change-Id: I9da43a2bc9a0282c793b9f301eaf8e8604f91d70
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1972492
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: f4fc5d41ea
Commit: 013ca60edd
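
The whole change applies one pattern: instead of checking gk20a_is_channel_marked_as_tsg() and then indexing g->fifo.tsg[ch->tsgid] directly, each call site asks tsg_gk20a_from_ch() for the channel's TSG and treats a NULL result as "channel not bound to tsg" (log an error, bail out, or skip the work). Below is a minimal, self-contained C sketch of that lookup-and-NULL-check pattern; the struct layouts, the mocked tsg_gk20a_from_ch(), and the printf-based error path are illustrative stand-ins only, not the real nvgpu code.

/*
 * Illustrative sketch only: simplified stand-in types and a mocked
 * tsg_gk20a_from_ch(), just to show the lookup-and-NULL-check pattern
 * this commit applies at every call site.
 */
#include <stdio.h>
#include <errno.h>
#include <stddef.h>

struct tsg_gk20a {
	unsigned int tsgid;
};

struct channel_gk20a {
	unsigned int chid;
	struct tsg_gk20a *tsg;	/* NULL once the channel is unbound */
};

/* stand-in for the real helper: returns NULL when ch has no TSG */
static struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch)
{
	return ch->tsg;
}

/* new-style call site: no flag check, no direct g->fifo.tsg[] indexing */
static int enable_channel_tsg(struct channel_gk20a *ch)
{
	struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);

	if (tsg != NULL) {
		printf("enable tsg %u\n", tsg->tsgid);
		return 0;
	}

	/* channel is still referenceable but no longer part of a tsg */
	fprintf(stderr, "chid: %u is not bound to tsg\n", ch->chid);
	return -EINVAL;
}

int main(void)
{
	struct tsg_gk20a tsg = { .tsgid = 3 };
	struct channel_gk20a bound = { .chid = 1, .tsg = &tsg };
	struct channel_gk20a unbound = { .chid = 2, .tsg = NULL };

	enable_channel_tsg(&bound);	/* enables the TSG, returns 0 */
	enable_channel_tsg(&unbound);	/* logs the error, returns -EINVAL */
	return 0;
}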
@@ -167,28 +167,26 @@ int gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
 {
 	struct tsg_gk20a *tsg;
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		tsg = &g->fifo.tsg[ch->tsgid];
+	tsg = tsg_gk20a_from_ch(ch);
+	if (tsg != NULL) {
 		g->ops.fifo.enable_tsg(tsg);
+		return 0;
 	} else {
-		g->ops.fifo.enable_channel(ch);
+		return -EINVAL;
 	}
-
-	return 0;
 }
 
 int gk20a_disable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
 {
 	struct tsg_gk20a *tsg;
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		tsg = &g->fifo.tsg[ch->tsgid];
+	tsg = tsg_gk20a_from_ch(ch);
+	if (tsg != NULL) {
 		g->ops.fifo.disable_tsg(tsg);
+		return 0;
 	} else {
-		g->ops.fifo.disable_channel(ch);
+		return -EINVAL;
 	}
-
-	return 0;
 }
 
 void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)
@@ -241,19 +239,8 @@ void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
 
 	if (tsg != NULL) {
 		return gk20a_fifo_abort_tsg(ch->g, tsg, channel_preempt);
-	}
-
-	/* make sure new kickoffs are prevented */
-	gk20a_channel_set_timedout(ch);
-
-	ch->g->ops.fifo.disable_channel(ch);
-
-	if (channel_preempt) {
-		ch->g->ops.fifo.preempt_channel(ch->g, ch);
-	}
-
-	if (ch->g->ops.fifo.ch_abort_clean_up != NULL) {
-		ch->g->ops.fifo.ch_abort_clean_up(ch);
+	} else {
+		nvgpu_err(ch->g, "chid: %d is not bound to tsg", ch->chid);
 	}
 }
 
@@ -448,8 +448,14 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g,
 	struct gk20a_fecs_trace *trace = g->fecs_trace;
 	struct nvgpu_mem *mem;
 	u32 context_ptr = gk20a_fecs_trace_fecs_context_ptr(g, ch);
-	pid_t pid;
 	u32 aperture_mask;
+	struct tsg_gk20a *tsg;
+
+	tsg = tsg_gk20a_from_ch(ch);
+	if (tsg == NULL) {
+		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
+		return -EINVAL;
+	}
 
 	nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw,
 		"chid=%d context_ptr=%x inst_block=%llx",
@@ -495,11 +501,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g,
 	/* pid (process identifier) in user space, corresponds to tgid (thread
 	 * group id) in kernel space.
 	 */
-	if (gk20a_is_channel_marked_as_tsg(ch))
-		pid = tsg_gk20a_from_ch(ch)->tgid;
-	else
-		pid = ch->tgid;
-	gk20a_fecs_trace_hash_add(g, context_ptr, pid);
+	gk20a_fecs_trace_hash_add(g, context_ptr, tsg->tgid);
 
 	return 0;
 }
@@ -1253,7 +1253,8 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
 
 int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
 {
-	unsigned long engine_id, engines;
+	unsigned long engine_id, engines = 0U;
+	struct tsg_gk20a *tsg;
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 	gr_gk20a_disable_ctxsw(g);
@@ -1262,13 +1263,14 @@ int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
 		goto clean_up;
 	}
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
+	tsg = tsg_gk20a_from_ch(ch);
+	if (tsg != NULL) {
 		engines = g->ops.fifo.get_engines_mask_on_id(g,
-				ch->tsgid, true);
+				tsg->tsgid, true);
 	} else {
-		engines = g->ops.fifo.get_engines_mask_on_id(g,
-				ch->chid, false);
+		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
 	}
 
 	if (engines == 0U) {
 		goto clean_up;
 	}
@@ -1426,16 +1428,18 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 		} else if (type == fifo_engine_status_id_type_chid_v()) {
 			ch = &g->fifo.channel[id];
 			refch = gk20a_channel_get(ch);
+			if (refch != NULL) {
+				tsg = tsg_gk20a_from_ch(refch);
+			}
 		}
 	} else {
 		/* Look up channel from the inst block pointer. */
 		ch = gk20a_refch_from_inst_ptr(g,
 				mmfault_info.inst_ptr);
 		refch = ch;
-	}
-
-	if ((ch != NULL) && gk20a_is_channel_marked_as_tsg(ch)) {
-		tsg = &g->fifo.tsg[ch->tsgid];
+		if (refch != NULL) {
+			tsg = tsg_gk20a_from_ch(refch);
+		}
 	}
 
 	/* check if engine reset should be deferred */
@@ -1462,16 +1466,10 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 	}
 
 #ifdef CONFIG_GK20A_CTXSW_TRACE
-	/*
-	 * For non fake mmu fault, both tsg and ch pointers
-	 * could be valid. Check tsg first.
-	 */
-	if (tsg != NULL)
+	if (tsg != NULL) {
 		gk20a_ctxsw_trace_tsg_reset(g, tsg);
-	else if (ch)
-		gk20a_ctxsw_trace_channel_reset(g, ch);
+	}
 #endif
 
 	/*
 	 * Disable the channel/TSG from hw and increment syncpoints.
 	 */
@@ -1490,25 +1488,10 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 			if (refch != NULL) {
 				gk20a_channel_put(ch);
 			}
-		} else if (ch != NULL) {
-			if (refch != NULL) {
-				if (g->fifo.deferred_reset_pending) {
-					g->ops.fifo.disable_channel(ch);
-				} else {
-					if (!fake_fault) {
-						nvgpu_channel_set_ctx_mmu_error(
-							g, refch);
-					}
-
-					verbose = nvgpu_channel_mark_error(g,
-							refch);
-					gk20a_channel_abort(ch, false);
-				}
-				gk20a_channel_put(ch);
-			} else {
-				nvgpu_err(g, "mmu error in freed channel %d",
+		} else if (refch != NULL) {
+			nvgpu_err(g, "mmu error in unbound channel %d",
 					ch->chid);
-			}
+			gk20a_channel_put(ch);
 		} else if (mmfault_info.inst_ptr ==
 				nvgpu_inst_block_addr(g, &g->mm.bar1.inst_block)) {
 			nvgpu_err(g, "mmu fault from bar1");
@@ -1742,7 +1725,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 engine_ids,
 			rc_type, NULL);
 }
 
-/* force reset channel and tsg (if it's part of one) */
+/* force reset channel and tsg */
 int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 				u32 err_code, bool verbose)
 {
@@ -1752,7 +1735,6 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 	struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);
 
 	if (tsg != NULL) {
-
 		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
 		nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
@@ -1767,9 +1749,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 		nvgpu_tsg_recover(g, tsg, verbose, RC_TYPE_FORCE_RESET);
 	} else {
-		g->ops.fifo.set_error_notifier(ch, err_code);
-		nvgpu_channel_recover(g, ch, verbose,
-				RC_TYPE_FORCE_RESET);
+		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
 	}
 
 	return 0;
@@ -2265,7 +2265,6 @@ u32 gr_gk20a_get_patch_slots(struct gk20a *g)
 int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 {
 	struct gk20a *g = c->g;
-	struct fifo_gk20a *f = &g->fifo;
 	struct nvgpu_gr_ctx *gr_ctx;
 	struct tsg_gk20a *tsg = NULL;
 	int err = 0;
@@ -2288,11 +2287,11 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 	}
 	c->obj_class = class_num;
 
-	if (!gk20a_is_channel_marked_as_tsg(c)) {
+	tsg = tsg_gk20a_from_ch(c);
+	if (tsg == NULL) {
 		return -EINVAL;
 	}
 
-	tsg = &f->tsg[c->tsgid];
 	gr_ctx = tsg->gr_ctx;
 
 	if (!nvgpu_mem_is_valid(&gr_ctx->mem)) {
@@ -4472,20 +4471,20 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g,
 		return;
 	}
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		tsg = &g->fifo.tsg[ch->tsgid];
+	tsg = tsg_gk20a_from_ch(ch);
+	if (tsg != NULL) {
 		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 		nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
 				channel_gk20a, ch_entry) {
 			if (gk20a_channel_get(ch_tsg) != NULL) {
 				g->ops.fifo.set_error_notifier(ch_tsg,
-						error_notifier);
+					error_notifier);
 				gk20a_channel_put(ch_tsg);
 			}
 		}
 		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 	} else {
-		g->ops.fifo.set_error_notifier(ch, error_notifier);
+		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
 	}
 }
 
@@ -4657,12 +4656,21 @@ int gk20a_gr_handle_semaphore_pending(struct gk20a *g,
 				  struct gr_gk20a_isr_data *isr_data)
 {
 	struct channel_gk20a *ch = isr_data->ch;
-	struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
+	struct tsg_gk20a *tsg;
 
-	g->ops.fifo.post_event_id(tsg,
-		NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
+	if (ch == NULL) {
+		return 0;
+	}
 
-	nvgpu_cond_broadcast(&ch->semaphore_wq);
+	tsg = tsg_gk20a_from_ch(ch);
+	if (tsg != NULL) {
+		g->ops.fifo.post_event_id(tsg,
+			NVGPU_EVENT_ID_GR_SEMAPHORE_WRITE_AWAKEN);
+
+		nvgpu_cond_broadcast(&ch->semaphore_wq);
+	} else {
+		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
+	}
 
 	return 0;
 }
@@ -4697,7 +4705,12 @@ int gk20a_gr_handle_notify_pending(struct gk20a *g,
 	u32 buffer_size;
 	u32 offset;
 	bool exit;
 #endif
+	if (ch == NULL || tsg_gk20a_from_ch(ch) == NULL) {
+		return 0;
+	}
+
 #if defined(CONFIG_GK20A_CYCLE_STATS)
 	/* GL will never use payload 0 for cycle state */
 	if ((ch->cyclestate.cyclestate_buffer == NULL) || (isr_data->data_lo == 0))
 		return 0;
@@ -5241,7 +5254,7 @@ int gk20a_gr_isr(struct gk20a *g)
 	u32 chid;
 
 	nvgpu_log_fn(g, " ");
-	nvgpu_log(g, gpu_dbg_intr, "pgraph intr %08x", gr_intr);
+	nvgpu_log(g, gpu_dbg_intr, "pgraph intr 0x%08x", gr_intr);
 
 	if (gr_intr == 0U) {
 		return 0;
@@ -5275,11 +5288,13 @@ int gk20a_gr_isr(struct gk20a *g)
 	chid = ch != NULL ? ch->chid : FIFO_INVAL_CHANNEL_ID;
 
 	if (ch == NULL) {
-		nvgpu_err(g, "ch id is INVALID 0xffffffff");
-	}
-
-	if ((ch != NULL) && gk20a_is_channel_marked_as_tsg(ch)) {
-		tsg = &g->fifo.tsg[ch->tsgid];
+		nvgpu_err(g, "pgraph intr: 0x%08x, chid: INVALID", gr_intr);
+	} else {
+		tsg = tsg_gk20a_from_ch(ch);
+		if (tsg == NULL) {
+			nvgpu_err(g, "pgraph intr: 0x%08x, chid: %d "
+				"not bound to tsg", gr_intr, chid);
+		}
 	}
 
 	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
@@ -5468,7 +5483,9 @@ int gk20a_gr_isr(struct gk20a *g)
 		nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
 				"GPC exception pending");
 
-		fault_ch = isr_data.ch;
+		if (tsg != NULL) {
+			fault_ch = isr_data.ch;
+		}
 
 		/* fault_ch can be NULL */
 		/* check if any gpc has an exception */
@@ -5497,39 +5514,42 @@ int gk20a_gr_isr(struct gk20a *g)
 	}
 
 	if (need_reset) {
-		if (tsgid != NVGPU_INVALID_TSG_ID) {
+		if (tsg != NULL) {
 			gk20a_fifo_recover(g, gr_engine_id,
 					tsgid, true, true, true,
 					RC_TYPE_GR_FAULT);
-		} else if (ch != NULL) {
-			gk20a_fifo_recover(g, gr_engine_id,
-					ch->chid, false, true, true,
-					RC_TYPE_GR_FAULT);
 		} else {
+			if (ch != NULL) {
+				nvgpu_err(g, "chid: %d referenceable but not "
+					"bound to tsg", chid);
+			}
 			gk20a_fifo_recover(g, gr_engine_id,
 					0, false, false, true,
 					RC_TYPE_GR_FAULT);
 		}
 	}
 
-	if ((gr_intr != 0U) && (ch == NULL)) {
-		/* Clear interrupts for unused channel. This is
-		   probably an interrupt during gk20a_free_channel() */
-		nvgpu_err(g,
-			"unhandled gr interrupt 0x%08x for unreferenceable channel, clearing",
-			gr_intr);
+	if (gr_intr != 0U) {
+		/* clear unhandled interrupts */
+		if (ch == NULL) {
+			/*
+			 * This is probably an interrupt during
+			 * gk20a_free_channel()
+			 */
+			nvgpu_err(g, "unhandled gr intr 0x%08x for "
+				"unreferenceable channel, clearing",
+				gr_intr);
+		} else {
+			nvgpu_err(g, "unhandled gr intr 0x%08x for chid: %d",
+				gr_intr, chid);
+		}
 		gk20a_writel(g, gr_intr_r(), gr_intr);
 		gr_intr = 0;
 	}
 
 	gk20a_writel(g, gr_gpfifo_ctl_r(),
 		grfifo_ctl | gr_gpfifo_ctl_access_f(1) |
 		gr_gpfifo_ctl_semaphore_access_f(1));
 
-	if (gr_intr != 0U) {
-		nvgpu_err(g,
-			"unhandled gr interrupt 0x%08x", gr_intr);
-	}
-
 	/* Posting of BPT events should be the last thing in this function */
 	if ((global_esr != 0U) && (tsg != NULL) && (need_reset == false)) {
@@ -1551,6 +1551,14 @@ void gr_gp10b_get_access_map(struct gk20a *g,
 static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a *fault_ch)
 {
 	int ret = 0;
+	struct tsg_gk20a *tsg;
+
+	tsg = tsg_gk20a_from_ch(fault_ch);
+	if (tsg == NULL) {
+		nvgpu_err(g, "CILP: chid: %d is not bound to tsg",
+				fault_ch->chid);
+		return -EINVAL;
+	}
 
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
 
@@ -1575,18 +1583,11 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist");
 
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
-			"CILP: tsgid: 0x%x", fault_ch->tsgid);
+			"CILP: tsgid: 0x%x", tsg->tsgid);
 
-	if (gk20a_is_channel_marked_as_tsg(fault_ch)) {
-		gk20a_fifo_issue_preempt(g, fault_ch->tsgid, true);
-		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
+	gk20a_fifo_issue_preempt(g, tsg->tsgid, true);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP: preempted tsg");
-	} else {
-		gk20a_fifo_issue_preempt(g, fault_ch->chid, false);
-		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
-				"CILP: preempted channel");
-	}
 
 	return ret;
 }
@@ -594,9 +594,8 @@ int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
 
 	nvgpu_log_fn(g, " ");
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		tsg = &g->fifo.tsg[ch->tsgid];
-
+	tsg = tsg_gk20a_from_ch(ch);
+	if (tsg != NULL) {
 		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
 		nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
@@ -611,8 +610,7 @@ int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
 
 		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 	} else {
-		g->ops.fifo.set_error_notifier(ch, err_code);
-		gk20a_channel_set_timedout(ch);
+		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
 	}
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_FORCE_RESET;
@@ -651,9 +649,8 @@ static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g,
 	struct tsg_gk20a *tsg = NULL;
 	struct channel_gk20a *ch_tsg = NULL;
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		tsg = &g->fifo.tsg[ch->tsgid];
-
+	tsg = tsg_gk20a_from_ch(ch);
+	if (tsg != NULL) {
 		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
 		nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
@@ -666,7 +663,7 @@ static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g,
 
 		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 	} else {
-		vgpu_fifo_set_ctx_mmu_error_ch(g, ch);
+		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
 	}
 }
 