gpu: nvgpu: remove direct tsg retrieval from fifo

Added:
- nvgpu_tsg_check_and_get_from_id
- nvgpu_tsg_get_from_id

and replaced direct accesses to the f->tsg array with calls to them.
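
Roughly, call sites change from direct indexing with ad hoc validity checks to the new accessors; a minimal before/after sketch condensed from the hunks below (the surrounding loop context is illustrative):

	/* before: open-coded check, then direct array access */
	if (tsgid == FIFO_INVAL_TSG_ID) {
		continue;
	}
	tsg = &f->tsg[tsgid];

	/* after: the check variant returns NULL for an invalid id */
	tsg = nvgpu_tsg_check_and_get_from_id(g, tsgid);
	if (tsg == NULL) {
		continue;
	}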

Jira NVGPU-3156

Change-Id: I8610e19c1a6e06521c16a1ec0c3a7a011978d0b7
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2101251
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Debarshi Dutta
Date:      2019-03-29 16:40:02 +05:30
Committer: mobile promotions
Parent:    124cdb4509
Commit:    965062c2bc

12 changed files with 52 additions and 28 deletions


@@ -134,7 +134,7 @@ static u32 nvgpu_runlist_append_prio(struct fifo_gk20a *f,
 	nvgpu_log_fn(f->g, " ");
 
 	for_each_set_bit(tsgid, runlist->active_tsgs, f->num_channels) {
-		struct tsg_gk20a *tsg = &f->tsg[tsgid];
+		struct tsg_gk20a *tsg = nvgpu_tsg_get_from_id(f->g, tsgid);
 		u32 entries;
 
 		if (tsg->interleave_level == interleave_level) {
@@ -177,7 +177,7 @@ static u32 nvgpu_runlist_append_med(struct fifo_gk20a *f,
 	nvgpu_log_fn(f->g, " ");
 
 	for_each_set_bit(tsgid, runlist->active_tsgs, f->num_channels) {
-		struct tsg_gk20a *tsg = &f->tsg[tsgid];
+		struct tsg_gk20a *tsg = nvgpu_tsg_get_from_id(f->g, tsgid);
 		u32 entries;
 
 		if (tsg->interleave_level !=
@@ -216,7 +216,7 @@ static u32 nvgpu_runlist_append_low(struct fifo_gk20a *f,
 	nvgpu_log_fn(f->g, " ");
 
 	for_each_set_bit(tsgid, runlist->active_tsgs, f->num_channels) {
-		struct tsg_gk20a *tsg = &f->tsg[tsgid];
+		struct tsg_gk20a *tsg = nvgpu_tsg_get_from_id(f->g, tsgid);
 		u32 entries;
 
 		if (tsg->interleave_level !=


@@ -48,6 +48,24 @@ void nvgpu_tsg_disable(struct tsg_gk20a *tsg)
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 }
 
+struct tsg_gk20a *nvgpu_tsg_check_and_get_from_id(struct gk20a *g, u32 tsgid)
+{
+	if (tsgid == NVGPU_INVALID_TSG_ID) {
+		return NULL;
+	}
+
+	return nvgpu_tsg_get_from_id(g, tsgid);
+}
+
+struct tsg_gk20a *nvgpu_tsg_get_from_id(struct gk20a *g, u32 tsgid)
+{
+	struct fifo_gk20a *f = &g->fifo;
+
+	return &f->tsg[tsgid];
+}
+
 static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
 {
 	struct fifo_gk20a *f = &g->fifo;
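
Note that the check variant only screens out NVGPU_INVALID_TSG_ID; neither accessor bounds-checks the id against f->num_channels, so callers must still pass an id obtained from a trusted source (an active-TSG bitmap, engine status, and so on). A hedged caller-side sketch; the handler function itself is hypothetical, only the nvgpu_tsg_* names come from this change:

	static void example_handle_tsg_event(struct gk20a *g, u32 tsgid)
	{
		struct tsg_gk20a *tsg = nvgpu_tsg_check_and_get_from_id(g, tsgid);

		if (tsg == NULL) {
			return; /* tsgid was NVGPU_INVALID_TSG_ID */
		}

		/* ... operate on the validated tsg pointer ... */
	}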


@@ -85,7 +85,7 @@ void nvgpu_rc_pbdma_fault(struct gk20a *g, struct fifo_gk20a *f,
 	/* Remove channel from runlist */
 	id = pbdma_status.id;
 	if (pbdma_status.id_type == PBDMA_STATUS_ID_TYPE_TSGID) {
-		struct tsg_gk20a *tsg = &f->tsg[id];
+		struct tsg_gk20a *tsg = nvgpu_tsg_get_from_id(g, id);
 
 		nvgpu_tsg_set_error_notifier(g, tsg, error_notifier);
 		nvgpu_rc_tsg_and_related_engines(g, tsg, true,


@@ -1114,7 +1114,7 @@ void vgpu_gr_handle_sm_esr_event(struct gk20a *g,
 		return;
 	}
 
-	tsg = &g->fifo.tsg[info->tsg_id];
+	tsg = nvgpu_tsg_check_and_get_from_id(g, info->tsg_id);
 	if (tsg == NULL) {
 		nvgpu_err(g, "invalid tsg");
 		return;


@@ -117,9 +117,10 @@ static void vgpu_handle_channel_event(struct gk20a *g,
 		return;
 	}
 
-	tsg = &g->fifo.tsg[info->id];
-
-	nvgpu_tsg_post_event_id(tsg, info->event_id);
+	tsg = nvgpu_tsg_check_and_get_from_id(g, info->id);
+	if (tsg != NULL) {
+		nvgpu_tsg_post_event_id(tsg, info->event_id);
+	}
 }
 
 static void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)


@@ -1146,12 +1146,16 @@ static int gr_gv11b_handle_all_warp_esr_errors(struct gk20a *g,
 	is_esr_error = gr_gv11b_check_warp_esr_error(g, warp_esr_error);
 	if (!is_esr_error) {
 		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
-			"No ESR error, Skip RC recovery and Trigeer CILP");
+			"No ESR error, Skip RC recovery and Trigger CILP");
 		return 0;
 	}
 
 	if (fault_ch != NULL) {
-		tsg = &g->fifo.tsg[fault_ch->tsgid];
+		tsg = nvgpu_tsg_check_and_get_from_id(g, fault_ch->tsgid);
+		if (tsg == NULL) {
+			nvgpu_err(g, "fault ch %u not found", fault_ch->chid);
+			goto clear_intr;
+		}
 
 		/*
 		 * Check SET_EXCEPTION_TYPE_MASK is being set.
@@ -1172,6 +1176,7 @@ static int gr_gv11b_handle_all_warp_esr_errors(struct gk20a *g,
 			NVGPU_ERR_NOTIFIER_GR_EXCEPTION);
 	}
 
+clear_intr:
 	/* clear interrupt */
 	offset = nvgpu_gr_gpc_offset(g, gpc) +
 		nvgpu_gr_tpc_offset(g, tpc) +


@@ -652,9 +652,9 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 		/* CE page faults are not reported as replayable */
 		nvgpu_log(g, gpu_dbg_intr, "CE Faulted");
 		err = gv11b_fb_fix_page_fault(g, mmfault);
-		if (mmfault->refch != NULL) {
-			tsg = tsg_gk20a_from_ch(mmfault->refch);
+		if ((mmfault->refch != NULL) &&
+			((u32)mmfault->refch->tsgid != FIFO_INVAL_TSG_ID)) {
+			tsg = nvgpu_tsg_get_from_id(g, mmfault->refch->tsgid);
 			nvgpu_tsg_reset_faulted_eng_pbdma(g, tsg, true, true);
 		}
 		if (err == 0) {


@@ -107,7 +107,7 @@ bool gk20a_fifo_handle_ctxsw_timeout(struct gk20a *g)
 	}
 
 	if (is_tsg) {
-		tsg = &f->tsg[id];
+		tsg = nvgpu_tsg_check_and_get_from_id(g, id);
 	} else {
 		ch = gk20a_channel_from_id(g, id);
 		if (ch != NULL) {


@@ -200,19 +200,16 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g)
 		if ((ctxsw_timeout_engines &
 			fifo_intr_ctxsw_timeout_engine_pending_f(
 				active_eng_id)) != 0U) {
-			struct fifo_gk20a *f = &g->fifo;
 			u32 ms = 0;
 			bool debug_dump = false;
 
 			tsgid = gv11b_fifo_ctxsw_timeout_info(g, active_eng_id,
 					&info_status);
-
-			if (tsgid == FIFO_INVAL_TSG_ID) {
+			tsg = nvgpu_tsg_check_and_get_from_id(g, tsgid);
+			if (tsg == NULL) {
 				continue;
 			}
 
-			tsg = &f->tsg[tsgid];
 			recover = g->ops.tsg.check_ctxsw_timeout(tsg,
 					&debug_dump, &ms);
 			if (recover) {


@@ -315,7 +315,7 @@ bool gk20a_fifo_handle_mmu_fault_locked(
 	}
 
 	if (type == ENGINE_STATUS_CTX_ID_TYPE_TSGID) {
-		tsg = &g->fifo.tsg[id];
+		tsg = nvgpu_tsg_get_from_id(g, id);
 	} else if (type == ENGINE_STATUS_CTX_ID_TYPE_CHID) {
 		ch = &g->fifo.channel[id];
 		refch = gk20a_channel_get(ch);


@@ -105,6 +105,8 @@ struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch);
 void nvgpu_tsg_disable(struct tsg_gk20a *tsg);
 int nvgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
 		struct channel_gk20a *ch);
+struct tsg_gk20a *nvgpu_tsg_get_from_id(struct gk20a *g, u32 tsgid);
+struct tsg_gk20a *nvgpu_tsg_check_and_get_from_id(struct gk20a *g, u32 tsgid);
 int nvgpu_tsg_unbind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch);
 int nvgpu_tsg_unbind_channel_common(struct tsg_gk20a *tsg,
 		struct channel_gk20a *ch);


@@ -167,9 +167,10 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
 	nvgpu_mutex_acquire(&sched->status_lock);
 	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
 		if (NVGPU_SCHED_ISSET(tsgid, sched->active_tsg_bitmap)) {
-			tsg = &f->tsg[tsgid];
-			if (tsg->tgid == tgid)
+			tsg = nvgpu_tsg_get_from_id(g, tsgid);
+			if (tsg->tgid == tgid) {
 				NVGPU_SCHED_SET(tsgid, bitmap);
+			}
 		}
 	}
 	nvgpu_mutex_release(&sched->status_lock);
@@ -198,7 +199,7 @@ static int gk20a_sched_dev_ioctl_get_params(struct gk20a_sched_ctrl *sched,
 	nvgpu_speculation_barrier();
 
-	tsg = &f->tsg[tsgid];
+	tsg = nvgpu_tsg_get_from_id(g, tsgid);
 
 	if (!nvgpu_ref_get_unless_zero(&tsg->refcount))
 		return -ENXIO;
@@ -233,7 +234,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_timeslice(
 	nvgpu_speculation_barrier();
 
-	tsg = &f->tsg[tsgid];
+	tsg = nvgpu_tsg_get_from_id(g, tsgid);
 
 	if (!nvgpu_ref_get_unless_zero(&tsg->refcount))
 		return -ENXIO;
@@ -268,7 +269,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave(
 	nvgpu_speculation_barrier();
 
-	tsg = &f->tsg[tsgid];
+	tsg = nvgpu_tsg_get_from_id(g, tsgid);
 
 	if (!nvgpu_ref_get_unless_zero(&tsg->refcount))
 		return -ENXIO;
@@ -336,7 +337,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched,
 	nvgpu_speculation_barrier();
 
-	tsg = &f->tsg[tsgid];
+	tsg = nvgpu_tsg_get_from_id(g, tsgid);
 
 	if (!nvgpu_ref_get_unless_zero(&tsg->refcount))
 		return -ENXIO;
@@ -382,7 +383,7 @@ static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched,
 	NVGPU_SCHED_CLR(tsgid, sched->ref_tsg_bitmap);
 	nvgpu_mutex_release(&sched->status_lock);
 
-	tsg = &f->tsg[tsgid];
+	tsg = nvgpu_tsg_get_from_id(g, tsgid);
 	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);
 
 	return 0;
@@ -527,7 +528,7 @@ int gk20a_sched_dev_release(struct inode *inode, struct file *filp)
 	/* release any reference to TSGs */
 	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
 		if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) {
-			tsg = &f->tsg[tsgid];
+			tsg = nvgpu_tsg_get_from_id(g, tsgid);
 			nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);
 		}
 	}