gpu: nvgpu: remove gk20a_is_channel_marked_as_tsg

Use tsg_gk20a_from_ch() to get the tsg pointer for a channel's tsgid. For
an invalid tsgid, the returned tsg pointer will be NULL.

Bug 2092051
Bug 2429295
Bug 2484211

Change-Id: I82cd6a2dc5fab4acb147202af667ca97a2842a73
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2006722
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Seema Khowala <seemaj@nvidia.com>
Date: 2018-12-27 19:23:06 -08:00
Committed by: mobile promotions
Parent: 5001308dc4
Commit: 13f37f9c70

8 changed files with 47 additions and 35 deletions
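
For reference, a minimal sketch of the calling pattern this change converges on
(illustrative only: example_use_tsg is a hypothetical helper and not part of this
commit; tsg_gk20a_from_ch, struct channel_gk20a and struct tsg_gk20a are the nvgpu
symbols touched in the hunks below):

    /* Illustrative sketch: resolve a channel's TSG and handle the unbound case. */
    static int example_use_tsg(struct channel_gk20a *ch)
    {
            struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);

            if (tsg == NULL) {
                    /* ch->tsgid is invalid: the channel is not bound to a TSG */
                    return -EINVAL;
            }

            /* tsg is valid here; tsg->tsgid can be used instead of ch->tsgid */
            return 0;
    }

The removed gk20a_is_channel_marked_as_tsg() only tested ch->tsgid; resolving the
tsg pointer up front lets callers check for the unbound case and use the TSG in one
step, which is the pattern applied in the diffs below.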


@@ -844,6 +844,7 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
         int err = 0;
         u32 id = FIFO_INVAL_TSG_ID;
         unsigned int rc_type = RC_TYPE_NO_RC;
+        struct tsg_gk20a *tsg = NULL;
 
         if (!mmfault->valid) {
                 return;
@@ -917,14 +918,17 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
                         mmfault->refch->mmu_nack_handled = true;
                 }
 
-                rc_type = RC_TYPE_MMU_FAULT;
-                if (gk20a_is_channel_marked_as_tsg(mmfault->refch)) {
+                tsg = tsg_gk20a_from_ch(mmfault->refch);
+                if (tsg != NULL) {
                         id = mmfault->refch->tsgid;
-                        if (id != FIFO_INVAL_TSG_ID) {
-                                id_type = ID_TYPE_TSG;
-                        }
+                        id_type = ID_TYPE_TSG;
+                        rc_type = RC_TYPE_MMU_FAULT;
                 } else {
-                        nvgpu_err(g, "bare channels not supported");
+                        nvgpu_err(g, "chid: %d is referenceable but "
+                                "not bound to tsg",
+                                mmfault->refch->chid);
+                        id_type = ID_TYPE_CHANNEL;
+                        rc_type = RC_TYPE_NO_RC;
                 }
         }


@@ -329,7 +329,12 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
          */
         if (!nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
                 /* abort channel and remove from runlist */
-                if (gk20a_is_channel_marked_as_tsg(ch)) {
+                if (tsg_gk20a_from_ch(ch) != NULL) {
+                        /* Between tsg is not null and unbind_channel call,
+                         * ioctl cannot be called anymore because user doesn't
+                         * have an open channel fd anymore to use for the unbind
+                         * ioctl.
+                         */
                         err = gk20a_tsg_unbind_channel(ch);
                         if (err != 0) {
                                 nvgpu_err(g,
@@ -2371,7 +2376,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
         if (err != 0) {
                 goto fail_6;
         }
 
+        nvgpu_init_list_node(&c->ch_entry);
         nvgpu_list_add(&c->free_chs, &g->fifo.free_chs);
 
         return 0;
@@ -2511,10 +2516,9 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
                         nvgpu_cond_broadcast_interruptible(
                                         &c->semaphore_wq);
                         if (post_events) {
-                                if (gk20a_is_channel_marked_as_tsg(c)) {
-                                        struct tsg_gk20a *tsg =
-                                                &g->fifo.tsg[c->tsgid];
-
+                                struct tsg_gk20a *tsg =
+                                        tsg_gk20a_from_ch(c);
+                                if (tsg != NULL) {
                                         g->ops.fifo.post_event_id(tsg,
                                                 NVGPU_EVENT_ID_BLOCKING_SYNC);
                                 }


@@ -34,11 +34,6 @@
 #include "gk20a/gr_gk20a.h"
 
-bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
-{
-        return !(ch->tsgid == NVGPU_INVALID_TSG_ID);
-}
-
 int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 {
         struct gk20a *g = tsg->g;
@@ -121,7 +116,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
         nvgpu_log_fn(g, " ");
 
         /* check if channel is already bound to some TSG */
-        if (gk20a_is_channel_marked_as_tsg(ch)) {
+        if (tsg_gk20a_from_ch(ch) != NULL) {
                 return -EINVAL;
         }
@@ -130,7 +125,6 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
                 return -EINVAL;
         }
 
-        ch->tsgid = tsg->tsgid;
         /* all the channel part of TSG should need to be same runlist_id */
         if (tsg->runlist_id == FIFO_INVAL_TSG_ID) {
@@ -144,6 +138,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
         nvgpu_rwsem_down_write(&tsg->ch_list_lock);
         nvgpu_list_add_tail(&ch->ch_entry, &tsg->ch_list);
+        ch->tsgid = tsg->tsgid;
         nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 
         nvgpu_ref_get(&tsg->refcount);
@@ -175,14 +170,13 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
                 nvgpu_rwsem_down_write(&tsg->ch_list_lock);
                 nvgpu_list_del(&ch->ch_entry);
+                ch->tsgid = NVGPU_INVALID_TSG_ID;
                 nvgpu_rwsem_up_write(&tsg->ch_list_lock);
         }
 
+        nvgpu_log(g, gpu_dbg_fn, "UNBIND tsg:%d channel:%d",
+                tsg->tsgid, ch->chid);
+
         nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
-        ch->tsgid = NVGPU_INVALID_TSG_ID;
-
-        nvgpu_log(g, gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n",
-                tsg->tsgid, ch->chid);
 
         return 0;
 }
@@ -588,13 +582,17 @@ void gk20a_tsg_release(struct nvgpu_ref *ref)
 struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch)
 {
         struct tsg_gk20a *tsg = NULL;
+        u32 tsgid = ch->tsgid;
 
-        if (gk20a_is_channel_marked_as_tsg(ch)) {
+        if (tsgid != NVGPU_INVALID_TSG_ID) {
                 struct gk20a *g = ch->g;
                 struct fifo_gk20a *f = &g->fifo;
 
-                tsg = &f->tsg[ch->tsgid];
-        }
+                tsg = &f->tsg[tsgid];
+        } else {
+                nvgpu_log(ch->g, gpu_dbg_fn, "tsgid is invalid for chid: %d",
+                        ch->chid);
+        }
 
         return tsg;
 }


@@ -467,7 +467,6 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g,
 int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 {
         struct gk20a *g = c->g;
-        struct fifo_gk20a *f = &g->fifo;
         struct nvgpu_gr_ctx *gr_ctx = NULL;
         struct tsg_gk20a *tsg = NULL;
         int err = 0;
@@ -488,11 +487,11 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
         }
         c->obj_class = class_num;
 
-        if (!gk20a_is_channel_marked_as_tsg(c)) {
+        tsg = tsg_gk20a_from_ch(c);
+        if (tsg == NULL) {
                 return -EINVAL;
         }
 
-        tsg = &f->tsg[c->tsgid];
         gr_ctx = tsg->gr_ctx;
         if (!nvgpu_mem_is_valid(&gr_ctx->mem)) {


@@ -1594,11 +1594,15 @@ int gk20a_fifo_tsg_unbind_channel_verify_status(struct channel_gk20a *ch)
 int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
 {
         struct gk20a *g = ch->g;
-        struct fifo_gk20a *f = &g->fifo;
-        struct tsg_gk20a *tsg = &f->tsg[ch->tsgid];
+        struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);
         int err;
         bool tsg_timedout = false;
 
+        if (tsg == NULL) {
+                nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
+                return 0;
+        }
+
         /* If one channel in TSG times out, we disable all channels */
         nvgpu_rwsem_down_write(&tsg->ch_list_lock);
         tsg_timedout = gk20a_channel_check_timedout(ch);
@@ -1628,6 +1632,7 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
         /* Remove channel from TSG and re-enable rest of the channels */
         nvgpu_rwsem_down_write(&tsg->ch_list_lock);
         nvgpu_list_del(&ch->ch_entry);
+        ch->tsgid = NVGPU_INVALID_TSG_ID;
         nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 
         /*


@@ -6721,6 +6721,7 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch)
         struct gk20a *g = ch->g;
         struct channel_gk20a *curr_ch;
         bool ret = false;
+        struct tsg_gk20a *tsg;
 
         curr_gr_ctx = gk20a_readl(g, gr_fecs_current_ctx_r());
@@ -6752,7 +6753,8 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch)
                 ret = true;
         }
 
-        if (gk20a_is_channel_marked_as_tsg(ch) && (ch->tsgid == curr_gr_tsgid)) {
+        tsg = tsg_gk20a_from_ch(ch);
+        if ((tsg != NULL) && (tsg->tsgid == curr_gr_tsgid)) {
                 ret = true;
         }


@@ -81,7 +81,6 @@ struct tsg_gk20a {
         struct nvgpu_mutex sm_exception_mask_lock;
 };
 
-bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch);
 int gk20a_tsg_open_common(struct gk20a *g, struct tsg_gk20a *tsg);
 struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid);
 void gk20a_tsg_release_common(struct gk20a *g, struct tsg_gk20a *tsg);


@@ -1673,6 +1673,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
         struct gk20a *g = dbg_s->g;
         struct dbg_profiler_object_data *prof_obj, *my_prof_obj;
         int err = 0;
+        struct tsg_gk20a *tsg;
 
         nvgpu_log_fn(g, "%s profiler_handle = %x", g->name, profiler_handle);
@@ -1715,11 +1716,11 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
                 nvgpu_err(g,
                         "per-ctxt reserve: global reservation in effect");
                 err = -EBUSY;
-        } else if (gk20a_is_channel_marked_as_tsg(my_prof_obj->ch)) {
+        } else if ((tsg = tsg_gk20a_from_ch(my_prof_obj->ch)) != NULL) {
                 /* TSG: check that another channel in the TSG
                  * doesn't already have the reservation
                  */
-                u32 my_tsgid = my_prof_obj->ch->tsgid;
+                u32 my_tsgid = tsg->tsgid;
 
                 nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
                         dbg_profiler_object_data, prof_obj_entry) {