gpu: nvgpu: remove gk20a_is_channel_marked_as_tsg
Use tsg_gk20a_from_ch() to get the tsg pointer for a channel's tsgid. For
an invalid tsgid, the tsg pointer will be NULL.
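In other words, callers no longer ask "is this channel marked as TSG?" and
then index the TSG table themselves; a single lookup returns either a valid
pointer or NULL. A minimal standalone sketch of the two idioms (simplified
types, table size, and names, not the driver's actual structures):

	#include <stdio.h>

	#define INVALID_TSG_ID 0xffffffffu	/* stands in for NVGPU_INVALID_TSG_ID */

	struct tsg { unsigned int tsgid; };
	struct channel { unsigned int tsgid; };

	static struct tsg tsg_table[8];		/* stands in for g->fifo.tsg[] */

	/* Old idiom: boolean marker check; the caller still indexes the table. */
	static int is_marked_as_tsg(const struct channel *ch)
	{
		return ch->tsgid != INVALID_TSG_ID;
	}

	/* New idiom: one lookup; NULL means "not bound to a TSG". */
	static struct tsg *tsg_from_ch(const struct channel *ch)
	{
		if (ch->tsgid == INVALID_TSG_ID)
			return NULL;
		return &tsg_table[ch->tsgid];
	}

	int main(void)
	{
		struct channel ch = { .tsgid = INVALID_TSG_ID };
		struct tsg *tsg;

		/* Old: two steps; nothing stops a lookup on an unbound channel. */
		if (is_marked_as_tsg(&ch))
			printf("old: tsgid %u\n", tsg_table[ch.tsgid].tsgid);

		/* New: validity travels with the pointer. */
		tsg = tsg_from_ch(&ch);
		printf("new: %s\n", tsg != NULL ? "bound" : "not bound");
		return 0;
	}

The NULL return folds the validity test and the lookup into one step, so every
caller in the hunks below gets the same error behavior instead of open-coding
the tsgid check.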
Bug 2092051
Bug 2429295
Bug 2484211
Change-Id: I82cd6a2dc5fab4acb147202af667ca97a2842a73
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2006722
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
(cherry picked from commit 13f37f9c70 in dev-kernel)
Reviewed-on: https://git-master.nvidia.com/r/2025507
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
parent 0c47ce7d72
commit e00804594b
@@ -839,6 +839,7 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 	int err = 0;
 	u32 id = FIFO_INVAL_TSG_ID;
 	unsigned int rc_type = RC_TYPE_NO_RC;
+	struct tsg_gk20a *tsg = NULL;
 
 	if (!mmfault->valid) {
 		return;
@@ -912,14 +913,17 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 			mmfault->refch->mmu_nack_handled = true;
 		}
 
-		rc_type = RC_TYPE_MMU_FAULT;
-		if (gk20a_is_channel_marked_as_tsg(mmfault->refch)) {
+		tsg = tsg_gk20a_from_ch(mmfault->refch);
+		if (tsg != NULL) {
 			id = mmfault->refch->tsgid;
 			if (id != FIFO_INVAL_TSG_ID) {
 				id_type = ID_TYPE_TSG;
 			}
+			rc_type = RC_TYPE_MMU_FAULT;
 		} else {
-			nvgpu_err(g, "bare channels not supported");
+			nvgpu_err(g, "chid: %d is referenceable but "
+				"not bound to tsg",
+				mmfault->refch->chid);
 			id_type = ID_TYPE_CHANNEL;
 			rc_type = RC_TYPE_NO_RC;
 		}
@@ -326,7 +326,12 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	 */
 	if (!nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
 		/* abort channel and remove from runlist */
-		if (gk20a_is_channel_marked_as_tsg(ch)) {
+		if (tsg_gk20a_from_ch(ch) != NULL) {
+			/* Between tsg is not null and unbind_channel call,
+			 * ioctl cannot be called anymore because user doesn't
+			 * have an open channel fd anymore to use for the unbind
+			 * ioctl.
+			 */
			err = gk20a_tsg_unbind_channel(ch);
 			if (err) {
 				nvgpu_err(g,
@@ -2264,7 +2269,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 	if (err) {
 		goto fail_6;
 	}
-
+	nvgpu_init_list_node(&c->ch_entry);
 	nvgpu_list_add(&c->free_chs, &g->fifo.free_chs);
 
 	return 0;
@@ -2403,10 +2408,9 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
 			nvgpu_cond_broadcast_interruptible(
 					&c->semaphore_wq);
 			if (post_events) {
-				if (gk20a_is_channel_marked_as_tsg(c)) {
-					struct tsg_gk20a *tsg =
-						&g->fifo.tsg[c->tsgid];
-
+				struct tsg_gk20a *tsg =
+					tsg_gk20a_from_ch(c);
+				if (tsg != NULL) {
 					g->ops.fifo.post_event_id(tsg,
 						NVGPU_EVENT_ID_BLOCKING_SYNC);
 				}
@@ -28,11 +28,6 @@
 #include <nvgpu/tsg.h>
 #include <nvgpu/gk20a.h>
 
-bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
-{
-	return !(ch->tsgid == NVGPU_INVALID_TSG_ID);
-}
-
 int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 {
 	struct gk20a *g = tsg->g;
@@ -116,7 +111,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
 	nvgpu_log_fn(g, " ");
 
 	/* check if channel is already bound to some TSG */
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
+	if (tsg_gk20a_from_ch(ch) != NULL) {
 		return -EINVAL;
 	}
 
@@ -125,7 +120,6 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
 		return -EINVAL;
 	}
 
-	ch->tsgid = tsg->tsgid;
 
 	/* all the channel part of TSG should need to be same runlist_id */
 	if (tsg->runlist_id == FIFO_INVAL_TSG_ID) {
@@ -139,6 +133,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
 
 	nvgpu_rwsem_down_write(&tsg->ch_list_lock);
 	nvgpu_list_add_tail(&ch->ch_entry, &tsg->ch_list);
+	ch->tsgid = tsg->tsgid;
 	nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 
 	nvgpu_ref_get(&tsg->refcount);
@@ -172,14 +167,13 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
 
 		nvgpu_rwsem_down_write(&tsg->ch_list_lock);
 		nvgpu_list_del(&ch->ch_entry);
+		ch->tsgid = NVGPU_INVALID_TSG_ID;
 		nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 	}
+	nvgpu_log(g, gpu_dbg_fn, "UNBIND tsg:%d channel:%d",
+		tsg->tsgid, ch->chid);
 
 	nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
-	ch->tsgid = NVGPU_INVALID_TSG_ID;
-
-	nvgpu_log(g, gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n",
-		tsg->tsgid, ch->chid);
 
 	return 0;
 }
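A side effect of the new NULL contract is that ch->tsgid is now reset while
the TSG's channel list is still write-locked (here and again in
gk20a_fifo_tsg_unbind_channel below), so nobody can observe a channel that is
already unlinked but still carries a stale tsgid. A standalone sketch of that
ordering, using a pthread rwlock and invented names in place of nvgpu's rwsem
and list primitives:

	#include <pthread.h>
	#include <stdio.h>

	#define INVALID_TSG_ID 0xffffffffu

	struct channel {
		unsigned int tsgid;
		struct channel *next;		/* stand-in for the ch_entry list node */
	};

	struct tsg {
		struct channel *ch_list;
		pthread_rwlock_t ch_list_lock;	/* stand-in for nvgpu_rwsem */
	};

	/* Unlink the channel and invalidate its tsgid in one critical section,
	 * mirroring the reordering in gk20a_tsg_unbind_channel(). */
	static void unbind_channel(struct tsg *tsg, struct channel *ch)
	{
		pthread_rwlock_wrlock(&tsg->ch_list_lock);
		if (tsg->ch_list == ch)		/* one-element list for brevity */
			tsg->ch_list = ch->next;
		ch->tsgid = INVALID_TSG_ID;	/* previously reset after the unlock */
		pthread_rwlock_unlock(&tsg->ch_list_lock);
	}

	int main(void)
	{
		struct tsg tsg = { 0 };
		struct channel ch = { .tsgid = 3 };

		pthread_rwlock_init(&tsg.ch_list_lock, NULL);
		tsg.ch_list = &ch;
		unbind_channel(&tsg, &ch);
		printf("tsgid after unbind: 0x%x\n", ch.tsgid);
		pthread_rwlock_destroy(&tsg.ch_list_lock);
		return 0;
	}

Build with -lpthread. Readers that take ch_list_lock therefore see the list
membership and the tsgid change atomically with respect to each other.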
@@ -395,13 +389,17 @@ void gk20a_tsg_release(struct nvgpu_ref *ref)
 struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch)
 {
 	struct tsg_gk20a *tsg = NULL;
+	u32 tsgid = ch->tsgid;
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
+	if (tsgid != NVGPU_INVALID_TSG_ID) {
 		struct gk20a *g = ch->g;
 		struct fifo_gk20a *f = &g->fifo;
-		tsg = &f->tsg[ch->tsgid];
-	}
 
+		tsg = &f->tsg[tsgid];
+	} else {
+		nvgpu_log(ch->g, gpu_dbg_fn, "tsgid is invalid for chid: %d",
+			ch->chid);
+	}
 	return tsg;
 }
 
@@ -2154,11 +2154,15 @@ int gk20a_fifo_tsg_unbind_channel_verify_status(struct channel_gk20a *ch)
 int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
 {
 	struct gk20a *g = ch->g;
-	struct fifo_gk20a *f = &g->fifo;
-	struct tsg_gk20a *tsg = &f->tsg[ch->tsgid];
+	struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);
 	int err;
 	bool tsg_timedout = false;
 
+	if (tsg == NULL) {
+		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
+		return 0;
+	}
+
 	/* If one channel in TSG times out, we disable all channels */
 	nvgpu_rwsem_down_write(&tsg->ch_list_lock);
 	tsg_timedout = gk20a_channel_check_timedout(ch);
@@ -2188,6 +2192,7 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
 	/* Remove channel from TSG and re-enable rest of the channels */
 	nvgpu_rwsem_down_write(&tsg->ch_list_lock);
 	nvgpu_list_del(&ch->ch_entry);
+	ch->tsgid = NVGPU_INVALID_TSG_ID;
 	nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 
 	/*
@@ -3485,9 +3490,7 @@ int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 	   Otherwise, keep active list untouched for suspend/resume. */
 	if (chid != FIFO_INVAL_CHANNEL_ID) {
 		ch = &f->channel[chid];
-		if (gk20a_is_channel_marked_as_tsg(ch)) {
-			tsg = &f->tsg[ch->tsgid];
-		}
+		tsg = tsg_gk20a_from_ch(ch);
 
 		if (add) {
 			if (test_and_set_bit(chid,
@@ -8077,6 +8077,7 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch)
 	struct gk20a *g = ch->g;
 	struct channel_gk20a *curr_ch;
 	bool ret = false;
+	struct tsg_gk20a *tsg;
 
 	curr_gr_ctx = gk20a_readl(g, gr_fecs_current_ctx_r());
 
@@ -8108,7 +8109,8 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch)
 		ret = true;
 	}
 
-	if (gk20a_is_channel_marked_as_tsg(ch) && (ch->tsgid == curr_gr_tsgid)) {
+	tsg = tsg_gk20a_from_ch(ch);
+	if ((tsg != NULL) && (tsg->tsgid == curr_gr_tsgid)) {
 		ret = true;
 	}
 
@@ -32,7 +32,6 @@
 
 struct channel_gk20a;
 
-bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch);
 struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid);
 void gk20a_tsg_release(struct nvgpu_ref *ref);
 
@@ -1667,6 +1667,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
 	struct gk20a *g = dbg_s->g;
 	struct dbg_profiler_object_data *prof_obj, *my_prof_obj;
 	int err = 0;
+	struct tsg_gk20a *tsg;
 
 	nvgpu_log_fn(g, "%s profiler_handle = %x", g->name, profiler_handle);
 
@@ -1709,11 +1710,11 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
 		nvgpu_err(g,
 			"per-ctxt reserve: global reservation in effect");
 		err = -EBUSY;
-	} else if (gk20a_is_channel_marked_as_tsg(my_prof_obj->ch)) {
+	} else if ((tsg = tsg_gk20a_from_ch(my_prof_obj->ch)) != NULL) {
 		/* TSG: check that another channel in the TSG
 		 * doesn't already have the reservation
 		 */
-		u32 my_tsgid = my_prof_obj->ch->tsgid;
+		u32 my_tsgid = tsg->tsgid;
 
 		nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
 				dbg_profiler_object_data, prof_obj_entry) {
@@ -501,7 +501,6 @@ static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
 int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 {
 	struct gk20a *g = c->g;
-	struct fifo_gk20a *f = &g->fifo;
 	struct nvgpu_gr_ctx *gr_ctx = NULL;
 	struct tsg_gk20a *tsg = NULL;
 	int err = 0;
@@ -522,10 +521,11 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 	}
 	c->obj_class = class_num;
 
-	if (!gk20a_is_channel_marked_as_tsg(c))
+	tsg = tsg_gk20a_from_ch(c);
+	if (tsg == NULL) {
 		return -EINVAL;
+	}
 
-	tsg = &f->tsg[c->tsgid];
 	gr_ctx = &tsg->gr_ctx;
 
 	if (!nvgpu_mem_is_valid(&gr_ctx->mem)) {