gpu: nvgpu: replace input parameter tsgid with pointer to struct tsg_gk20a

gv11b_fifo_preempt_tsg needs to access the runlist_id of the tsg as
well as pass the tsg pointer to other public functions such as
gk20a_fifo_disable_tsg_sched. This justifies changing preempt_tsg to
take a pointer to a struct tsg_gk20a instead of just the tsgid.

Jira NVGPU-1461

Change-Id: I01fbd2370b5746c2a597a0351e0301b0f7d25175
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1959068
(cherry picked from commit 1e78d47f15 in rel-32)
Reviewed-on: https://git-master.nvidia.com/r/2013725
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Debarshi Dutta
2018-11-27 11:05:56 +05:30
committed by mobile promotions
parent 5b8ecbc51f
commit ef9de9e992
8 changed files with 39 additions and 36 deletions

View File

@@ -1562,7 +1562,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
g->ops.fifo.disable_tsg(tsg);
if (preempt) {
g->ops.fifo.preempt_tsg(g, tsg->tsgid);
g->ops.fifo.preempt_tsg(g, tsg);
}
nvgpu_rwsem_down_read(&tsg->ch_list_lock);
@@ -2194,8 +2194,8 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
/* Disable TSG and examine status before unbinding channel */
g->ops.fifo.disable_tsg(tsg);
err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
if (err) {
err = g->ops.fifo.preempt_tsg(g, tsg);
if (err != 0) {
goto fail_enable_tsg;
}
@@ -3000,7 +3000,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
return ret;
}
int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
{
struct fifo_gk20a *f = &g->fifo;
u32 ret = 0;
@@ -3008,10 +3008,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
u32 mutex_ret = 0;
u32 i;
nvgpu_log_fn(g, "tsgid: %d", tsgid);
if (tsgid == FIFO_INVAL_TSG_ID) {
return 0;
}
nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
/* we have no idea which runlist we are using. lock all */
for (i = 0; i < g->fifo.max_runlists; i++) {
@@ -3020,7 +3017,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
ret = __locked_fifo_preempt(g, tsgid, true);
ret = __locked_fifo_preempt(g, tsg->tsgid, true);
if (!mutex_ret) {
nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -3033,9 +3030,11 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
if (ret) {
if (nvgpu_platform_is_silicon(g)) {
nvgpu_err(g, "preempt timed out for tsgid: %u, "
"ctxsw timeout will trigger recovery if needed", tsgid);
"ctxsw timeout will trigger recovery if needed",
tsg->tsgid);
} else {
gk20a_fifo_preempt_timeout_rc(g, tsgid, true);
gk20a_fifo_preempt_timeout_rc(g,
tsg->tsgid, ID_TYPE_TSG);
}
}
@@ -3045,9 +3044,10 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
{
int err;
struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);
if (gk20a_is_channel_marked_as_tsg(ch)) {
err = g->ops.fifo.preempt_tsg(ch->g, ch->tsgid);
if (tsg != NULL) {
err = g->ops.fifo.preempt_tsg(ch->g, tsg);
} else {
err = g->ops.fifo.preempt_channel(ch->g, ch->chid);
}

View File

@@ -231,7 +231,7 @@ void gk20a_fifo_isr(struct gk20a *g);
u32 gk20a_fifo_nonstall_isr(struct gk20a *g);
int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid);
int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch);
int gk20a_fifo_enable_engine_activity(struct gk20a *g,

View File

@@ -22,6 +22,7 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/bug.h>
#include <nvgpu/semaphore.h>
#include <nvgpu/timers.h>
#include <nvgpu/log.h>
@@ -803,17 +804,22 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid)
{
struct fifo_gk20a *f = &g->fifo;
u32 tsgid;
struct tsg_gk20a *tsg = NULL;
if (chid == FIFO_INVAL_CHANNEL_ID) {
return 0;
}
tsgid = f->channel[chid].tsgid;
nvgpu_log_info(g, "chid:%d tsgid:%d", chid, tsgid);
tsg = tsg_gk20a_from_ch(&f->channel[chid]);
if (tsg == NULL) {
return 0;
}
nvgpu_log_info(g, "chid:%d tsgid:%d", chid, tsg->tsgid);
/* Preempt tsg. Channel preempt is NOOP */
return g->ops.fifo.preempt_tsg(g, tsgid);
return g->ops.fifo.preempt_tsg(g, tsg);
}
/* TSG enable sequence applicable for Volta and onwards */
@@ -837,7 +843,7 @@ int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg)
return 0;
}
int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
{
struct fifo_gk20a *f = &g->fifo;
u32 ret = 0;
@@ -845,12 +851,9 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
u32 mutex_ret = 0;
u32 runlist_id;
nvgpu_log_fn(g, "tsgid: %d", tsgid);
if (tsgid == FIFO_INVAL_TSG_ID) {
return 0;
}
nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
runlist_id = f->tsg[tsgid].runlist_id;
runlist_id = tsg->runlist_id;
nvgpu_log_fn(g, "runlist_id: %d", runlist_id);
if (runlist_id == FIFO_INVAL_RUNLIST_ID) {
return 0;
@@ -859,27 +862,27 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
/* WAR for Bug 2065990 */
gk20a_fifo_disable_tsg_sched(g, &f->tsg[tsgid]);
gk20a_fifo_disable_tsg_sched(g, tsg);
mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
ret = __locked_fifo_preempt(g, tsgid, true);
ret = __locked_fifo_preempt(g, tsg->tsgid, true);
if (!mutex_ret) {
nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
}
/* WAR for Bug 2065990 */
gk20a_fifo_enable_tsg_sched(g, &f->tsg[tsgid]);
gk20a_fifo_enable_tsg_sched(g, tsg);
nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
if (ret) {
if (nvgpu_platform_is_silicon(g)) {
nvgpu_err(g, "preempt timed out for tsgid: %u, "
"ctxsw timeout will trigger recovery if needed", tsgid);
"ctxsw timeout will trigger recovery if needed", tsg->tsgid);
} else {
gk20a_fifo_preempt_timeout_rc(g, tsgid, true);
gk20a_fifo_preempt_timeout_rc(g, tsg->tsgid, true);
}
}

View File

@@ -82,7 +82,7 @@ int gv11b_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next);
int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
unsigned int id_type);
int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid);
int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg);
void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
u32 id, unsigned int id_type, unsigned int rc_type,

View File

@@ -646,7 +646,7 @@ struct gpu_ops {
u32 flags);
int (*resetup_ramfc)(struct channel_gk20a *c);
int (*preempt_channel)(struct gk20a *g, u32 chid);
int (*preempt_tsg)(struct gk20a *g, u32 tsgid);
int (*preempt_tsg)(struct gk20a *g, struct tsg_gk20a *tsg);
int (*enable_tsg)(struct tsg_gk20a *tsg);
int (*disable_tsg)(struct tsg_gk20a *tsg);
int (*tsg_verify_channel_status)(struct channel_gk20a *ch);

View File

@@ -699,7 +699,7 @@ long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
return err;
}
/* preempt TSG */
err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
err = g->ops.fifo.preempt_tsg(g, tsg);
gk20a_idle(g);
break;
}

View File

@@ -471,7 +471,7 @@ int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid)
return err;
}
int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
{
struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_tsg_preempt_params *p =
@@ -482,13 +482,13 @@ int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
msg.handle = vgpu_get_handle(g);
p->tsg_id = tsgid;
p->tsg_id = tsg->tsgid;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
err = err ? err : msg.ret;
if (err) {
nvgpu_err(g,
"preempt tsg %u failed", tsgid);
"preempt tsg %u failed", tsg->tsgid);
}
return err;

View File

@@ -42,7 +42,7 @@ int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
unsigned long acquire_timeout, u32 flags);
int vgpu_fifo_init_engine_info(struct fifo_gk20a *f);
int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid);
int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
u32 chid, bool add, bool wait_for_finish);
int vgpu_fifo_wait_engine_idle(struct gk20a *g);