gpu: nvgpu: rename mutex to runlist_lock

Rename the mutex field of struct fifo_runlist_info_gk20a to
runlist_lock. The more descriptive name improves code readability.

Bug 2065990
Bug 2043838

Change-Id: I716685e3fad538458181d2a9fe592410401862b9
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1662587
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Commit bf03799977 (parent ea92688965)
Author: Seema Khowala <seemaj@nvidia.com>, 2018-01-08 15:41:39 -08:00
Committed by: mobile promotions
4 changed files with 21 additions and 20 deletions


@@ -522,7 +522,7 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
 		nvgpu_kfree(g, runlist->active_tsgs);
 		runlist->active_tsgs = NULL;
-		nvgpu_mutex_destroy(&runlist->mutex);
+		nvgpu_mutex_destroy(&runlist->runlist_lock);
 	}
 	memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) *
@@ -716,7 +716,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 			goto clean_up_runlist;
 		}
 	}
-	nvgpu_mutex_init(&runlist->mutex);
+	nvgpu_mutex_init(&runlist->runlist_lock);
 	/* None of buffers is pinned if this value doesn't change.
 	Otherwise, one of them (cur_buffer) must have been pinned. */
@@ -2771,7 +2771,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 	/* we have no idea which runlist we are using. lock all */
 	for (i = 0; i < g->fifo.max_runlists; i++)
-		nvgpu_mutex_acquire(&f->runlist_info[i].mutex);
+		nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -2781,7 +2781,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	for (i = 0; i < g->fifo.max_runlists; i++)
-		nvgpu_mutex_release(&f->runlist_info[i].mutex);
+		nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
 	return ret;
 }
@@ -2798,7 +2798,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	/* we have no idea which runlist we are using. lock all */
 	for (i = 0; i < g->fifo.max_runlists; i++)
-		nvgpu_mutex_acquire(&f->runlist_info[i].mutex);
+		nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -2808,7 +2808,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	for (i = 0; i < g->fifo.max_runlists; i++)
-		nvgpu_mutex_release(&f->runlist_info[i].mutex);
+		nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
 	return ret;
 }
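
Both preempt paths above share one rule: the channel's runlist is unknown
at preempt time, so every runlist_lock is taken, always in ascending index
order, which prevents two such callers from deadlocking against each other.
A minimal stand-alone model of that ordering discipline, using plain
pthread mutexes in place of the nvgpu_mutex wrappers (MAX_RUNLISTS and the
function names are illustrative, not nvgpu API):

#include <pthread.h>

#define MAX_RUNLISTS 4	/* stand-in for g->fifo.max_runlists */

static pthread_mutex_t runlist_lock[MAX_RUNLISTS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

/*
 * Take every runlist lock in ascending index order. Any two threads
 * doing this contend on the lowest unowned index first, so neither
 * can wait on a lower lock while holding a higher one -- the classic
 * AB-BA deadlock cannot form.
 */
void lock_all_runlists(void)
{
	for (int i = 0; i < MAX_RUNLISTS; i++)
		pthread_mutex_lock(&runlist_lock[i]);
}

void unlock_all_runlists(void)
{
	/* release order is free; reverse order is conventional */
	for (int i = MAX_RUNLISTS - 1; i >= 0; i--)
		pthread_mutex_unlock(&runlist_lock[i]);
}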
@@ -3385,7 +3385,7 @@ int gk20a_fifo_reschedule_runlist(struct gk20a *g, u32 runlist_id)
 	int ret = 0;
 	runlist = &g->fifo.runlist_info[runlist_id];
-	if (nvgpu_mutex_tryacquire(&runlist->mutex)) {
+	if (nvgpu_mutex_tryacquire(&runlist->runlist_lock)) {
 		mutex_ret = nvgpu_pmu_mutex_acquire(
 			&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -3396,7 +3396,7 @@ int gk20a_fifo_reschedule_runlist(struct gk20a *g, u32 runlist_id)
 		if (!mutex_ret)
 			nvgpu_pmu_mutex_release(
 				&g->pmu, PMU_MUTEX_ID_FIFO, &token);
-		nvgpu_mutex_release(&runlist->mutex);
+		nvgpu_mutex_release(&runlist->runlist_lock);
 	} else {
 		/* someone else is writing fifo_runlist_r so not needed here */
 		ret = -EBUSY;
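
Here the lock is taken with tryacquire rather than acquire: if another
thread already holds runlist_lock it is rewriting fifo_runlist_r anyway,
so the reschedule is redundant and the function returns -EBUSY instead of
sleeping. A stand-alone sketch of that non-blocking pattern with pthreads
(the function name is illustrative):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t runlist_lock = PTHREAD_MUTEX_INITIALIZER;

/* Non-blocking reschedule: if the lock is contended, the current
 * holder is updating the runlist anyway, so back off rather than
 * queue a redundant rewrite behind it. */
int reschedule_runlist(void)
{
	if (pthread_mutex_trylock(&runlist_lock) != 0)
		return -EBUSY;	/* someone else owns the update */

	/* ... build and submit the reordered runlist here ... */

	pthread_mutex_unlock(&runlist_lock);
	return 0;
}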
@@ -3421,7 +3421,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
 	runlist = &f->runlist_info[runlist_id];
-	nvgpu_mutex_acquire(&runlist->mutex);
+	nvgpu_mutex_acquire(&runlist->runlist_lock);
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -3431,7 +3431,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
 	if (!mutex_ret)
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
-	nvgpu_mutex_release(&runlist->mutex);
+	nvgpu_mutex_release(&runlist->runlist_lock);
 	return ret;
 }


@@ -91,7 +91,8 @@ struct fifo_runlist_info_gk20a {
 	u32 reset_eng_bitmask; /* engines to be reset during recovery */
 	bool stopped;
 	bool support_tsg;
-	struct nvgpu_mutex mutex; /* protect channel preempt and runlist update */
+	/* protect ch/tsg/runlist preempt & runlist update */
+	struct nvgpu_mutex runlist_lock;
 };
 enum {

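The real change in the header hunk is the comment: runlist_lock now
explicitly guards channel preempt, TSG preempt, and runlist updates. A
simplified stand-alone model of that invariant (field names mirror the
nvgpu struct, but the types are plain-C stand-ins):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Simplified model of fifo_runlist_info_gk20a: every field touched by
 * a preempt or runlist update is read/written only while runlist_lock
 * is held, so a preempt can never observe a half-written runlist.
 * Initialise the lock with pthread_mutex_init() before first use.
 */
struct runlist_info {
	uint32_t reset_eng_bitmask;	/* engines to reset in recovery */
	bool stopped;
	bool support_tsg;
	pthread_mutex_t runlist_lock;	/* ch/tsg preempt + runlist update */
};

void runlist_update(struct runlist_info *r)
{
	pthread_mutex_lock(&r->runlist_lock);
	/* ... rebuild and submit the runlist ... */
	pthread_mutex_unlock(&r->runlist_lock);
}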

@@ -817,7 +817,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	runlist_id = f->tsg[tsgid].runlist_id;
 	gk20a_dbg_fn("runlist_id %d", runlist_id);
-	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex);
+	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -826,7 +826,7 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	if (!mutex_ret)
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
-	nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);
+	nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
 	return ret;
 }
@@ -844,7 +844,7 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
 	for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
 		if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id))
 			nvgpu_mutex_acquire(&g->fifo.
-				runlist_info[runlist_id].mutex);
+				runlist_info[runlist_id].runlist_lock);
 	}
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -861,7 +861,7 @@ static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
 			g->fifo.runlist_info[runlist_id].reset_eng_bitmask =
 				g->fifo.runlist_info[runlist_id].eng_bitmask;
 			nvgpu_mutex_release(&g->fifo.
-				runlist_info[runlist_id].mutex);
+				runlist_info[runlist_id].runlist_lock);
 		}
 	}
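
Unlike the gk20a paths that lock everything, gv11b_fifo_preempt_runlists
locks only the runlists selected by runlists_mask, still walking indices
upward so the subset stays deadlock-free against the lock-all callers. A
stand-alone sketch of the mask walk (the bit test here is an assumption
standing in for fifo_runlist_preempt_runlist_m()):

#include <pthread.h>
#include <stdint.h>

#define MAX_RUNLISTS 4

/* initialise each entry with pthread_mutex_init() before use */
static pthread_mutex_t runlist_lock[MAX_RUNLISTS];

/*
 * Lock only the runlists named in the mask, still in ascending index
 * order: because subset lockers and lock-all lockers follow the same
 * order, mixing them cannot deadlock. Bit i selecting runlist i is an
 * assumption of this sketch.
 */
void lock_masked_runlists(uint32_t runlists_mask)
{
	for (uint32_t i = 0; i < MAX_RUNLISTS; i++)
		if (runlists_mask & (1u << i))
			pthread_mutex_lock(&runlist_lock[i]);
}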
@@ -916,7 +916,7 @@ int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
 	gk20a_dbg_fn("preempt id = %d, runlist_id = %d", id, runlist_id);
-	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex);
+	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -925,7 +925,7 @@ int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
 	if (!mutex_ret)
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
-	nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);
+	nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
 	return ret;


@@ -249,7 +249,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 			goto clean_up_runlist;
 		}
 	}
-	nvgpu_mutex_init(&runlist->mutex);
+	nvgpu_mutex_init(&runlist->runlist_lock);
 	/* None of buffers is pinned if this value doesn't change.
 	Otherwise, one of them (cur_buffer) must have been pinned. */
@@ -585,12 +585,12 @@ int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
 	runlist = &f->runlist_info[runlist_id];
-	nvgpu_mutex_acquire(&runlist->mutex);
+	nvgpu_mutex_acquire(&runlist->runlist_lock);
 	ret = vgpu_fifo_update_runlist_locked(g, runlist_id, chid, add,
 			wait_for_finish);
-	nvgpu_mutex_release(&runlist->mutex);
+	nvgpu_mutex_release(&runlist->runlist_lock);
 	return ret;
 }
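
vgpu_fifo_update_runlist is a thin wrapper: acquire runlist_lock, delegate
to the _locked worker, release. The split lets code that already holds the
lock call the worker directly. A stand-alone sketch of that convention,
again with pthreads standing in for nvgpu_mutex (the function names and
parameters are illustrative):

#include <pthread.h>

static pthread_mutex_t runlist_lock = PTHREAD_MUTEX_INITIALIZER;

/* Worker: caller must already hold runlist_lock. */
static int update_runlist_locked(int chid, int add)
{
	/* ... add or remove chid in the runlist image ... */
	(void)chid;
	(void)add;
	return 0;
}

/* Entry point: owns the locking so the worker stays callable from
 * paths that already hold the lock. */
int update_runlist(int chid, int add)
{
	int ret;

	pthread_mutex_lock(&runlist_lock);
	ret = update_runlist_locked(chid, add);
	pthread_mutex_unlock(&runlist_lock);
	return ret;
}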