Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: make jobs_lock more fine grained
While processing all the jobs in gk20a_channel_clean_up_jobs(), we currently acquire jobs_lock, traverse the list, clean up the jobs, and then release the lock. But in this case we might hold the lock for too long, blocking the submit path. Hence make jobs_lock more fine grained by restricting it to list accesses only.

Bug 200187553

Change-Id: If82af8ff386f7bc29061cfd57fdda7df62f11c17
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1120412
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
Committed by: Terje Bergstrom
Parent: c8d17e9167
Commit: 2ac8c9729a
@@ -1862,23 +1862,33 @@ static void gk20a_channel_clean_up_jobs(struct work_struct *work)
 	struct channel_gk20a *c = container_of(to_delayed_work(work),
 			struct channel_gk20a, clean_up.wq);
 	struct vm_gk20a *vm;
-	struct channel_gk20a_job *job, *n;
+	struct channel_gk20a_job *job;
 	struct gk20a_platform *platform;
+	struct gk20a *g;
 
 	c = gk20a_channel_get(c);
 	if (!c)
 		return;
 
 	vm = c->vm;
-	platform = gk20a_get_platform(c->g->dev);
+	g = c->g;
+	platform = gk20a_get_platform(g->dev);
 
 	gk20a_channel_cancel_job_clean_up(c, false);
 
-	mutex_lock(&c->jobs_lock);
-	list_for_each_entry_safe(job, n, &c->jobs, list) {
-		struct gk20a *g = c->g;
+	while (1) {
+		bool completed;
 
-		bool completed = gk20a_fence_is_expired(job->post_fence);
+		mutex_lock(&c->jobs_lock);
+		if (list_empty(&c->jobs)) {
+			mutex_unlock(&c->jobs_lock);
+			break;
+		}
+		job = list_first_entry(&c->jobs,
+				struct channel_gk20a_job, list);
+		mutex_unlock(&c->jobs_lock);
+
+		completed = gk20a_fence_is_expired(job->post_fence);
 		if (!completed) {
 			gk20a_channel_timeout_start(c, job);
 			break;
@@ -1919,13 +1929,15 @@ static void gk20a_channel_clean_up_jobs(struct work_struct *work)
 		 * so this wouldn't get freed here. */
 		gk20a_channel_put(c);
 
+		mutex_lock(&c->jobs_lock);
 		list_del_init(&job->list);
+		mutex_unlock(&c->jobs_lock);
+
 		kfree(job);
+
 		gk20a_idle(g->dev);
 	}
 
-	mutex_unlock(&c->jobs_lock);
-
 	if (c->update_fn)
 		schedule_work(&c->update_fn_work);
 
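For readers who want the locking change in isolation, below is a minimal, self-contained user-space sketch of the same pattern; it is not the gk20a driver code. The names job, job_list, jobs_lock, submit_job, and clean_up_jobs are hypothetical stand-ins, and a pthread mutex replaces the kernel mutex. The point it illustrates is the one in the commit message: the lock is held only long enough to peek at or unlink the head of the list, while the potentially slow per-job cleanup runs with the lock dropped, so the submit path can append new jobs without waiting for the whole cleanup pass.

/*
 * Minimal user-space sketch of the fine-grained jobs_lock pattern from
 * the commit above. Every name here (job, job_list, jobs_lock,
 * submit_job, clean_up_jobs) is a hypothetical stand-in, not the gk20a
 * driver code.
 *
 * Build: cc -pthread -o jobs_lock_sketch jobs_lock_sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct job {
	struct job *next;
	bool done;		/* stands in for gk20a_fence_is_expired() */
	int id;
};

static struct job *job_list;	/* head of the pending-job queue */
static pthread_mutex_t jobs_lock = PTHREAD_MUTEX_INITIALIZER;

/* Submit path: the lock is held only for the list append. */
static void submit_job(struct job *job)
{
	struct job **p;

	job->next = NULL;
	pthread_mutex_lock(&jobs_lock);
	for (p = &job_list; *p; p = &(*p)->next)
		;
	*p = job;
	pthread_mutex_unlock(&jobs_lock);
}

/* Cleanup path: mirrors the structure of gk20a_channel_clean_up_jobs()
 * after the change, with the lock scoped to the list accesses only. */
static void clean_up_jobs(void)
{
	while (1) {
		struct job *job;

		/* Take the lock only to peek at the head of the list. */
		pthread_mutex_lock(&jobs_lock);
		if (!job_list) {
			pthread_mutex_unlock(&jobs_lock);
			break;
		}
		job = job_list;
		pthread_mutex_unlock(&jobs_lock);

		/* Only this worker removes entries, so the head pointer we
		 * just read stays valid after dropping the lock. */
		if (!job->done)
			break;

		/* The potentially slow per-job cleanup (fence release,
		 * unmap, and so on in the real driver) runs unlocked here,
		 * so the submit path is free to append new jobs meanwhile. */
		printf("cleaned job %d\n", job->id);

		/* Re-take the lock just long enough to unlink the entry. */
		pthread_mutex_lock(&jobs_lock);
		job_list = job->next;
		pthread_mutex_unlock(&jobs_lock);

		free(job);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct job *job = calloc(1, sizeof(*job));

		job->id = i;
		job->done = true;	/* pretend the fence already expired */
		submit_job(job);
	}
	clean_up_jobs();
	return 0;
}

This single-consumer assumption (only the cleanup worker ever removes entries) is what makes it safe to read the head under the lock, drop the lock for the slow work, and re-acquire it only for the unlink, which is the same reasoning the kernel change relies on.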