gpu: nvgpu: defer channel worker initialization

kthread_run() can fail if a SIGKILL is delivered to the application
during driver load.

In this change we defer the channel worker start to the enqueue path
to avoid this failure during driver power-on, which would otherwise
leave the driver state corrupted and make subsequent attempts to load
the driver unsuccessful.

Because the thread can now be started after init time, the task
structure must be protected with a mutex.
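
For reference, kthread_create() waits for kthreadd to spawn the new
thread and returns ERR_PTR(-EINTR) if a fatal signal arrives during
that wait, which is how kthread_run() fails in this scenario. A
minimal sketch of the failure mode (illustrative only, not part of
this patch; the helper name is hypothetical):

    #include <linux/kthread.h>
    #include <linux/err.h>

    static int start_poller_sketch(struct gk20a *g)
    {
            struct task_struct *task;

            /* kthread_run() returns the task or an ERR_PTR value. */
            task = kthread_run(gk20a_channel_poll_worker, g,
                               "nvgpu_channel_poll_%s", g->name);
            if (IS_ERR(task)) {
                    /*
                     * ERR_PTR(-EINTR) means the caller received a
                     * fatal signal (e.g. SIGKILL) while waiting for
                     * kthreadd, so driver load can fail here through
                     * no fault of the driver itself.
                     */
                    return PTR_ERR(task);
            }
            return 0;
    }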

JIRA: EVLR-956
Bug 1816515

Change-Id: I3a159de2d1f03e70b2a3969730a927532ede2d6e
Signed-off-by: David Nieto <dmartineznie@nvidia.com>
Reviewed-on: http://git-master/r/1462490
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Vladislav Buzov <vbuzov@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1460689
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
---

@@ -1878,34 +1878,70 @@ static int gk20a_channel_poll_worker(void *arg)
 	return 0;
 }
 
+static int __nvgpu_channel_worker_start(struct gk20a *g)
+{
+	char thread_name[64];
+	int err = 0;
+
+	if (nvgpu_thread_is_running(&g->channel_worker.poll_task))
+		return err;
+
+	nvgpu_mutex_acquire(&g->channel_worker.start_lock);
+
+	/*
+	 * We don't want to grab a mutex on every channel update, so we check
+	 * again if the worker has been initialized before creating a new
+	 * thread.
+	 */
+	/*
+	 * Mutexes have implicit barriers, so there is no risk of a thread
+	 * having a stale copy of the poll_task variable, as the call to
+	 * thread_is_running is volatile.
+	 */
+	if (nvgpu_thread_is_running(&g->channel_worker.poll_task)) {
+		nvgpu_mutex_release(&g->channel_worker.start_lock);
+		return err;
+	}
+
+	snprintf(thread_name, sizeof(thread_name),
+			"nvgpu_channel_poll_%s", g->name);
+
+	err = nvgpu_thread_create(&g->channel_worker.poll_task, g,
+			gk20a_channel_poll_worker, thread_name);
+
+	nvgpu_mutex_release(&g->channel_worker.start_lock);
+	return err;
+}
+
 /**
  * Initialize the channel worker's metadata and start the background thread.
  */
 int nvgpu_channel_worker_init(struct gk20a *g)
 {
 	int err;
-	char thread_name[64];
 
 	nvgpu_atomic_set(&g->channel_worker.put, 0);
 	nvgpu_cond_init(&g->channel_worker.wq);
 	nvgpu_init_list_node(&g->channel_worker.items);
 	nvgpu_spinlock_init(&g->channel_worker.items_lock);
-	snprintf(thread_name, sizeof(thread_name),
-			"nvgpu_channel_poll_%s", g->name);
+	err = nvgpu_mutex_init(&g->channel_worker.start_lock);
+	if (err)
+		goto error_check;
 
-	err = nvgpu_thread_create(&g->channel_worker.poll_task, g,
-			gk20a_channel_poll_worker, thread_name);
+	err = __nvgpu_channel_worker_start(g);
+error_check:
 	if (err) {
 		nvgpu_err(g, "failed to start channel poller thread");
 		return err;
 	}
 	return 0;
 }
 
 void nvgpu_channel_worker_deinit(struct gk20a *g)
 {
+	nvgpu_mutex_acquire(&g->channel_worker.start_lock);
 	nvgpu_thread_stop(&g->channel_worker.poll_task);
+	nvgpu_mutex_release(&g->channel_worker.start_lock);
 }
 
 /**
@@ -1923,6 +1959,14 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 	gk20a_dbg_fn("");
 
+	/*
+	 * Warn if the worker thread cannot run.
+	 */
+	if (WARN_ON(__nvgpu_channel_worker_start(g))) {
+		nvgpu_warn(g, "channel worker cannot run!");
+		return;
+	}
+
 	/*
 	 * Ref released when this item gets processed. The caller should hold
 	 * one ref already, so normally shouldn't fail, but the channel could
@@ -1215,6 +1215,7 @@ struct gk20a {
 		struct nvgpu_cond wq;
 		struct nvgpu_list_node items;
 		struct nvgpu_spinlock items_lock;
+		struct nvgpu_mutex start_lock;
 	} channel_worker;
 
 	struct gk20a_scale_profile *scale_profile;
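
Taken together, __nvgpu_channel_worker_start() is a double-checked
lock: the unlocked nvgpu_thread_is_running() test keeps the common
enqueue path cheap, and the re-check under start_lock guarantees that
two racing enqueues cannot both create the thread. A rough lifecycle
sketch after this change (the wrapper function is hypothetical and
error handling is elided):

    static void worker_lifecycle_sketch(struct gk20a *g,
                                        struct channel_gk20a *ch)
    {
            /*
             * Power-on: initialize worker metadata and attempt the
             * first start; if thread creation fails (e.g. a fatal
             * signal during load), the state stays consistent and a
             * later enqueue can still start the thread.
             */
            nvgpu_channel_worker_init(g);

            /*
             * Channel update: the enqueue path re-checks poll_task
             * under start_lock and (re)starts the poller if needed.
             */
            gk20a_channel_worker_enqueue(ch);

            /*
             * Power-off: stop the poller while holding start_lock so
             * a racing enqueue cannot act on a half-stopped task.
             */
            nvgpu_channel_worker_deinit(g);
    }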