gpu: nvgpu: make sure vidmem is cleared only once

Protect the initial vidmem zeroing performed during the first userspace
alloc with a mutex, so that concurrent users block on it and the clear
runs only once. Without the lock, several clears could run in parallel,
and a later clear would corrupt memory that a thread which finished
earlier has already allocated and started using.

Jira DNVGPU-84

Change-Id: If497749abf481b230835250191d011c4a9d1483b
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/1232461
(cherry picked from commit 79435a68e6d2713b78acdb0ec6f77cfd78651d7f)
Reviewed-on: http://git-master/r/1234990
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
 2 files changed, 12 insertions(+), 4 deletions(-)

@@ -947,6 +947,8 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
         mm->vidmem.bootstrap_base = bootstrap_base;
         mm->vidmem.bootstrap_size = bootstrap_size;
 
+        mutex_init(&mm->vidmem.first_clear_mutex);
+
         INIT_WORK(&mm->vidmem.clear_mem_worker, gk20a_vidmem_clear_mem_worker);
         atomic_set(&mm->vidmem.clears_pending, 0);
         INIT_LIST_HEAD(&mm->vidmem.clear_list_head);
@@ -2190,11 +2192,16 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
         buf->g = g;
 
         if (!g->mm.vidmem.cleared) {
-                err = gk20a_vidmem_clear_all(g);
-                if (err) {
-                        gk20a_err(g->dev, "failed to clear whole vidmem");
-                        goto err_kfree;
+                mutex_lock(&g->mm.vidmem.first_clear_mutex);
+                if (!g->mm.vidmem.cleared) {
+                        err = gk20a_vidmem_clear_all(g);
+                        if (err) {
+                                gk20a_err(g->dev,
+                                        "failed to clear whole vidmem");
+                                goto err_kfree;
+                        }
                 }
+                mutex_unlock(&g->mm.vidmem.first_clear_mutex);
         }
 
         buf->mem = kzalloc(sizeof(struct mem_desc), GFP_KERNEL);
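
For context, the new code in gk20a_vidmem_buf_alloc is a double-checked lock:
the cleared flag is tested once outside the lock for the common fast path, then
re-tested under first_clear_mutex so that only the first caller performs the
expensive whole-vidmem clear. Below is a minimal userspace C sketch of the same
pattern; it uses pthreads in place of the kernel mutex API, and every name in
it (clear_all, buf_alloc, the two globals) is an illustrative stand-in rather
than driver code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver state; not the real nvgpu types. */
static bool cleared;
static pthread_mutex_t first_clear_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Expensive one-time work, analogous to clearing all of vidmem. */
static int clear_all(void)
{
        printf("clearing vidmem once\n");
        return 0;
}

static int buf_alloc(void)
{
        int err = 0;

        if (!cleared) {                 /* unlocked fast path, as in the driver */
                pthread_mutex_lock(&first_clear_mutex);
                if (!cleared) {         /* re-check under the lock */
                        err = clear_all();
                        if (!err)
                                cleared = true;
                }
                pthread_mutex_unlock(&first_clear_mutex);
        }
        return err;
}

int main(void)
{
        /* Only the first call runs the clear; the second sees cleared == true. */
        buf_alloc();
        buf_alloc();
        return 0;
}

Built with cc -pthread, the two calls in main() produce exactly one "clearing"
message; with several threads calling buf_alloc() concurrently, the mutex
likewise ensures the clear runs only once.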

@@ -417,6 +417,7 @@ struct mm_gk20a {
 
                 u32 ce_ctx_id;
                 bool cleared;
+                struct mutex first_clear_mutex;
 
                 struct list_head clear_list_head;
                 struct mutex clear_list_mutex;