gpu: nvgpu: Remove last Linux kmem usage

Replace the last of the Linux kmem API usage with nvgpu kmem calls.
Several places are left alone, the allocation of the struct gk20a
itself in particular.

Also, one function in the clk code was updated to take a struct
gk20a as an argument so that it can use nvgpu_kmalloc().

Bug 1799159
Bug 1823380

Change-Id: I84fc3f8e19c63d6265bac6098dc727d93e3ff613
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1331702
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit c86e940e11 (parent e3bd4ae2a5)
Author: Alex Waterman <alexw@nvidia.com>
Date:   2017-03-30 21:16:51 +01:00
Committed by: mobile promotions

13 changed files with 32 additions and 29 deletions
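
Every hunk below follows the same substitution: a raw Linux kmem call is
replaced by its nvgpu kmem counterpart, which takes the owning struct
gk20a so the allocation can be tracked per GPU instance. A minimal
sketch of the pattern, assuming the nvgpu kmem API from <nvgpu/kmem.h>;
the helper function itself is hypothetical and only the nvgpu_* calls
appear in this commit:

#include <nvgpu/kmem.h>

/*
 * Hypothetical helper showing the before/after shape of this change:
 * kzalloc()/kfree() become nvgpu_kzalloc()/nvgpu_kfree(), each taking
 * the struct gk20a that owns the allocation.
 */
static int example_tracked_alloc(struct gk20a *g, size_t count)
{
	u64 *data;

	/* was: data = kzalloc(count * sizeof(*data), GFP_KERNEL); */
	data = nvgpu_kzalloc(g, count * sizeof(*data));
	if (!data)
		return -ENOMEM;

	/* ... use data ... */

	/* was: kfree(data); */
	nvgpu_kfree(g, data);
	return 0;
}

Virtually-mapped buffers get the same treatment: the vfree() calls in
the fifo, gr, and mm hunks become nvgpu_vfree(g, ...).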


@@ -472,12 +472,12 @@ void nvgpu_clk_arb_cleanup_arbiter(struct gk20a *g)
 	destroy_workqueue(arb->update_work_queue);
 	arb->update_work_queue = NULL;
 
-	kfree(arb->gpc2clk_f_points);
-	kfree(arb->mclk_f_points);
+	nvgpu_kfree(g, arb->gpc2clk_f_points);
+	nvgpu_kfree(g, arb->mclk_f_points);
 
 	for (index = 0; index < 2; index++) {
-		kfree(arb->vf_table_pool[index].gpc2clk_points);
-		kfree(arb->vf_table_pool[index].mclk_points);
+		nvgpu_kfree(g, arb->vf_table_pool[index].gpc2clk_points);
+		nvgpu_kfree(g, arb->vf_table_pool[index].mclk_points);
 	}
 }


@@ -712,7 +712,7 @@ static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
 	return 0;
 
 clean_up_free:
-	kfree(event_id_data);
+	nvgpu_kfree(g, event_id_data);
 clean_up_file:
 	fput(file);
 clean_up:


@@ -161,7 +161,7 @@ static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
 	return 0;
 
 clean_up_free:
-	kfree(event_id_data);
+	nvgpu_kfree(g, event_id_data);
 clean_up_file:
 	fput(file);
 clean_up:


@@ -106,7 +106,7 @@ static void nvgpu_lockless_alloc_destroy(struct nvgpu_allocator *a)
 	nvgpu_fini_alloc_debug(a);
 
-	vfree(pa->next);
+	nvgpu_vfree(a->g, pa->next);
 
 	nvgpu_kfree(nvgpu_alloc_to_gpu(a), pa);
 }


@@ -74,7 +74,7 @@ void gk20a_semaphore_sea_destroy(struct gk20a *g)
 		return;
 
 	nvgpu_mutex_destroy(&g->sema_sea->sea_lock);
-	kfree(g->sema_sea);
+	nvgpu_kfree(g, g->sema_sea);
 	g->sema_sea = NULL;
 }


@@ -436,7 +436,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 	err = nvgpu_mutex_init(&ce_ctx->gpu_ctx_mutex);
 	if (err) {
-		kfree(ce_ctx);
+		nvgpu_kfree(g, ce_ctx);
 		return ctx_id;
 	}


@@ -85,14 +85,15 @@ static int alloc_session(struct gk20a *g, struct dbg_session_gk20a **_dbg_s)
 	return 0;
 }
 
-static int alloc_profiler(struct dbg_profiler_object_data **_prof)
+static int alloc_profiler(struct gk20a *g,
+			  struct dbg_profiler_object_data **_prof)
 {
 	struct dbg_profiler_object_data *prof;
 	*_prof = NULL;
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
-	prof = kzalloc(sizeof(*prof), GFP_KERNEL);
+	prof = nvgpu_kzalloc(g, sizeof(*prof));
 	if (!prof)
 		return -ENOMEM;
@@ -152,7 +153,7 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
 err_destroy_lock:
 	nvgpu_mutex_destroy(&dbg_session->ch_list_lock);
 err_free_session:
-	kfree(dbg_session);
+	nvgpu_kfree(g, dbg_session);
 free_ref:
 	gk20a_put(g);
 	return err;
@@ -435,7 +436,7 @@ int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 				release_profiler_reservation(dbg_s, prof_obj);
 			}
 			list_del(&prof_obj->prof_obj_entry);
-			kfree(prof_obj);
+			nvgpu_kfree(g, prof_obj);
 		}
 	}
@@ -537,7 +538,7 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
 			g->ops.dbg_session_ops.
 				release_profiler_reservation(dbg_s, prof_obj);
 			list_del(&prof_obj->prof_obj_entry);
-			kfree(prof_obj);
+			nvgpu_kfree(g, prof_obj);
 		}
 	}
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
@@ -1527,7 +1528,7 @@ static int nvgpu_ioctl_allocate_profiler_object(
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
-	err = alloc_profiler(&prof_obj);
+	err = alloc_profiler(g, &prof_obj);
 	if (err)
 		goto clean_up;
@@ -1540,7 +1541,7 @@ static int nvgpu_ioctl_allocate_profiler_object(
 		if (prof_obj->ch == NULL) {
 			gk20a_err(dev_from_gk20a(g),
 				"bind a channel for dbg session");
-			kfree(prof_obj);
+			nvgpu_kfree(g, prof_obj);
 			err = -EINVAL;
 			goto clean_up;
 		}
@@ -1586,7 +1587,7 @@ static int nvgpu_ioctl_free_profiler_object(
 			g->ops.dbg_session_ops.
 				release_profiler_reservation(dbg_s, prof_obj);
 			list_del(&prof_obj->prof_obj_entry);
-			kfree(prof_obj);
+			nvgpu_kfree(g, prof_obj);
 			obj_found = true;
 			break;
 		}
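
The alloc_profiler() hunks above also show the knock-on effect of the
conversion: a helper that used to allocate with a bare kzalloc() now
needs the struct gk20a threaded through its signature, and every
caller passes g along. A sketch of that pattern with hypothetical
names (only nvgpu_kzalloc() and nvgpu_kfree() come from this diff):

/*
 * Hypothetical stand-in for a driver object such as
 * dbg_profiler_object_data.
 */
struct example_obj {
	int id;
};

/* was: static int example_alloc(struct example_obj **out) */
static int example_alloc(struct gk20a *g, struct example_obj **out)
{
	struct example_obj *obj;

	*out = NULL;

	/* Tracked allocation, charged to this GPU instance. */
	obj = nvgpu_kzalloc(g, sizeof(*obj));
	if (!obj)
		return -ENOMEM;

	*out = obj;
	return 0;
}

Callers change from example_alloc(&obj) to example_alloc(g, &obj), and
the matching teardown becomes nvgpu_kfree(g, obj).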


@@ -61,8 +61,7 @@ static void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
 	u32 chid;
 	struct ch_state **ch_state;
 
-	ch_state = kzalloc(sizeof(*ch_state)
-			* f->num_channels, GFP_KERNEL);
+	ch_state = nvgpu_kzalloc(g, sizeof(*ch_state) * f->num_channels);
 	if (!ch_state) {
 		gk20a_debug_output(o, "cannot alloc memory for channels\n");
 		return;


@@ -3489,8 +3489,8 @@ static void __gk20a_fifo_profile_free(struct kref *ref)
 {
 	struct fifo_gk20a *f = container_of(ref, struct fifo_gk20a,
 					    profile.ref);
-	vfree(f->profile.data);
-	vfree(f->profile.sorted);
+	nvgpu_vfree(f->g, f->profile.data);
+	nvgpu_vfree(f->g, f->profile.sorted);
 }
 
 static int gk20a_fifo_profile_enable(void *data, u64 val)
@@ -3518,8 +3518,8 @@ static int gk20a_fifo_profile_enable(void *data, u64 val)
 					       FIFO_PROFILING_ENTRIES *
 					       sizeof(u64));
 		if (!(f->profile.data && f->profile.sorted)) {
-			vfree(f->profile.data);
-			vfree(f->profile.sorted);
+			nvgpu_vfree(g, f->profile.data);
+			nvgpu_vfree(g, f->profile.sorted);
 			nvgpu_mutex_release(&f->profile.lock);
 			return -ENOMEM;
 		}


@@ -3343,13 +3343,16 @@ int gk20a_comptag_allocator_init(struct gk20a_comptag_allocator *allocator,
 void gk20a_comptag_allocator_destroy(struct gk20a_comptag_allocator *allocator)
 {
+	struct gr_gk20a *gr = container_of(allocator,
+					   struct gr_gk20a, comp_tags);
+
 	/*
 	 * called only when exiting the driver (gk20a_remove, or unwinding the
 	 * init stage); no users should be active, so taking the mutex is
 	 * unnecessary here.
 	 */
 	allocator->size = 0;
-	vfree(allocator->bitmap);
+	nvgpu_vfree(gr->g, allocator->bitmap);
 }
 
 static void gk20a_remove_gr_support(struct gr_gk20a *gr)
@@ -3419,7 +3422,7 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
 	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_ltc.l);
 	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_fbpa.l);
 
-	vfree(gr->ctx_vars.local_golden_image);
+	nvgpu_vfree(g, gr->ctx_vars.local_golden_image);
 	gr->ctx_vars.local_golden_image = NULL;
 
 	if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map)


@@ -4188,7 +4188,7 @@ static int init_vm_page_tables(struct vm_gk20a *vm)
 	err = gk20a_zalloc_gmmu_page_table(vm, 0, &vm->mmu_levels[0],
 					   &vm->pdb, NULL);
 	if (err) {
-		vfree(vm->pdb.entries);
+		nvgpu_vfree(vm->mm->g, vm->pdb.entries);
 		return err;
 	}


@@ -260,7 +260,7 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g)
 	if (g->ops.fifo.tsg_open) {
 		err = g->ops.fifo.tsg_open(tsg);
 		if (err) {
-		gk20a_err(dev_from_gk20a(g),
+			gk20a_err(dev_from_gk20a(g),
 				"tsg %d fifo open failed %d",
 				tsg->tsgid, err);
 			goto clean_up;


@@ -1126,7 +1126,7 @@ static int vgpu_gr_suspend_resume_contexts(struct gk20a *g,
 	size_in = size_out + n * sizeof(u16);
 
-	msg = kmalloc(size_in, GFP_KERNEL);
+	msg = nvgpu_kmalloc(g, size_in);
 	if (!msg)
 		return -ENOMEM;
@@ -1159,7 +1159,7 @@ fail:
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
 	*ctx_resident_ch_fd = channel_fd;
 
-	kfree(msg);
+	nvgpu_kfree(g, msg);
 
 	return err;
 }