gpu: nvgpu: make nvgpu_mutex_init return void

Make the nvgpu_mutex_init function return void.
In the Linux case this changes nothing, since mutex_init()
already returns void.
For POSIX, we assert() and abort if pthread_mutex_init() fails.

This removes the need to inject errors for _every_
nvgpu_mutex_init call in the driver.
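
As an illustrative sketch (the names below are placeholders, not taken
verbatim from any one file in this patch), callers that previously had to
handle an error:

        err = nvgpu_mutex_init(&foo->lock);
        if (err != 0) {
                nvgpu_err(g, "mutex init failed");
                goto cleanup;
        }

can now simply do:

        nvgpu_mutex_init(&foo->lock);

with the POSIX implementation asserting internally if pthread_mutex_init()
ever fails.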

Jira NVGPU-3476

Change-Id: Ibc801116dc82cdfcedcba2c352785f2640b7d54f
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2130538
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Thomas Fleury
2019-06-04 13:38:29 -07:00
committed by mobile promotions
parent c74b89194d
commit 97762279b7
45 changed files with 81 additions and 419 deletions

View File

@@ -403,7 +403,6 @@ u32 nvgpu_ce_prepare_submit(u64 src_buf,
 int nvgpu_ce_init_support(struct gk20a *g)
 {
         struct nvgpu_ce_app *ce_app = g->ce_app;
-        int err;
         u32 ce_reset_mask;
 
         if (unlikely(ce_app == NULL)) {
@@ -430,10 +429,7 @@ int nvgpu_ce_init_support(struct gk20a *g)
         nvgpu_log(g, gpu_dbg_fn, "ce: init");
 
-        err = nvgpu_mutex_init(&ce_app->app_mutex);
-        if (err != 0) {
-                return err;
-        }
+        nvgpu_mutex_init(&ce_app->app_mutex);
 
         nvgpu_mutex_acquire(&ce_app->app_mutex);
@@ -516,16 +512,10 @@ u32 nvgpu_ce_create_context(struct gk20a *g,
                 return ctx_id;
         }
 
-        err = nvgpu_mutex_init(&ce_ctx->gpu_ctx_mutex);
-        if (err != 0) {
-                nvgpu_kfree(g, ce_ctx);
-                return ctx_id;
-        }
+        nvgpu_mutex_init(&ce_ctx->gpu_ctx_mutex);
 
         ce_ctx->g = g;
         ce_ctx->cmd_buf_read_queue_offset = 0;
         ce_ctx->vm = g->mm.ce.vm;
 
         /* allocate a tsg if needed */

View File

@@ -129,10 +129,7 @@ int gp10b_init_clk_arbiter(struct gk20a *g)
                 return -ENOMEM;
         }
 
-        err = nvgpu_mutex_init(&arb->pstate_lock);
-        if (err != 0) {
-                goto mutex_fail;
-        }
+        nvgpu_mutex_init(&arb->pstate_lock);
 
         nvgpu_spinlock_init(&arb->sessions_lock);
         nvgpu_spinlock_init(&arb->users_lock);
@@ -231,8 +228,6 @@ init_fail:
         }
 
         nvgpu_mutex_destroy(&arb->pstate_lock);
-mutex_fail:
         nvgpu_kfree(g, arb);
 
         return err;

View File

@@ -153,10 +153,7 @@ int gv100_init_clk_arbiter(struct gk20a *g)
                 return -ENOMEM;
         }
 
-        err = nvgpu_mutex_init(&arb->pstate_lock);
-        if (err != 0) {
-                goto mutex_fail;
-        }
+        nvgpu_mutex_init(&arb->pstate_lock);
 
         nvgpu_spinlock_init(&arb->sessions_lock);
         nvgpu_spinlock_init(&arb->users_lock);
         nvgpu_spinlock_init(&arb->requests_lock);
@@ -278,8 +275,6 @@ init_fail:
         }
 
         nvgpu_mutex_destroy(&arb->pstate_lock);
-mutex_fail:
         nvgpu_kfree(g, arb);
 
         return err;

View File

@@ -571,16 +571,10 @@ int nvgpu_engine_fb_queue_init(struct nvgpu_engine_fb_queue **queue_p,
         queue->tail = engine_fb_queue_tail;
 
         /* init mutex */
-        err = nvgpu_mutex_init(&queue->mutex);
-        if (err != 0) {
-                goto free_queue;
-        }
+        nvgpu_mutex_init(&queue->mutex);
 
         /* init mutex */
-        err = nvgpu_mutex_init(&queue->fbq.work_buffer_mutex);
-        if (err != 0) {
-                goto free_mutex;
-        }
+        nvgpu_mutex_init(&queue->fbq.work_buffer_mutex);
 
         queue->fbq.work_buffer = nvgpu_kzalloc(g, queue->fbq.element_size);
         if (queue->fbq.work_buffer == NULL) {
@@ -599,9 +593,7 @@ int nvgpu_engine_fb_queue_init(struct nvgpu_engine_fb_queue **queue_p,
 free_work_mutex:
         nvgpu_mutex_destroy(&queue->fbq.work_buffer_mutex);
-free_mutex:
         nvgpu_mutex_destroy(&queue->mutex);
-free_queue:
         nvgpu_kfree(g, queue);
 
         return err;

View File

@@ -429,10 +429,7 @@ int nvgpu_engine_mem_queue_init(struct nvgpu_engine_mem_queue **queue_p,
         }
 
         /* init mutex */
-        err = nvgpu_mutex_init(&queue->mutex);
-        if (err != 0) {
-                goto exit;
-        }
+        nvgpu_mutex_init(&queue->mutex);
 
         *queue_p = queue;
 exit:

View File

@@ -733,28 +733,11 @@ int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id)
                 return 0;
         }
 
-        err = nvgpu_mutex_init(&flcn->imem_lock);
-        if (err != 0) {
-                nvgpu_err(g, "Error in flcn.imem_lock mutex initialization");
-                return err;
-        }
-
-        err = nvgpu_mutex_init(&flcn->dmem_lock);
-        if (err != 0) {
-                nvgpu_err(g, "Error in flcn.dmem_lock mutex initialization");
-                nvgpu_mutex_destroy(&flcn->imem_lock);
-                return err;
-        }
+        nvgpu_mutex_init(&flcn->imem_lock);
+        nvgpu_mutex_init(&flcn->dmem_lock);
 
         if (flcn->emem_supported) {
-                err = nvgpu_mutex_init(&flcn->emem_lock);
-                if (err != 0) {
-                        nvgpu_err(g, "Error in flcn.emem_lock "
-                                "mutex initialization");
-                        nvgpu_mutex_destroy(&flcn->dmem_lock);
-                        nvgpu_mutex_destroy(&flcn->imem_lock);
-                        return err;
-                }
+                nvgpu_mutex_init(&flcn->emem_lock);
         }
 
         return 0;

View File

@@ -2377,57 +2377,19 @@ int nvgpu_channel_init_support(struct gk20a *g, u32 chid)
         nvgpu_init_list_node(&c->dbg_s_list);
         nvgpu_init_list_node(&c->worker_item);
 
-        err = nvgpu_mutex_init(&c->ioctl_lock);
-        if (err != 0) {
-                return err;
-        }
-        err = nvgpu_mutex_init(&c->joblist.cleanup_lock);
-        if (err != 0) {
-                goto fail_1;
-        }
-        err = nvgpu_mutex_init(&c->joblist.pre_alloc.read_lock);
-        if (err != 0) {
-                goto fail_2;
-        }
-        err = nvgpu_mutex_init(&c->sync_lock);
-        if (err != 0) {
-                goto fail_3;
-        }
+        nvgpu_mutex_init(&c->ioctl_lock);
+        nvgpu_mutex_init(&c->joblist.cleanup_lock);
+        nvgpu_mutex_init(&c->joblist.pre_alloc.read_lock);
+        nvgpu_mutex_init(&c->sync_lock);
 #if defined(CONFIG_GK20A_CYCLE_STATS)
-        err = nvgpu_mutex_init(&c->cyclestate.cyclestate_buffer_mutex);
-        if (err != 0) {
-                goto fail_4;
-        }
-        err = nvgpu_mutex_init(&c->cs_client_mutex);
-        if (err != 0) {
-                goto fail_5;
-        }
+        nvgpu_mutex_init(&c->cyclestate.cyclestate_buffer_mutex);
+        nvgpu_mutex_init(&c->cs_client_mutex);
 #endif
-        err = nvgpu_mutex_init(&c->dbg_s_lock);
-        if (err != 0) {
-                goto fail_6;
-        }
+        nvgpu_mutex_init(&c->dbg_s_lock);
         nvgpu_init_list_node(&c->ch_entry);
 
         nvgpu_list_add(&c->free_chs, &g->fifo.free_chs);
 
         return 0;
-
-fail_6:
-#if defined(CONFIG_GK20A_CYCLE_STATS)
-        nvgpu_mutex_destroy(&c->cs_client_mutex);
-fail_5:
-        nvgpu_mutex_destroy(&c->cyclestate.cyclestate_buffer_mutex);
-fail_4:
-#endif
-        nvgpu_mutex_destroy(&c->sync_lock);
-fail_3:
-        nvgpu_mutex_destroy(&c->joblist.pre_alloc.read_lock);
-fail_2:
-        nvgpu_mutex_destroy(&c->joblist.cleanup_lock);
-fail_1:
-        nvgpu_mutex_destroy(&c->ioctl_lock);
-        return err;
 }
 
 int nvgpu_channel_setup_sw(struct gk20a *g)
@@ -2438,11 +2400,7 @@ int nvgpu_channel_setup_sw(struct gk20a *g)
         f->num_channels = g->ops.channel.count(g);
 
-        err = nvgpu_mutex_init(&f->free_chs_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "mutex init failed");
-                return err;
-        }
+        nvgpu_mutex_init(&f->free_chs_mutex);
 
         f->channel = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->channel));
         if (f->channel == NULL) {

View File

@@ -67,38 +67,6 @@ static void nvgpu_fifo_remove_support(struct nvgpu_fifo *f)
         g->ops.fifo.cleanup_sw(g);
 }
 
-static int nvgpu_fifo_init_locks(struct gk20a *g, struct nvgpu_fifo *f)
-{
-        int err;
-
-        err = nvgpu_mutex_init(&f->intr.isr.mutex);
-        if (err != 0) {
-                goto destroy_0;
-        }
-
-        err = nvgpu_mutex_init(&f->engines_reset_mutex);
-        if (err != 0) {
-                goto destroy_1;
-        }
-
-        err = nvgpu_mutex_init(&f->deferred_reset_mutex);
-        if (err != 0) {
-                goto destroy_2;
-        }
-
-        return 0;
-
-destroy_2:
-        nvgpu_mutex_destroy(&f->engines_reset_mutex);
-destroy_1:
-        nvgpu_mutex_destroy(&f->intr.isr.mutex);
-destroy_0:
-        nvgpu_err(g, "failed to init mutex");
-        return err;
-}
-
 int nvgpu_fifo_setup_sw_common(struct gk20a *g)
 {
         struct nvgpu_fifo *f = &g->fifo;
@@ -108,10 +76,9 @@ int nvgpu_fifo_setup_sw_common(struct gk20a *g)
         f->g = g;
 
-        err = nvgpu_fifo_init_locks(g, f);
-        if (err != 0) {
-                nvgpu_err(g, "failed to init mutexes");
-        }
+        nvgpu_mutex_init(&f->intr.isr.mutex);
+        nvgpu_mutex_init(&f->engines_reset_mutex);
+        nvgpu_mutex_init(&f->deferred_reset_mutex);
 
         err = nvgpu_channel_setup_sw(g);
         if (err != 0) {

View File

@@ -789,12 +789,7 @@ int nvgpu_runlist_setup_sw(struct gk20a *g)
                         }
                 }
 
-                err = nvgpu_mutex_init(&runlist->runlist_lock);
-                if (err != 0) {
-                        nvgpu_err(g,
-                                "Error in runlist_lock mutex initialization");
-                        goto clean_up_runlist;
-                }
+                nvgpu_mutex_init(&runlist->runlist_lock);
 
                 /* None of buffers is pinned if this value doesn't change.
                    Otherwise, one of them (cur_buffer) must have been pinned. */

View File

@@ -354,7 +354,8 @@ int nvgpu_tsg_init_support(struct gk20a *g, u32 tsgid)
         nvgpu_init_list_node(&tsg->event_id_list);
-        return nvgpu_mutex_init(&tsg->event_id_list_lock);
+        nvgpu_mutex_init(&tsg->event_id_list_lock);
+
+        return 0;
 }
 
 int nvgpu_tsg_setup_sw(struct gk20a *g)
@@ -363,11 +364,7 @@ int nvgpu_tsg_setup_sw(struct gk20a *g)
         u32 tsgid, i;
         int err;
 
-        err = nvgpu_mutex_init(&f->tsg_inuse_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "mutex init failed");
-                return err;
-        }
+        nvgpu_mutex_init(&f->tsg_inuse_mutex);
 
         f->tsg = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->tsg));
         if (f->tsg == NULL) {
@@ -797,10 +794,7 @@ int nvgpu_tsg_alloc_sm_error_states_mem(struct gk20a *g,
                 return -EINVAL;
         }
 
-        err = nvgpu_mutex_init(&tsg->sm_exception_mask_lock);
-        if (err != 0) {
-                return err;
-        }
+        nvgpu_mutex_init(&tsg->sm_exception_mask_lock);
 
         tsg->sm_error_states = nvgpu_kzalloc(g,
                         sizeof(struct nvgpu_tsg_sm_error_state)

View File

@@ -37,11 +37,7 @@ int nvgpu_userd_init_slabs(struct gk20a *g)
         struct nvgpu_fifo *f = &g->fifo;
         int err;
 
-        err = nvgpu_mutex_init(&f->userd_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "failed to init userd_mutex");
-                return err;
-        }
+        nvgpu_mutex_init(&f->userd_mutex);
 
         f->num_channels_per_slab = PAGE_SIZE / f->userd_entry_size;
         f->num_userd_slabs =

View File

@@ -133,7 +133,6 @@ void nvgpu_gr_fecs_trace_find_pid(struct gk20a *g, u32 context_ptr,
 int nvgpu_gr_fecs_trace_init(struct gk20a *g)
 {
         struct nvgpu_gr_fecs_trace *trace;
-        int err;
 
         if (!is_power_of_2(GK20A_FECS_TRACE_NUM_RECORDS)) {
                 nvgpu_err(g, "invalid NUM_RECORDS chosen");
@@ -147,20 +146,9 @@ int nvgpu_gr_fecs_trace_init(struct gk20a *g)
         }
         g->fecs_trace = trace;
 
-        err = nvgpu_mutex_init(&trace->poll_lock);
-        if (err != 0) {
-                goto clean;
-        }
-        err = nvgpu_mutex_init(&trace->list_lock);
-        if (err != 0) {
-                goto clean_poll_lock;
-        }
-        err = nvgpu_mutex_init(&trace->enable_lock);
-        if (err != 0) {
-                goto clean_list_lock;
-        }
+        nvgpu_mutex_init(&trace->poll_lock);
+        nvgpu_mutex_init(&trace->list_lock);
+        nvgpu_mutex_init(&trace->enable_lock);
 
         nvgpu_init_list_node(&trace->context_list);
@@ -169,15 +157,6 @@ int nvgpu_gr_fecs_trace_init(struct gk20a *g)
         trace->enable_count = 0;
 
         return 0;
-
-clean_list_lock:
-        nvgpu_mutex_destroy(&trace->list_lock);
-clean_poll_lock:
-        nvgpu_mutex_destroy(&trace->poll_lock);
-clean:
-        nvgpu_kfree(g, trace);
-        g->fecs_trace = NULL;
-        return err;
 }
 
 int nvgpu_gr_fecs_trace_deinit(struct gk20a *g)

View File

@@ -387,11 +387,7 @@ static int gr_init_setup_sw(struct gk20a *g)
         gr->g = g;
 
-        err = nvgpu_mutex_init(&gr->ctxsw_disable_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "Error in ctxsw_disable_mutex init");
-                return err;
-        }
+        nvgpu_mutex_init(&gr->ctxsw_disable_mutex);
         gr->ctxsw_disable_count = 0;
 
         err = nvgpu_gr_obj_ctx_init(g, &gr->golden_image,

View File

@@ -46,7 +46,6 @@
 struct nvgpu_gr_falcon *nvgpu_gr_falcon_init_support(struct gk20a *g)
 {
         struct nvgpu_gr_falcon *falcon;
-        int err = 0;
 
         nvgpu_log_fn(g, " ");
@@ -55,17 +54,8 @@ struct nvgpu_gr_falcon *nvgpu_gr_falcon_init_support(struct gk20a *g)
                 return falcon;
         }
 
-        err = nvgpu_mutex_init(&falcon->fecs_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "Error in fecs_mutex init");
-                goto done;
-        }
-
-done:
-        if (err != 0) {
-                nvgpu_kfree(g, falcon);
-                falcon = NULL;
-        }
+        nvgpu_mutex_init(&falcon->fecs_mutex);
 
         return falcon;
 }

View File

@@ -701,7 +701,6 @@ int nvgpu_gr_obj_ctx_init(struct gk20a *g,
         struct nvgpu_gr_obj_ctx_golden_image **gr_golden_image, u32 size)
 {
         struct nvgpu_gr_obj_ctx_golden_image *golden_image;
-        int err;
 
         golden_image = nvgpu_kzalloc(g, sizeof(*golden_image));
         if (golden_image == NULL) {
@@ -710,11 +709,7 @@ int nvgpu_gr_obj_ctx_init(struct gk20a *g,
         nvgpu_gr_obj_ctx_set_golden_image_size(golden_image, size);
 
-        err = nvgpu_mutex_init(&golden_image->ctx_mutex);
-        if (err != 0) {
-                nvgpu_kfree(g, golden_image);
-                return err;
-        }
+        nvgpu_mutex_init(&golden_image->ctx_mutex);
 
         *gr_golden_image = golden_image;

View File

@@ -197,11 +197,7 @@ static int nvgpu_gr_zbc_load_default_table(struct gk20a *g,
         u32 i = 0;
         int err = 0;
 
-        err = nvgpu_mutex_init(&zbc->zbc_lock);
-        if (err != 0) {
-                nvgpu_err(g, "Error in zbc_lock mutex initialization");
-                return err;
-        }
+        nvgpu_mutex_init(&zbc->zbc_lock);
 
         /* load default color table */
         zbc_val.type = NVGPU_GR_ZBC_TYPE_COLOR;

View File

@@ -148,8 +148,6 @@ int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
                 const char *name, void *priv, bool dbg,
                 const struct nvgpu_allocator_ops *ops)
 {
-        int err;
-
         if (ops == NULL) {
                 return -EINVAL;
         }
@@ -163,10 +161,7 @@ int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
                 return -EINVAL;
         }
 
-        err = nvgpu_mutex_init(&a->lock);
-        if (err != 0) {
-                return err;
-        }
+        nvgpu_mutex_init(&a->lock);
 
         a->g = g;
         a->ops = ops;

View File

@@ -74,12 +74,7 @@ int gk20a_comptag_allocator_init(struct gk20a *g,
                 struct gk20a_comptag_allocator *allocator,
                 unsigned long size)
 {
-        int err = nvgpu_mutex_init(&allocator->lock);
-
-        if (err != 0) {
-                nvgpu_err(g, "Error in allocator.lock mutex initialization");
-                return err;
-        }
+        nvgpu_mutex_init(&allocator->lock);
 
         /*
          * 0th comptag is special and is never used. The base for this bitmap

View File

@@ -94,8 +94,6 @@ int nvgpu_pd_cache_init(struct gk20a *g)
 {
         struct nvgpu_pd_cache *cache;
         u32 i;
-        int err = 0;
 
         /*
          * This gets called from finalize_poweron() so we need to make sure we
@@ -118,12 +116,7 @@ int nvgpu_pd_cache_init(struct gk20a *g)
         cache->mem_tree = NULL;
 
-        err = nvgpu_mutex_init(&cache->lock);
-        if (err != 0) {
-                nvgpu_err(g, "Error in cache.lock initialization");
-                nvgpu_kfree(g, cache);
-                return err;
-        }
+        nvgpu_mutex_init(&cache->lock);
 
         g->mm.pd_cache = cache;

View File

@@ -489,11 +489,7 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
         }
         mm->g = g;
 
-        err = nvgpu_mutex_init(&mm->l2_op_lock);
-        if (err != 0) {
-                nvgpu_err(g, "Error in l2_op_lock mutex initialization");
-                return err;
-        }
+        nvgpu_mutex_init(&mm->l2_op_lock);
 
         /*TBD: make channel vm size configurable */
         mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE -

View File

@@ -384,26 +384,9 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
         nvgpu_atomic64_set(&mm->vidmem.bytes_pending, 0);
         nvgpu_init_list_node(&mm->vidmem.clear_list_head);
 
-        err = nvgpu_mutex_init(&mm->vidmem.clear_list_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "nvgpu_mutex_init(list_mutex) failed err=%d",
-                                err);
-                goto fail;
-        }
-
-        err = nvgpu_mutex_init(&mm->vidmem.clearing_thread_lock);
-        if (err != 0) {
-                nvgpu_err(g, "nvgpu_mutex_init(thread_lock) failed err=%d",
-                                err);
-                goto fail;
-        }
-
-        err = nvgpu_mutex_init(&mm->vidmem.first_clear_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "nvgpu_mutex_init(first_clear) failed err=%d",
-                                err);
-                goto fail;
-        }
+        nvgpu_mutex_init(&mm->vidmem.clear_list_mutex);
+        nvgpu_mutex_init(&mm->vidmem.clearing_thread_lock);
+        nvgpu_mutex_init(&mm->vidmem.first_clear_mutex);
 
         nvgpu_atomic_set(&mm->vidmem.pause_count, 0);

View File

@@ -529,18 +529,8 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
         vm->mapped_buffers = NULL;
 
-        err = nvgpu_mutex_init(&vm->syncpt_ro_map_lock);
-        if (err != 0) {
-                nvgpu_err(g,
-                        "Error in syncpt_ro_map_lock mutex initialization");
-                goto clean_up_allocators;
-        }
-
-        err = nvgpu_mutex_init(&vm->update_gmmu_lock);
-        if (err != 0) {
-                nvgpu_err(g, "Error in update_gmmu_lock mutex initialization");
-                goto clean_up_ro_map_lock;
-        }
+        nvgpu_mutex_init(&vm->syncpt_ro_map_lock);
+        nvgpu_mutex_init(&vm->update_gmmu_lock);
 
         nvgpu_ref_init(&vm->ref);
         nvgpu_init_list_node(&vm->vm_area_list);
@@ -561,7 +551,6 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,
 clean_up_gmmu_lock:
         nvgpu_mutex_destroy(&vm->update_gmmu_lock);
-clean_up_ro_map_lock:
         nvgpu_mutex_destroy(&vm->syncpt_ro_map_lock);
 clean_up_allocators:
         if (nvgpu_alloc_initialized(&vm->kernel)) {

View File

@@ -75,12 +75,7 @@ int nvgpu_pmu_sequences_init(struct gk20a *g, struct nvgpu_pmu *pmu,
                 return -ENOMEM;
         }
 
-        err = nvgpu_mutex_init(&sequences->pmu_seq_lock);
-        if (err != 0) {
-                nvgpu_kfree(g, sequences->seq);
-                nvgpu_kfree(g, sequences);
-                return err;
-        }
+        nvgpu_mutex_init(&sequences->pmu_seq_lock);
 
         *sequences_p = sequences;
 
 exit:

View File

@@ -212,10 +212,7 @@ int nvgpu_pmu_perf_pstate_sw_setup(struct gk20a *g)
         nvgpu_cond_init(&g->perf_pmu->pstatesobjs.pstate_notifier_wq);
 
-        err = nvgpu_mutex_init(&g->perf_pmu->pstatesobjs.pstate_mutex);
-        if (err != 0) {
-                return err;
-        }
+        nvgpu_mutex_init(&g->perf_pmu->pstatesobjs.pstate_mutex);
 
         err = nvgpu_boardobjgrp_construct_e32(g, &g->perf_pmu->pstatesobjs.super);
         if (err != 0) {

View File

@@ -998,18 +998,8 @@ int nvgpu_pmu_pg_init(struct gk20a *g, struct nvgpu_pmu *pmu,
         pg->aelpg_param[3] = APCTRL_POWER_BREAKEVEN_DEFAULT_US;
         pg->aelpg_param[4] = APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT;
 
-        err = nvgpu_mutex_init(&pg->elpg_mutex);
-        if (err != 0) {
-                nvgpu_kfree(g, pg);
-                goto exit;
-        }
-
-        err = nvgpu_mutex_init(&pg->pg_mutex);
-        if (err != 0) {
-                nvgpu_mutex_destroy(&pg->elpg_mutex);
-                nvgpu_kfree(g, pg);
-                goto exit;
-        }
+        nvgpu_mutex_init(&pg->elpg_mutex);
+        nvgpu_mutex_init(&pg->pg_mutex);
 
         *pg_p = pg;

View File

@@ -322,10 +322,7 @@ int nvgpu_pmu_early_init(struct gk20a *g, struct nvgpu_pmu **pmu_p)
                 goto exit;
         }
 #ifdef NVGPU_FEATURE_LS_PMU
-        err = nvgpu_mutex_init(&pmu->isr_mutex);
-        if (err != 0) {
-                goto init_failed;
-        }
+        nvgpu_mutex_init(&pmu->isr_mutex);
 
         /* Allocate memory for pmu_perfmon */
         err = nvgpu_pmu_initialize_perfmon(g, pmu, &pmu->pmu_perfmon);

View File

@@ -29,19 +29,13 @@
 int nvgpu_sec2_sequences_alloc(struct gk20a *g,
                 struct sec2_sequences *sequences)
 {
-        int err;
-
         sequences->seq = nvgpu_kzalloc(g, SEC2_MAX_NUM_SEQUENCES *
                         sizeof(struct sec2_sequence));
         if (sequences->seq == NULL) {
                 return -ENOMEM;
         }
 
-        err = nvgpu_mutex_init(&sequences->sec2_seq_lock);
-        if (err != 0) {
-                nvgpu_kfree(g, sequences->seq);
-                return err;
-        }
+        nvgpu_mutex_init(&sequences->sec2_seq_lock);
 
         return 0;
 }

View File

@@ -48,24 +48,15 @@ int nvgpu_init_sec2_setup_sw(struct gk20a *g, struct nvgpu_sec2 *sec2)
         err = nvgpu_sec2_sequences_alloc(g, &sec2->sequences);
         if (err != 0) {
-                goto exit;
+                return err;
         }
 
         nvgpu_sec2_sequences_init(g, &sec2->sequences);
 
-        err = nvgpu_mutex_init(&sec2->isr_mutex);
-        if (err != 0) {
-                goto free_sequences;
-        }
+        nvgpu_mutex_init(&sec2->isr_mutex);
 
         sec2->remove_support = nvgpu_remove_sec2_support;
-        goto exit;
-
-free_sequences:
-        nvgpu_sec2_sequences_free(g, &sec2->sequences);
-
-exit:
         return err;
 }

View File

@@ -50,15 +50,12 @@ int nvgpu_semaphore_pool_alloc(struct nvgpu_semaphore_sea *sea,
         nvgpu_semaphore_sea_lock(sea);
 
-        ret = nvgpu_mutex_init(&p->pool_lock);
-        if (ret != 0) {
-                goto fail;
-        }
+        nvgpu_mutex_init(&p->pool_lock);
 
         ret = semaphore_bitmap_alloc(sea->pools_alloced,
                         SEMAPHORE_POOL_COUNT);
         if (ret < 0) {
-                goto fail_alloc;
+                goto fail;
         }
 
         page_idx = (unsigned long)ret;
@@ -78,9 +75,8 @@ int nvgpu_semaphore_pool_alloc(struct nvgpu_semaphore_sea *sea,
         *pool = p;
         return 0;
 
-fail_alloc:
-        nvgpu_mutex_destroy(&p->pool_lock);
 fail:
+        nvgpu_mutex_destroy(&p->pool_lock);
         nvgpu_semaphore_sea_unlock(sea);
         nvgpu_kfree(sea->gk20a, p);
         gpu_sema_dbg(sea->gk20a, "Failed to allocate semaphore pool!");

View File

@@ -112,20 +112,17 @@ struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g)
         g->sema_sea->page_count = 0;
         g->sema_sea->gk20a = g;
         nvgpu_init_list_node(&g->sema_sea->pool_list);
-        if (nvgpu_mutex_init(&g->sema_sea->sea_lock) != 0) {
-                goto cleanup_free;
-        }
+        nvgpu_mutex_init(&g->sema_sea->sea_lock);
 
         if (semaphore_sea_grow(g->sema_sea) != 0) {
-                goto cleanup_destroy;
+                goto cleanup;
         }
 
         gpu_sema_dbg(g, "Created semaphore sea!");
         return g->sema_sea;
 
-cleanup_destroy:
+cleanup:
         nvgpu_mutex_destroy(&g->sema_sea->sea_lock);
-cleanup_free:
         nvgpu_kfree(g, g->sema_sea);
         g->sema_sea = NULL;
         gpu_sema_dbg(g, "Failed to creat semaphore sea!");

View File

@@ -256,16 +256,11 @@ int nvgpu_worker_init(struct gk20a *g, struct nvgpu_worker *worker,
         nvgpu_cond_init(&worker->wq);
         nvgpu_init_list_node(&worker->items);
         nvgpu_spinlock_init(&worker->items_lock);
-        err = nvgpu_mutex_init(&worker->start_lock);
+        nvgpu_mutex_init(&worker->start_lock);
 
         worker->ops = ops;
 
-        if (err != 0 && ops == NULL) {
-                goto error_check;
-        }
-
         err = nvgpu_worker_start(worker);
-error_check:
         if (err != 0) {
                 nvgpu_err(g, "failed to start worker poller thread %s",
                         worker->thread_name);

View File

@@ -1184,10 +1184,7 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)
         nvgpu_log_fn(g, " ");
 
-        err = nvgpu_mutex_init(&clk->clk_mutex);
-        if (err != 0) {
-                return err;
-        }
+        nvgpu_mutex_init(&clk->clk_mutex);
 
         if (clk->sw_ready) {
                 nvgpu_log_fn(g, "skip init");

View File

@@ -92,14 +92,10 @@ unsigned long gv100_clk_measure_freq(struct gk20a *g, u32 api_domain)
 int gv100_init_clk_support(struct gk20a *g)
 {
         struct clk_gk20a *clk = &g->clk;
-        int err = 0;
 
         nvgpu_log_fn(g, " ");
 
-        err = nvgpu_mutex_init(&clk->clk_mutex);
-        if (err != 0) {
-                return err;
-        }
+        nvgpu_mutex_init(&clk->clk_mutex);
 
         clk->clk_namemap = (struct namemap_cfg *)
                 nvgpu_kzalloc(g, sizeof(struct namemap_cfg) * NUM_NAMEMAPS);
@@ -169,7 +165,7 @@ int gv100_init_clk_support(struct gk20a *g)
         clk->g = g;
 
-        return err;
+        return 0;
 }
 
 u32 gv100_get_rate_cntr(struct gk20a *g, struct namemap_cfg *c) {

View File

@@ -712,11 +712,7 @@ int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
         nvgpu_log_fn(g, " ");
 
-        err = nvgpu_mutex_init(&g->mm.hub_isr_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "Error in hub_isr_mutex initialization");
-                return err;
-        }
+        nvgpu_mutex_init(&g->mm.hub_isr_mutex);
 
         err = gv11b_mm_mmu_fault_info_buf_init(g);

View File

@@ -30,10 +30,9 @@ struct nvgpu_raw_spinlock {
         raw_spinlock_t spinlock;
 };
 
-static inline int nvgpu_mutex_init(struct nvgpu_mutex *mutex)
+static inline void nvgpu_mutex_init(struct nvgpu_mutex *mutex)
 {
         mutex_init(&mutex->mutex);
-        return 0;
 };
 
 static inline void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex)
 {

View File

@@ -56,7 +56,7 @@ struct nvgpu_spinlock;
  */
 struct nvgpu_raw_spinlock;
 
-int nvgpu_mutex_init(struct nvgpu_mutex *mutex);
+void nvgpu_mutex_init(struct nvgpu_mutex *mutex);
 void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex);
 void nvgpu_mutex_release(struct nvgpu_mutex *mutex);
 int nvgpu_mutex_tryacquire(struct nvgpu_mutex *mutex);

View File

@@ -1450,10 +1450,7 @@ __releases(&cde_app->mutex)
         nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: init");
 
-        err = nvgpu_mutex_init(&cde_app->mutex);
-        if (err)
-                return err;
+        nvgpu_mutex_init(&cde_app->mutex);
 
         nvgpu_mutex_acquire(&cde_app->mutex);
 
         nvgpu_init_list_node(&cde_app->free_contexts);

View File

@@ -551,7 +551,6 @@ static int gk20a_ctxsw_init_devs(struct gk20a *g)
 {
         struct gk20a_ctxsw_trace *trace = g->ctxsw_trace;
         struct gk20a_ctxsw_dev *dev = trace->devs;
-        int err;
         int i;
 
         for (i = 0; i < GK20A_CTXSW_TRACE_NUM_DEVS; i++) {
@@ -559,9 +558,7 @@ static int gk20a_ctxsw_init_devs(struct gk20a *g)
                 dev->hdr = NULL;
                 dev->write_enabled = false;
                 nvgpu_cond_init(&dev->readout_wq);
-                err = nvgpu_mutex_init(&dev->write_lock);
-                if (err)
-                        return err;
+                nvgpu_mutex_init(&dev->write_lock);
                 nvgpu_atomic_set(&dev->vma_ref, 0);
                 dev++;
         }

View File

@@ -435,21 +435,13 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
         nvgpu_cond_init(&dbg_s->dbg_events.wait_queue);
         nvgpu_init_list_node(&dbg_s->ch_list);
-        err = nvgpu_mutex_init(&dbg_s->ch_list_lock);
-        if (err)
-                goto err_free_session;
-        err = nvgpu_mutex_init(&dbg_s->ioctl_lock);
-        if (err)
-                goto err_destroy_lock;
+        nvgpu_mutex_init(&dbg_s->ch_list_lock);
+        nvgpu_mutex_init(&dbg_s->ioctl_lock);
         dbg_s->dbg_events.events_enabled = false;
         dbg_s->dbg_events.num_pending_events = 0;
 
         return 0;
 
-err_destroy_lock:
-        nvgpu_mutex_destroy(&dbg_s->ch_list_lock);
-err_free_session:
-        nvgpu_kfree(g, dbg_session_linux);
 free_ref:
         gk20a_put(g);
         return err;

View File

@@ -339,9 +339,7 @@ static int gk20a_tsg_event_id_enable(struct nvgpu_tsg *tsg,
         event_id_data->event_id = event_id;
 
         nvgpu_cond_init(&event_id_data->event_id_wq);
-        err = nvgpu_mutex_init(&event_id_data->lock);
-        if (err)
-                goto clean_up_free;
+        nvgpu_mutex_init(&event_id_data->lock);
 
         nvgpu_init_list_node(&event_id_data->event_id_node);
@@ -356,8 +354,6 @@ static int gk20a_tsg_event_id_enable(struct nvgpu_tsg *tsg,
         return 0;
 
-clean_up_free:
-        nvgpu_kfree(g, event_id_data);
 clean_up_file:
         fput(file);
 clean_up:

View File

@@ -283,7 +283,6 @@ static void nvgpu_channel_close_linux(struct nvgpu_channel *ch, bool force)
 static int nvgpu_channel_alloc_linux(struct gk20a *g, struct nvgpu_channel *ch)
 {
         struct nvgpu_channel_linux *priv;
-        int err;
 
         priv = nvgpu_kzalloc(g, sizeof(*priv));
         if (!priv)
@@ -296,11 +295,7 @@ static int nvgpu_channel_alloc_linux(struct gk20a *g, struct nvgpu_channel *ch)
         ch->has_os_fence_framework_support = true;
 #endif
 
-        err = nvgpu_mutex_init(&priv->error_notifier.mutex);
-        if (err) {
-                nvgpu_kfree(g, priv);
-                return err;
-        }
+        nvgpu_mutex_init(&priv->error_notifier.mutex);
 
         nvgpu_channel_work_completion_init(ch);

View File

@@ -627,28 +627,14 @@ int gk20a_sched_ctrl_init(struct gk20a *g)
         nvgpu_cond_init(&sched->readout_wq);
 
-        err = nvgpu_mutex_init(&sched->status_lock);
-        if (err)
-                goto free_ref;
-
-        err = nvgpu_mutex_init(&sched->control_lock);
-        if (err)
-                goto free_status_lock;
-
-        err = nvgpu_mutex_init(&sched->busy_lock);
-        if (err)
-                goto free_control_lock;
+        nvgpu_mutex_init(&sched->status_lock);
+        nvgpu_mutex_init(&sched->control_lock);
+        nvgpu_mutex_init(&sched->busy_lock);
 
         sched->sw_ready = true;
 
         return 0;
 
-free_control_lock:
-        nvgpu_mutex_destroy(&sched->control_lock);
-free_status_lock:
-        nvgpu_mutex_destroy(&sched->status_lock);
-free_ref:
-        nvgpu_kfree(g, sched->ref_tsg_bitmap);
 free_recent:
         nvgpu_kfree(g, sched->recent_tsg_bitmap);
 free_active:
View File

@@ -36,11 +36,7 @@ int nvgpu_cond_init(struct nvgpu_cond *cond)
                 return ret;
         }
 
-        ret = nvgpu_mutex_init(&cond->mutex);
-        if (ret != 0) {
-                (void) pthread_condattr_destroy(&cond->attr);
-                return ret;
-        }
+        nvgpu_mutex_init(&cond->mutex);
 
         ret = pthread_cond_init(&cond->cond, &cond->attr);
         if (ret != 0) {

View File

@@ -20,11 +20,13 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
+#include <nvgpu/bug.h>
 #include <nvgpu/lock.h>
 
-int nvgpu_mutex_init(struct nvgpu_mutex *mutex)
+void nvgpu_mutex_init(struct nvgpu_mutex *mutex)
 {
-        return pthread_mutex_init(&mutex->lock.mutex, NULL);
+        int err = pthread_mutex_init(&mutex->lock.mutex, NULL);
+
+        nvgpu_assert(err == 0);
 }
 
 void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex)

View File

@@ -54,12 +54,8 @@ static int test_mutex_init(struct unit_module *m, struct gk20a *g,
                 void *args)
 {
         struct nvgpu_mutex mutex;
-        int err = nvgpu_mutex_init(&mutex);
-
-        if (err != 0) {
-                unit_return_fail(m, "mutex_init failure: %d\n", err);
-        }
 
+        nvgpu_mutex_init(&mutex);
         nvgpu_mutex_destroy(&mutex);
 
         return UNIT_SUCCESS;
@@ -74,9 +70,7 @@ static int test_mutex_tryacquire(struct unit_module *m, struct gk20a *g,
         struct nvgpu_mutex mutex;
         int status;
 
-        if (nvgpu_mutex_init(&mutex) != 0) {
-                unit_return_fail(m, "mutex_init failure\n");
-        }
+        nvgpu_mutex_init(&mutex);
 
         nvgpu_mutex_acquire(&mutex);
@@ -175,9 +169,7 @@ static int test_lock_acquire_release(struct unit_module *m, struct gk20a *g,
         switch (type) {
         case TYPE_MUTEX:
-                if (nvgpu_mutex_init(&mutex) != 0) {
-                        unit_return_fail(m, "mutex_init failure\n");
-                }
+                nvgpu_mutex_init(&mutex);
                 worker_params.mutex = &mutex;
                 break;
         case TYPE_SPINLOCK: