mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: make nvgpu_mutex_init return void
Make the nvgpu_mutex_init function return void. In the Linux case this
doesn't affect anything, since mutex_init returns void. For POSIX, we
assert() and die if pthread_mutex_init fails. This alleviates the need
to error-inject for _every_ nvgpu_mutex_init call in the driver.

Jira NVGPU-3476

Change-Id: Ibc801116dc82cdfcedcba2c352785f2640b7d54f
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2130538
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 97762279b7
parent c74b89194d
committed by mobile promotions
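The substance of the change is the contract shift in the POSIX backend (the
hunk near the end of the diff); every other hunk is a call site dropping its
now-dead error branch. As a reading aid, here is a condensed before/after
sketch. The `_before` suffix and the explicit `#include <pthread.h>` exist
only in this sketch so both versions can sit side by side; in-tree, pthread
comes in via the nvgpu headers.

#include <pthread.h>     /* sketch-only include */
#include <nvgpu/bug.h>   /* nvgpu_assert() */
#include <nvgpu/lock.h>  /* struct nvgpu_mutex */

/* Before: the POSIX backend forwarded pthread's status code, so every
 * caller carried an error branch that the Linux backend (mutex_init()
 * returns void) could never take. The _before suffix is sketch-only. */
int nvgpu_mutex_init_before(struct nvgpu_mutex *mutex)
{
        return pthread_mutex_init(&mutex->lock.mutex, NULL);
}

/* After: an initialization failure dies in the backend itself, so call
 * sites collapse from
 *
 *         err = nvgpu_mutex_init(&f->userd_mutex);
 *         if (err != 0) {
 *                 nvgpu_err(g, "failed to init userd_mutex");
 *                 return err;
 *         }
 *
 * to a bare nvgpu_mutex_init(&f->userd_mutex); as in every hunk below. */
void nvgpu_mutex_init(struct nvgpu_mutex *mutex)
{
        int err = pthread_mutex_init(&mutex->lock.mutex, NULL);

        nvgpu_assert(err == 0);
}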
@@ -403,7 +403,6 @@ u32 nvgpu_ce_prepare_submit(u64 src_buf,
 int nvgpu_ce_init_support(struct gk20a *g)
 {
         struct nvgpu_ce_app *ce_app = g->ce_app;
-        int err;
         u32 ce_reset_mask;

         if (unlikely(ce_app == NULL)) {
@@ -430,10 +429,7 @@ int nvgpu_ce_init_support(struct gk20a *g)

         nvgpu_log(g, gpu_dbg_fn, "ce: init");

-        err = nvgpu_mutex_init(&ce_app->app_mutex);
-        if (err != 0) {
-                return err;
-        }
+        nvgpu_mutex_init(&ce_app->app_mutex);

         nvgpu_mutex_acquire(&ce_app->app_mutex);

@@ -516,16 +512,10 @@ u32 nvgpu_ce_create_context(struct gk20a *g,
                 return ctx_id;
         }

-        err = nvgpu_mutex_init(&ce_ctx->gpu_ctx_mutex);
-        if (err != 0) {
-                nvgpu_kfree(g, ce_ctx);
-                return ctx_id;
-        }
+        nvgpu_mutex_init(&ce_ctx->gpu_ctx_mutex);

         ce_ctx->g = g;
-
         ce_ctx->cmd_buf_read_queue_offset = 0;
-
         ce_ctx->vm = g->mm.ce.vm;

         /* allocate a tsg if needed */
@@ -129,10 +129,7 @@ int gp10b_init_clk_arbiter(struct gk20a *g)
                 return -ENOMEM;
         }

-        err = nvgpu_mutex_init(&arb->pstate_lock);
-        if (err != 0) {
-                goto mutex_fail;
-        }
+        nvgpu_mutex_init(&arb->pstate_lock);

         nvgpu_spinlock_init(&arb->sessions_lock);
         nvgpu_spinlock_init(&arb->users_lock);
@@ -231,8 +228,6 @@ init_fail:
         }

-        nvgpu_mutex_destroy(&arb->pstate_lock);
-
 mutex_fail:
         nvgpu_kfree(g, arb);

         return err;
@@ -153,10 +153,7 @@ int gv100_init_clk_arbiter(struct gk20a *g)
                 return -ENOMEM;
         }

-        err = nvgpu_mutex_init(&arb->pstate_lock);
-        if (err != 0) {
-                goto mutex_fail;
-        }
+        nvgpu_mutex_init(&arb->pstate_lock);
         nvgpu_spinlock_init(&arb->sessions_lock);
         nvgpu_spinlock_init(&arb->users_lock);
         nvgpu_spinlock_init(&arb->requests_lock);
@@ -278,8 +275,6 @@ init_fail:
         }

-        nvgpu_mutex_destroy(&arb->pstate_lock);
-
 mutex_fail:
         nvgpu_kfree(g, arb);

         return err;
@@ -571,16 +571,10 @@ int nvgpu_engine_fb_queue_init(struct nvgpu_engine_fb_queue **queue_p,
         queue->tail = engine_fb_queue_tail;

         /* init mutex */
-        err = nvgpu_mutex_init(&queue->mutex);
-        if (err != 0) {
-                goto free_queue;
-        }
+        nvgpu_mutex_init(&queue->mutex);

         /* init mutex */
-        err = nvgpu_mutex_init(&queue->fbq.work_buffer_mutex);
-        if (err != 0) {
-                goto free_mutex;
-        }
+        nvgpu_mutex_init(&queue->fbq.work_buffer_mutex);

         queue->fbq.work_buffer = nvgpu_kzalloc(g, queue->fbq.element_size);
         if (queue->fbq.work_buffer == NULL) {
@@ -599,9 +593,7 @@ int nvgpu_engine_fb_queue_init(struct nvgpu_engine_fb_queue **queue_p,

 free_work_mutex:
         nvgpu_mutex_destroy(&queue->fbq.work_buffer_mutex);
-free_mutex:
         nvgpu_mutex_destroy(&queue->mutex);
-free_queue:
         nvgpu_kfree(g, queue);

         return err;
@@ -429,10 +429,7 @@ int nvgpu_engine_mem_queue_init(struct nvgpu_engine_mem_queue **queue_p,
         }

         /* init mutex */
-        err = nvgpu_mutex_init(&queue->mutex);
-        if (err != 0) {
-                goto exit;
-        }
+        nvgpu_mutex_init(&queue->mutex);

         *queue_p = queue;
 exit:
@@ -733,28 +733,11 @@ int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id)
                 return 0;
         }

-        err = nvgpu_mutex_init(&flcn->imem_lock);
-        if (err != 0) {
-                nvgpu_err(g, "Error in flcn.imem_lock mutex initialization");
-                return err;
-        }
-
-        err = nvgpu_mutex_init(&flcn->dmem_lock);
-        if (err != 0) {
-                nvgpu_err(g, "Error in flcn.dmem_lock mutex initialization");
-                nvgpu_mutex_destroy(&flcn->imem_lock);
-                return err;
-        }
+        nvgpu_mutex_init(&flcn->imem_lock);
+        nvgpu_mutex_init(&flcn->dmem_lock);

         if (flcn->emem_supported) {
-                err = nvgpu_mutex_init(&flcn->emem_lock);
-                if (err != 0) {
-                        nvgpu_err(g, "Error in flcn.emem_lock "
-                                "mutex initialization");
-                        nvgpu_mutex_destroy(&flcn->dmem_lock);
-                        nvgpu_mutex_destroy(&flcn->imem_lock);
-                        return err;
-                }
+                nvgpu_mutex_init(&flcn->emem_lock);
         }

         return 0;
@@ -2377,57 +2377,19 @@ int nvgpu_channel_init_support(struct gk20a *g, u32 chid)
         nvgpu_init_list_node(&c->dbg_s_list);
         nvgpu_init_list_node(&c->worker_item);

-        err = nvgpu_mutex_init(&c->ioctl_lock);
-        if (err != 0) {
-                return err;
-        }
-        err = nvgpu_mutex_init(&c->joblist.cleanup_lock);
-        if (err != 0) {
-                goto fail_1;
-        }
-        err = nvgpu_mutex_init(&c->joblist.pre_alloc.read_lock);
-        if (err != 0) {
-                goto fail_2;
-        }
-        err = nvgpu_mutex_init(&c->sync_lock);
-        if (err != 0) {
-                goto fail_3;
-        }
+        nvgpu_mutex_init(&c->ioctl_lock);
+        nvgpu_mutex_init(&c->joblist.cleanup_lock);
+        nvgpu_mutex_init(&c->joblist.pre_alloc.read_lock);
+        nvgpu_mutex_init(&c->sync_lock);
 #if defined(CONFIG_GK20A_CYCLE_STATS)
-        err = nvgpu_mutex_init(&c->cyclestate.cyclestate_buffer_mutex);
-        if (err != 0) {
-                goto fail_4;
-        }
-        err = nvgpu_mutex_init(&c->cs_client_mutex);
-        if (err != 0) {
-                goto fail_5;
-        }
+        nvgpu_mutex_init(&c->cyclestate.cyclestate_buffer_mutex);
+        nvgpu_mutex_init(&c->cs_client_mutex);
 #endif
-        err = nvgpu_mutex_init(&c->dbg_s_lock);
-        if (err != 0) {
-                goto fail_6;
-        }
+        nvgpu_mutex_init(&c->dbg_s_lock);
         nvgpu_init_list_node(&c->ch_entry);
         nvgpu_list_add(&c->free_chs, &g->fifo.free_chs);

         return 0;
-
-fail_6:
-#if defined(CONFIG_GK20A_CYCLE_STATS)
-        nvgpu_mutex_destroy(&c->cs_client_mutex);
-fail_5:
-        nvgpu_mutex_destroy(&c->cyclestate.cyclestate_buffer_mutex);
-fail_4:
-#endif
-        nvgpu_mutex_destroy(&c->sync_lock);
-fail_3:
-        nvgpu_mutex_destroy(&c->joblist.pre_alloc.read_lock);
-fail_2:
-        nvgpu_mutex_destroy(&c->joblist.cleanup_lock);
-fail_1:
-        nvgpu_mutex_destroy(&c->ioctl_lock);
-
-        return err;
 }

 int nvgpu_channel_setup_sw(struct gk20a *g)
@@ -2438,11 +2400,7 @@ int nvgpu_channel_setup_sw(struct gk20a *g)

         f->num_channels = g->ops.channel.count(g);

-        err = nvgpu_mutex_init(&f->free_chs_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "mutex init failed");
-                return err;
-        }
+        nvgpu_mutex_init(&f->free_chs_mutex);

         f->channel = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->channel));
         if (f->channel == NULL) {
@@ -67,38 +67,6 @@ static void nvgpu_fifo_remove_support(struct nvgpu_fifo *f)
         g->ops.fifo.cleanup_sw(g);
 }

-static int nvgpu_fifo_init_locks(struct gk20a *g, struct nvgpu_fifo *f)
-{
-        int err;
-
-        err = nvgpu_mutex_init(&f->intr.isr.mutex);
-        if (err != 0) {
-                goto destroy_0;
-        }
-
-        err = nvgpu_mutex_init(&f->engines_reset_mutex);
-        if (err != 0) {
-                goto destroy_1;
-        }
-
-        err = nvgpu_mutex_init(&f->deferred_reset_mutex);
-        if (err != 0) {
-                goto destroy_2;
-        }
-
-        return 0;
-
-destroy_2:
-        nvgpu_mutex_destroy(&f->engines_reset_mutex);
-
-destroy_1:
-        nvgpu_mutex_destroy(&f->intr.isr.mutex);
-
-destroy_0:
-        nvgpu_err(g, "failed to init mutex");
-        return err;
-}
-
 int nvgpu_fifo_setup_sw_common(struct gk20a *g)
 {
         struct nvgpu_fifo *f = &g->fifo;
@@ -108,10 +76,9 @@ int nvgpu_fifo_setup_sw_common(struct gk20a *g)

         f->g = g;

-        err = nvgpu_fifo_init_locks(g, f);
-        if (err != 0) {
-                nvgpu_err(g, "failed to init mutexes");
-        }
+        nvgpu_mutex_init(&f->intr.isr.mutex);
+        nvgpu_mutex_init(&f->engines_reset_mutex);
+        nvgpu_mutex_init(&f->deferred_reset_mutex);

         err = nvgpu_channel_setup_sw(g);
         if (err != 0) {
@@ -789,12 +789,7 @@ int nvgpu_runlist_setup_sw(struct gk20a *g)
                 }
         }

-        err = nvgpu_mutex_init(&runlist->runlist_lock);
-        if (err != 0) {
-                nvgpu_err(g,
-                        "Error in runlist_lock mutex initialization");
-                goto clean_up_runlist;
-        }
+        nvgpu_mutex_init(&runlist->runlist_lock);

         /* None of buffers is pinned if this value doesn't change.
            Otherwise, one of them (cur_buffer) must have been pinned. */
@@ -354,7 +354,8 @@ int nvgpu_tsg_init_support(struct gk20a *g, u32 tsgid)

         nvgpu_init_list_node(&tsg->event_id_list);

-        return nvgpu_mutex_init(&tsg->event_id_list_lock);
+        nvgpu_mutex_init(&tsg->event_id_list_lock);
+        return 0;
 }

 int nvgpu_tsg_setup_sw(struct gk20a *g)
@@ -363,11 +364,7 @@ int nvgpu_tsg_setup_sw(struct gk20a *g)
         u32 tsgid, i;
         int err;

-        err = nvgpu_mutex_init(&f->tsg_inuse_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "mutex init failed");
-                return err;
-        }
+        nvgpu_mutex_init(&f->tsg_inuse_mutex);

         f->tsg = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->tsg));
         if (f->tsg == NULL) {
@@ -797,10 +794,7 @@ int nvgpu_tsg_alloc_sm_error_states_mem(struct gk20a *g,
                 return -EINVAL;
         }

-        err = nvgpu_mutex_init(&tsg->sm_exception_mask_lock);
-        if (err != 0) {
-                return err;
-        }
+        nvgpu_mutex_init(&tsg->sm_exception_mask_lock);

         tsg->sm_error_states = nvgpu_kzalloc(g,
                         sizeof(struct nvgpu_tsg_sm_error_state)
@@ -37,11 +37,7 @@ int nvgpu_userd_init_slabs(struct gk20a *g)
         struct nvgpu_fifo *f = &g->fifo;
         int err;

-        err = nvgpu_mutex_init(&f->userd_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "failed to init userd_mutex");
-                return err;
-        }
+        nvgpu_mutex_init(&f->userd_mutex);

         f->num_channels_per_slab = PAGE_SIZE / f->userd_entry_size;
         f->num_userd_slabs =
@@ -133,7 +133,6 @@ void nvgpu_gr_fecs_trace_find_pid(struct gk20a *g, u32 context_ptr,
 int nvgpu_gr_fecs_trace_init(struct gk20a *g)
 {
         struct nvgpu_gr_fecs_trace *trace;
-        int err;

         if (!is_power_of_2(GK20A_FECS_TRACE_NUM_RECORDS)) {
                 nvgpu_err(g, "invalid NUM_RECORDS chosen");
@@ -147,20 +146,9 @@ int nvgpu_gr_fecs_trace_init(struct gk20a *g)
         }
         g->fecs_trace = trace;

-        err = nvgpu_mutex_init(&trace->poll_lock);
-        if (err != 0) {
-                goto clean;
-        }
-
-        err = nvgpu_mutex_init(&trace->list_lock);
-        if (err != 0) {
-                goto clean_poll_lock;
-        }
-
-        err = nvgpu_mutex_init(&trace->enable_lock);
-        if (err != 0) {
-                goto clean_list_lock;
-        }
+        nvgpu_mutex_init(&trace->poll_lock);
+        nvgpu_mutex_init(&trace->list_lock);
+        nvgpu_mutex_init(&trace->enable_lock);

         nvgpu_init_list_node(&trace->context_list);

@@ -169,15 +157,6 @@ int nvgpu_gr_fecs_trace_init(struct gk20a *g)
         trace->enable_count = 0;

         return 0;
-
-clean_list_lock:
-        nvgpu_mutex_destroy(&trace->list_lock);
-clean_poll_lock:
-        nvgpu_mutex_destroy(&trace->poll_lock);
-clean:
-        nvgpu_kfree(g, trace);
-        g->fecs_trace = NULL;
-        return err;
 }

 int nvgpu_gr_fecs_trace_deinit(struct gk20a *g)
@@ -387,11 +387,7 @@ static int gr_init_setup_sw(struct gk20a *g)

         gr->g = g;

-        err = nvgpu_mutex_init(&gr->ctxsw_disable_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "Error in ctxsw_disable_mutex init");
-                return err;
-        }
+        nvgpu_mutex_init(&gr->ctxsw_disable_mutex);
         gr->ctxsw_disable_count = 0;

         err = nvgpu_gr_obj_ctx_init(g, &gr->golden_image,
@@ -46,7 +46,6 @@
 struct nvgpu_gr_falcon *nvgpu_gr_falcon_init_support(struct gk20a *g)
 {
         struct nvgpu_gr_falcon *falcon;
-        int err = 0;

         nvgpu_log_fn(g, " ");

@@ -55,17 +54,8 @@ struct nvgpu_gr_falcon *nvgpu_gr_falcon_init_support(struct gk20a *g)
                 return falcon;
         }

-        err = nvgpu_mutex_init(&falcon->fecs_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "Error in fecs_mutex init");
-                goto done;
-        }
+        nvgpu_mutex_init(&falcon->fecs_mutex);

-done:
-        if (err != 0) {
-                nvgpu_kfree(g, falcon);
-                falcon = NULL;
-        }
         return falcon;
 }

@@ -701,7 +701,6 @@ int nvgpu_gr_obj_ctx_init(struct gk20a *g,
         struct nvgpu_gr_obj_ctx_golden_image **gr_golden_image, u32 size)
 {
         struct nvgpu_gr_obj_ctx_golden_image *golden_image;
-        int err;

         golden_image = nvgpu_kzalloc(g, sizeof(*golden_image));
         if (golden_image == NULL) {
@@ -710,11 +709,7 @@ int nvgpu_gr_obj_ctx_init(struct gk20a *g,

         nvgpu_gr_obj_ctx_set_golden_image_size(golden_image, size);

-        err = nvgpu_mutex_init(&golden_image->ctx_mutex);
-        if (err != 0) {
-                nvgpu_kfree(g, golden_image);
-                return err;
-        }
+        nvgpu_mutex_init(&golden_image->ctx_mutex);

         *gr_golden_image = golden_image;

@@ -197,11 +197,7 @@ static int nvgpu_gr_zbc_load_default_table(struct gk20a *g,
         u32 i = 0;
         int err = 0;

-        err = nvgpu_mutex_init(&zbc->zbc_lock);
-        if (err != 0) {
-                nvgpu_err(g, "Error in zbc_lock mutex initialization");
-                return err;
-        }
+        nvgpu_mutex_init(&zbc->zbc_lock);

         /* load default color table */
         zbc_val.type = NVGPU_GR_ZBC_TYPE_COLOR;
@@ -148,8 +148,6 @@ int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
                 const char *name, void *priv, bool dbg,
                 const struct nvgpu_allocator_ops *ops)
 {
-        int err;
-
         if (ops == NULL) {
                 return -EINVAL;
         }
@@ -163,10 +161,7 @@ int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
                 return -EINVAL;
         }

-        err = nvgpu_mutex_init(&a->lock);
-        if (err != 0) {
-                return err;
-        }
+        nvgpu_mutex_init(&a->lock);

         a->g = g;
         a->ops = ops;
@@ -74,12 +74,7 @@ int gk20a_comptag_allocator_init(struct gk20a *g,
                 struct gk20a_comptag_allocator *allocator,
                 unsigned long size)
 {
-        int err = nvgpu_mutex_init(&allocator->lock);
-
-        if (err != 0) {
-                nvgpu_err(g, "Error in allocator.lock mutex initialization");
-                return err;
-        }
+        nvgpu_mutex_init(&allocator->lock);

         /*
          * 0th comptag is special and is never used. The base for this bitmap
@@ -94,8 +94,6 @@ int nvgpu_pd_cache_init(struct gk20a *g)
 {
         struct nvgpu_pd_cache *cache;
         u32 i;
-        int err = 0;
-

         /*
          * This gets called from finalize_poweron() so we need to make sure we
@@ -118,12 +116,7 @@ int nvgpu_pd_cache_init(struct gk20a *g)

         cache->mem_tree = NULL;

-        err = nvgpu_mutex_init(&cache->lock);
-        if (err != 0) {
-                nvgpu_err(g, "Error in cache.lock initialization");
-                nvgpu_kfree(g, cache);
-                return err;
-        }
+        nvgpu_mutex_init(&cache->lock);

         g->mm.pd_cache = cache;

@@ -489,11 +489,7 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
         }

         mm->g = g;
-        err = nvgpu_mutex_init(&mm->l2_op_lock);
-        if (err != 0) {
-                nvgpu_err(g, "Error in l2_op_lock mutex initialization");
-                return err;
-        }
+        nvgpu_mutex_init(&mm->l2_op_lock);

         /*TBD: make channel vm size configurable */
         mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE -
@@ -384,26 +384,9 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
         nvgpu_atomic64_set(&mm->vidmem.bytes_pending, 0);
         nvgpu_init_list_node(&mm->vidmem.clear_list_head);

-        err = nvgpu_mutex_init(&mm->vidmem.clear_list_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "nvgpu_mutex_init(list_mutex) failed err=%d",
-                        err);
-                goto fail;
-        }
-
-        err = nvgpu_mutex_init(&mm->vidmem.clearing_thread_lock);
-        if (err != 0) {
-                nvgpu_err(g, "nvgpu_mutex_init(thread_lock) failed err=%d",
-                        err);
-                goto fail;
-        }
-
-        err = nvgpu_mutex_init(&mm->vidmem.first_clear_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "nvgpu_mutex_init(first_clear) failed err=%d",
-                        err);
-                goto fail;
-        }
+        nvgpu_mutex_init(&mm->vidmem.clear_list_mutex);
+        nvgpu_mutex_init(&mm->vidmem.clearing_thread_lock);
+        nvgpu_mutex_init(&mm->vidmem.first_clear_mutex);

         nvgpu_atomic_set(&mm->vidmem.pause_count, 0);

@@ -529,18 +529,8 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,

         vm->mapped_buffers = NULL;

-        err = nvgpu_mutex_init(&vm->syncpt_ro_map_lock);
-        if (err != 0) {
-                nvgpu_err(g,
-                        "Error in syncpt_ro_map_lock mutex initialization");
-                goto clean_up_allocators;
-        }
-
-        err = nvgpu_mutex_init(&vm->update_gmmu_lock);
-        if (err != 0) {
-                nvgpu_err(g, "Error in update_gmmu_lock mutex initialization");
-                goto clean_up_ro_map_lock;
-        }
+        nvgpu_mutex_init(&vm->syncpt_ro_map_lock);
+        nvgpu_mutex_init(&vm->update_gmmu_lock);

         nvgpu_ref_init(&vm->ref);
         nvgpu_init_list_node(&vm->vm_area_list);
@@ -561,7 +551,6 @@ int nvgpu_vm_do_init(struct mm_gk20a *mm,

 clean_up_gmmu_lock:
         nvgpu_mutex_destroy(&vm->update_gmmu_lock);
-clean_up_ro_map_lock:
         nvgpu_mutex_destroy(&vm->syncpt_ro_map_lock);
 clean_up_allocators:
         if (nvgpu_alloc_initialized(&vm->kernel)) {
@@ -75,12 +75,7 @@ int nvgpu_pmu_sequences_init(struct gk20a *g, struct nvgpu_pmu *pmu,
                 return -ENOMEM;
         }

-        err = nvgpu_mutex_init(&sequences->pmu_seq_lock);
-        if (err != 0) {
-                nvgpu_kfree(g, sequences->seq);
-                nvgpu_kfree(g, sequences);
-                return err;
-        }
+        nvgpu_mutex_init(&sequences->pmu_seq_lock);

         *sequences_p = sequences;
 exit:
@@ -212,10 +212,7 @@ int nvgpu_pmu_perf_pstate_sw_setup(struct gk20a *g)

         nvgpu_cond_init(&g->perf_pmu->pstatesobjs.pstate_notifier_wq);

-        err = nvgpu_mutex_init(&g->perf_pmu->pstatesobjs.pstate_mutex);
-        if (err != 0) {
-                return err;
-        }
+        nvgpu_mutex_init(&g->perf_pmu->pstatesobjs.pstate_mutex);

         err = nvgpu_boardobjgrp_construct_e32(g, &g->perf_pmu->pstatesobjs.super);
         if (err != 0) {
@@ -998,18 +998,8 @@ int nvgpu_pmu_pg_init(struct gk20a *g, struct nvgpu_pmu *pmu,
         pg->aelpg_param[3] = APCTRL_POWER_BREAKEVEN_DEFAULT_US;
         pg->aelpg_param[4] = APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT;

-        err = nvgpu_mutex_init(&pg->elpg_mutex);
-        if (err != 0) {
-                nvgpu_kfree(g, pg);
-                goto exit;
-        }
-
-        err = nvgpu_mutex_init(&pg->pg_mutex);
-        if (err != 0) {
-                nvgpu_mutex_destroy(&pg->elpg_mutex);
-                nvgpu_kfree(g, pg);
-                goto exit;
-        }
+        nvgpu_mutex_init(&pg->elpg_mutex);
+        nvgpu_mutex_init(&pg->pg_mutex);

         *pg_p = pg;

@@ -322,10 +322,7 @@ int nvgpu_pmu_early_init(struct gk20a *g, struct nvgpu_pmu **pmu_p)
                 goto exit;
         }
 #ifdef NVGPU_FEATURE_LS_PMU
-        err = nvgpu_mutex_init(&pmu->isr_mutex);
-        if (err != 0) {
-                goto init_failed;
-        }
+        nvgpu_mutex_init(&pmu->isr_mutex);

         /* Allocate memory for pmu_perfmon */
         err = nvgpu_pmu_initialize_perfmon(g, pmu, &pmu->pmu_perfmon);
@@ -29,19 +29,13 @@
 int nvgpu_sec2_sequences_alloc(struct gk20a *g,
         struct sec2_sequences *sequences)
 {
-        int err;
-
         sequences->seq = nvgpu_kzalloc(g, SEC2_MAX_NUM_SEQUENCES *
                 sizeof(struct sec2_sequence));
         if (sequences->seq == NULL) {
                 return -ENOMEM;
         }

-        err = nvgpu_mutex_init(&sequences->sec2_seq_lock);
-        if (err != 0) {
-                nvgpu_kfree(g, sequences->seq);
-                return err;
-        }
+        nvgpu_mutex_init(&sequences->sec2_seq_lock);

         return 0;
 }
@@ -48,24 +48,15 @@ int nvgpu_init_sec2_setup_sw(struct gk20a *g, struct nvgpu_sec2 *sec2)

         err = nvgpu_sec2_sequences_alloc(g, &sec2->sequences);
         if (err != 0) {
-                goto exit;
+                return err;
         }

         nvgpu_sec2_sequences_init(g, &sec2->sequences);

-        err = nvgpu_mutex_init(&sec2->isr_mutex);
-        if (err != 0) {
-                goto free_sequences;
-        }
+        nvgpu_mutex_init(&sec2->isr_mutex);

         sec2->remove_support = nvgpu_remove_sec2_support;

-        goto exit;
-
-free_sequences:
-        nvgpu_sec2_sequences_free(g, &sec2->sequences);
-
-exit:
         return err;
 }

@@ -50,15 +50,12 @@ int nvgpu_semaphore_pool_alloc(struct nvgpu_semaphore_sea *sea,

         nvgpu_semaphore_sea_lock(sea);

-        ret = nvgpu_mutex_init(&p->pool_lock);
-        if (ret != 0) {
-                goto fail;
-        }
+        nvgpu_mutex_init(&p->pool_lock);

         ret = semaphore_bitmap_alloc(sea->pools_alloced,
                                      SEMAPHORE_POOL_COUNT);
         if (ret < 0) {
-                goto fail_alloc;
+                goto fail;
         }

         page_idx = (unsigned long)ret;
@@ -78,9 +75,8 @@ int nvgpu_semaphore_pool_alloc(struct nvgpu_semaphore_sea *sea,
         *pool = p;
         return 0;

-fail_alloc:
-        nvgpu_mutex_destroy(&p->pool_lock);
 fail:
+        nvgpu_mutex_destroy(&p->pool_lock);
         nvgpu_semaphore_sea_unlock(sea);
         nvgpu_kfree(sea->gk20a, p);
         gpu_sema_dbg(sea->gk20a, "Failed to allocate semaphore pool!");
@@ -112,20 +112,17 @@ struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g)
         g->sema_sea->page_count = 0;
         g->sema_sea->gk20a = g;
         nvgpu_init_list_node(&g->sema_sea->pool_list);
-        if (nvgpu_mutex_init(&g->sema_sea->sea_lock) != 0) {
-                goto cleanup_free;
-        }
+        nvgpu_mutex_init(&g->sema_sea->sea_lock);

         if (semaphore_sea_grow(g->sema_sea) != 0) {
-                goto cleanup_destroy;
+                goto cleanup;
         }

         gpu_sema_dbg(g, "Created semaphore sea!");
         return g->sema_sea;

-cleanup_destroy:
+cleanup:
         nvgpu_mutex_destroy(&g->sema_sea->sea_lock);
-cleanup_free:
         nvgpu_kfree(g, g->sema_sea);
         g->sema_sea = NULL;
         gpu_sema_dbg(g, "Failed to creat semaphore sea!");
@@ -256,16 +256,11 @@ int nvgpu_worker_init(struct gk20a *g, struct nvgpu_worker *worker,
         nvgpu_cond_init(&worker->wq);
         nvgpu_init_list_node(&worker->items);
         nvgpu_spinlock_init(&worker->items_lock);
-        err = nvgpu_mutex_init(&worker->start_lock);
+        nvgpu_mutex_init(&worker->start_lock);

         worker->ops = ops;

-        if (err != 0 && ops == NULL) {
-                goto error_check;
-        }
-
         err = nvgpu_worker_start(worker);
-error_check:
         if (err != 0) {
                 nvgpu_err(g, "failed to start worker poller thread %s",
                         worker->thread_name);
@@ -1184,10 +1184,7 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)

         nvgpu_log_fn(g, " ");

-        err = nvgpu_mutex_init(&clk->clk_mutex);
-        if (err != 0) {
-                return err;
-        }
+        nvgpu_mutex_init(&clk->clk_mutex);

         if (clk->sw_ready) {
                 nvgpu_log_fn(g, "skip init");
@@ -92,14 +92,10 @@ unsigned long gv100_clk_measure_freq(struct gk20a *g, u32 api_domain)
 int gv100_init_clk_support(struct gk20a *g)
 {
         struct clk_gk20a *clk = &g->clk;
-        int err = 0;

         nvgpu_log_fn(g, " ");

-        err = nvgpu_mutex_init(&clk->clk_mutex);
-        if (err != 0) {
-                return err;
-        }
+        nvgpu_mutex_init(&clk->clk_mutex);

         clk->clk_namemap = (struct namemap_cfg *)
                 nvgpu_kzalloc(g, sizeof(struct namemap_cfg) * NUM_NAMEMAPS);
|
||||
|
||||
clk->g = g;
|
||||
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32 gv100_get_rate_cntr(struct gk20a *g, struct namemap_cfg *c) {
|
||||
|
||||
@@ -712,11 +712,7 @@ int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)

         nvgpu_log_fn(g, " ");

-        err = nvgpu_mutex_init(&g->mm.hub_isr_mutex);
-        if (err != 0) {
-                nvgpu_err(g, "Error in hub_isr_mutex initialization");
-                return err;
-        }
+        nvgpu_mutex_init(&g->mm.hub_isr_mutex);

         err = gv11b_mm_mmu_fault_info_buf_init(g);

@@ -30,10 +30,9 @@ struct nvgpu_raw_spinlock {
         raw_spinlock_t spinlock;
 };

-static inline int nvgpu_mutex_init(struct nvgpu_mutex *mutex)
+static inline void nvgpu_mutex_init(struct nvgpu_mutex *mutex)
 {
         mutex_init(&mutex->mutex);
-        return 0;
 };
 static inline void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex)
 {
@@ -56,7 +56,7 @@ struct nvgpu_spinlock;
  */
 struct nvgpu_raw_spinlock;

-int nvgpu_mutex_init(struct nvgpu_mutex *mutex);
+void nvgpu_mutex_init(struct nvgpu_mutex *mutex);
 void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex);
 void nvgpu_mutex_release(struct nvgpu_mutex *mutex);
 int nvgpu_mutex_tryacquire(struct nvgpu_mutex *mutex);
@@ -1450,10 +1450,7 @@ __releases(&cde_app->mutex)

         nvgpu_log(g, gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: init");

-        err = nvgpu_mutex_init(&cde_app->mutex);
-        if (err)
-                return err;
-
+        nvgpu_mutex_init(&cde_app->mutex);
         nvgpu_mutex_acquire(&cde_app->mutex);

         nvgpu_init_list_node(&cde_app->free_contexts);
@@ -551,7 +551,6 @@ static int gk20a_ctxsw_init_devs(struct gk20a *g)
 {
         struct gk20a_ctxsw_trace *trace = g->ctxsw_trace;
         struct gk20a_ctxsw_dev *dev = trace->devs;
-        int err;
         int i;

         for (i = 0; i < GK20A_CTXSW_TRACE_NUM_DEVS; i++) {
@@ -559,9 +558,7 @@ static int gk20a_ctxsw_init_devs(struct gk20a *g)
                 dev->hdr = NULL;
                 dev->write_enabled = false;
                 nvgpu_cond_init(&dev->readout_wq);
-                err = nvgpu_mutex_init(&dev->write_lock);
-                if (err)
-                        return err;
+                nvgpu_mutex_init(&dev->write_lock);
                 nvgpu_atomic_set(&dev->vma_ref, 0);
                 dev++;
         }
@@ -435,21 +435,13 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,

         nvgpu_cond_init(&dbg_s->dbg_events.wait_queue);
         nvgpu_init_list_node(&dbg_s->ch_list);
-        err = nvgpu_mutex_init(&dbg_s->ch_list_lock);
-        if (err)
-                goto err_free_session;
-        err = nvgpu_mutex_init(&dbg_s->ioctl_lock);
-        if (err)
-                goto err_destroy_lock;
+        nvgpu_mutex_init(&dbg_s->ch_list_lock);
+        nvgpu_mutex_init(&dbg_s->ioctl_lock);
         dbg_s->dbg_events.events_enabled = false;
         dbg_s->dbg_events.num_pending_events = 0;

         return 0;

-err_destroy_lock:
-        nvgpu_mutex_destroy(&dbg_s->ch_list_lock);
-err_free_session:
-        nvgpu_kfree(g, dbg_session_linux);
 free_ref:
         gk20a_put(g);
         return err;
@@ -339,9 +339,7 @@ static int gk20a_tsg_event_id_enable(struct nvgpu_tsg *tsg,
         event_id_data->event_id = event_id;

         nvgpu_cond_init(&event_id_data->event_id_wq);
-        err = nvgpu_mutex_init(&event_id_data->lock);
-        if (err)
-                goto clean_up_free;
+        nvgpu_mutex_init(&event_id_data->lock);

         nvgpu_init_list_node(&event_id_data->event_id_node);

@@ -356,8 +354,6 @@ static int gk20a_tsg_event_id_enable(struct nvgpu_tsg *tsg,

         return 0;

-clean_up_free:
-        nvgpu_kfree(g, event_id_data);
 clean_up_file:
         fput(file);
 clean_up:
@@ -283,7 +283,6 @@ static void nvgpu_channel_close_linux(struct nvgpu_channel *ch, bool force)
 static int nvgpu_channel_alloc_linux(struct gk20a *g, struct nvgpu_channel *ch)
 {
         struct nvgpu_channel_linux *priv;
-        int err;

         priv = nvgpu_kzalloc(g, sizeof(*priv));
         if (!priv)
@@ -296,11 +295,7 @@ static int nvgpu_channel_alloc_linux(struct gk20a *g, struct nvgpu_channel *ch)
         ch->has_os_fence_framework_support = true;
 #endif

-        err = nvgpu_mutex_init(&priv->error_notifier.mutex);
-        if (err) {
-                nvgpu_kfree(g, priv);
-                return err;
-        }
+        nvgpu_mutex_init(&priv->error_notifier.mutex);

         nvgpu_channel_work_completion_init(ch);

@@ -627,28 +627,14 @@ int gk20a_sched_ctrl_init(struct gk20a *g)

         nvgpu_cond_init(&sched->readout_wq);

-        err = nvgpu_mutex_init(&sched->status_lock);
-        if (err)
-                goto free_ref;
-
-        err = nvgpu_mutex_init(&sched->control_lock);
-        if (err)
-                goto free_status_lock;
-
-        err = nvgpu_mutex_init(&sched->busy_lock);
-        if (err)
-                goto free_control_lock;
-
+        nvgpu_mutex_init(&sched->status_lock);
+        nvgpu_mutex_init(&sched->control_lock);
+        nvgpu_mutex_init(&sched->busy_lock);
         sched->sw_ready = true;

         return 0;
-
-free_control_lock:
-        nvgpu_mutex_destroy(&sched->control_lock);
-free_status_lock:
-        nvgpu_mutex_destroy(&sched->status_lock);
 free_ref:
         nvgpu_kfree(g, sched->ref_tsg_bitmap);
 free_recent:
         nvgpu_kfree(g, sched->recent_tsg_bitmap);
 free_active:
@@ -36,11 +36,7 @@ int nvgpu_cond_init(struct nvgpu_cond *cond)
                 return ret;
         }

-        ret = nvgpu_mutex_init(&cond->mutex);
-        if (ret != 0) {
-                (void) pthread_condattr_destroy(&cond->attr);
-                return ret;
-        }
+        nvgpu_mutex_init(&cond->mutex);

         ret = pthread_cond_init(&cond->cond, &cond->attr);
         if (ret != 0) {
@@ -20,11 +20,13 @@
  * DEALINGS IN THE SOFTWARE.
  */

+#include <nvgpu/bug.h>
 #include <nvgpu/lock.h>

-int nvgpu_mutex_init(struct nvgpu_mutex *mutex)
+void nvgpu_mutex_init(struct nvgpu_mutex *mutex)
 {
-        return pthread_mutex_init(&mutex->lock.mutex, NULL);
+        int err = pthread_mutex_init(&mutex->lock.mutex, NULL);
+
+        nvgpu_assert(err == 0);
 }

 void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex)
@@ -54,12 +54,8 @@ static int test_mutex_init(struct unit_module *m, struct gk20a *g,
                            void *args)
 {
         struct nvgpu_mutex mutex;
-        int err = nvgpu_mutex_init(&mutex);

-        if (err != 0) {
-                unit_return_fail(m, "mutex_init failure: %d\n", err);
-        }
-
+        nvgpu_mutex_init(&mutex);
         nvgpu_mutex_destroy(&mutex);

         return UNIT_SUCCESS;
@@ -74,9 +70,7 @@ static int test_mutex_tryacquire(struct unit_module *m, struct gk20a *g,
         struct nvgpu_mutex mutex;
         int status;

-        if (nvgpu_mutex_init(&mutex) != 0) {
-                unit_return_fail(m, "mutex_init failure\n");
-        }
+        nvgpu_mutex_init(&mutex);

         nvgpu_mutex_acquire(&mutex);

@@ -175,9 +169,7 @@ static int test_lock_acquire_release(struct unit_module *m, struct gk20a *g,

         switch (type) {
         case TYPE_MUTEX:
-                if (nvgpu_mutex_init(&mutex) != 0) {
-                        unit_return_fail(m, "mutex_init failure\n");
-                }
+                nvgpu_mutex_init(&mutex);
                 worker_params.mutex = &mutex;
                 break;
         case TYPE_SPINLOCK: