gpu: nvgpu: channel MISRA fix for Rule 21.2

MISRA C:2012 Rule 21.2 forbids declaring reserved identifiers, and names
with a leading underscore are reserved, so drop the underscore prefixes
and move the channel get/put helpers into the nvgpu_ namespace:

_gk20a_channel_get      -> nvgpu_channel_get__func
gk20a_channel_get       -> nvgpu_channel_get
_gk20a_channel_put      -> nvgpu_channel_put__func
gk20a_channel_put       -> nvgpu_channel_put
trace_gk20a_channel_get -> trace_nvgpu_channel_get
trace_gk20a_channel_put -> trace_nvgpu_channel_put

JIRA NVGPU-3388

Change-Id: I4e37adddbb5ce14aa18132722719ca2f73f1ba52
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2114118
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Seema Khowala
Date:      2019-05-07 15:11:39 -07:00
Committer: mobile promotions
Commit:    671f1c8a36 (parent 26d13b3b6b)

24 changed files with 103 additions and 105 deletions
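For reference, the renamed reference-counting interface as it reads after
this change (assembled from the channel.h hunk further down; the wrapper
macros forward the caller's __func__ so the get/put trace events can
attribute each reference to its call site):

    struct nvgpu_channel *__must_check nvgpu_channel_get__func(
            struct nvgpu_channel *ch, const char *caller);
    #define nvgpu_channel_get(ch)   nvgpu_channel_get__func(ch, __func__)

    void nvgpu_channel_put__func(struct nvgpu_channel *ch, const char *caller);
    #define nvgpu_channel_put(ch)   nvgpu_channel_put__func(ch, __func__)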


@@ -585,8 +585,9 @@ static void gk20a_channel_save_ref_source(struct nvgpu_channel *ch,
  * Most global functions in this file require a reference to be held by the
  * caller.
  */
-struct nvgpu_channel *_gk20a_channel_get(struct nvgpu_channel *ch,
-        const char *caller) {
+struct nvgpu_channel *nvgpu_channel_get__func(struct nvgpu_channel *ch,
+        const char *caller)
+{
     struct nvgpu_channel *ret;

     nvgpu_spinlock_acquire(&ch->ref_obtain_lock);
@@ -602,16 +603,16 @@ struct nvgpu_channel *_gk20a_channel_get(struct nvgpu_channel *ch,
     nvgpu_spinlock_release(&ch->ref_obtain_lock);

     if (ret != NULL) {
-        trace_gk20a_channel_get(ch->chid, caller);
+        trace_nvgpu_channel_get(ch->chid, caller);
     }

     return ret;
 }

-void _gk20a_channel_put(struct nvgpu_channel *ch, const char *caller)
+void nvgpu_channel_put__func(struct nvgpu_channel *ch, const char *caller)
 {
     gk20a_channel_save_ref_source(ch, channel_gk20a_ref_action_put);
-    trace_gk20a_channel_put(ch->chid, caller);
+    trace_nvgpu_channel_put(ch->chid, caller);
     nvgpu_atomic_dec(&ch->ref_count);
     nvgpu_cond_broadcast(&ch->ref_count_dec_wq);
@@ -632,7 +633,7 @@ struct nvgpu_channel *nvgpu_channel_from_id__func(struct gk20a *g,
         return NULL;
     }

-    return _gk20a_channel_get(&g->fifo.channel[chid], caller);
+    return nvgpu_channel_get__func(&g->fifo.channel[chid], caller);
 }

 void gk20a_channel_close(struct nvgpu_channel *ch)
@@ -1600,7 +1601,7 @@ void nvgpu_channel_wdt_restart_all_channels(struct gk20a *g)
             if (!gk20a_channel_check_unserviceable(ch)) {
                 nvgpu_channel_wdt_rewind(ch);
             }
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
         }
     }
 }
@@ -1699,7 +1700,7 @@ static void nvgpu_channel_poll_wdt(struct gk20a *g)
             if (!gk20a_channel_check_unserviceable(ch)) {
                 nvgpu_channel_wdt_check(ch);
             }
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
         }
     }
 }
@@ -1758,7 +1759,7 @@ static void nvgpu_channel_worker_poll_wakeup_process_item(
     gk20a_channel_clean_up_jobs(ch, true);

     /* ref taken when enqueued */
-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);
 }

 static u32 nvgpu_channel_worker_poll_wakeup_condition_get_timeout(
@@ -1824,7 +1825,7 @@ static void gk20a_channel_worker_enqueue(struct nvgpu_channel *ch)
     * the time we end up here (e.g., if the client got killed); if so, just
     * return.
     */
-    if (gk20a_channel_get(ch) == NULL) {
+    if (nvgpu_channel_get(ch) == NULL) {
         nvgpu_info(g, "cannot get ch ref for worker!");
         return;
     }
@@ -1832,7 +1833,7 @@ static void gk20a_channel_worker_enqueue(struct nvgpu_channel *ch)
     ret = nvgpu_worker_enqueue(&g->channel_worker.worker,
             &ch->worker_item);
     if (ret != 0) {
-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);
         return;
     }
 }
@@ -1882,7 +1883,7 @@ int gk20a_channel_add_job(struct nvgpu_channel *c,
     * Ref to hold the channel open during the job lifetime. This is
     * released by job cleanup launched via syncpt or sema interrupt.
     */
-    c = gk20a_channel_get(c);
+    c = nvgpu_channel_get(c);

     if (c != NULL) {
         job->num_mapped_buffers = num_mapped_buffers;
@@ -1937,13 +1938,13 @@ void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,
     bool job_finished = false;
     bool watchdog_on = false;

-    c = gk20a_channel_get(c);
+    c = nvgpu_channel_get(c);
     if (c == NULL) {
         return;
     }

     if (!c->g->power_on) { /* shutdown case */
-        gk20a_channel_put(c);
+        nvgpu_channel_put(c);
         return;
     }
@@ -2042,7 +2043,7 @@ void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,
         /* another bookkeeping taken in add_job. caller must hold a ref
          * so this wouldn't get freed here. */
-        gk20a_channel_put(c);
+        nvgpu_channel_put(c);

         /*
          * ensure all pending writes complete before freeing up the job.
@@ -2074,7 +2075,7 @@ void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,
         g->os_channel.work_completion_signal(c);
     }

-    gk20a_channel_put(c);
+    nvgpu_channel_put(c);
 }

 /**
@@ -2137,7 +2138,7 @@ void gk20a_channel_deterministic_idle(struct gk20a *g)
                 gk20a_idle(g);
             } else {
                 /* Not interesting, carry on. */
-                gk20a_channel_put(ch);
+                nvgpu_channel_put(ch);
             }
         }
     }
@@ -2168,10 +2169,10 @@ void gk20a_channel_deterministic_unidle(struct gk20a *g)
                 nvgpu_err(g, "cannot busy() again!");
             }
             /* Took this in idle() */
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
         }
-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);
     }

     /* Release submits, new deterministic channels and frees */
@@ -2380,7 +2381,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
             active_runlist_ids |= (u32) BIT64(ch->runlist_id);
         }
-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);
     }

     if (channels_in_use) {
@@ -2398,7 +2399,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
             } else {
                 g->ops.channel.unbind(ch);
             }
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
         }
     }
 }
@@ -2431,7 +2432,7 @@ void nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g)
             channels_in_use = true;
             active_runlist_ids |= (u32) BIT64(ch->runlist_id);
         }
-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);
     }

     if (channels_in_use) {
@@ -2456,7 +2457,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
     for (chid = 0; chid < f->num_channels; chid++) {
         struct nvgpu_channel *c = g->fifo.channel+chid;
-        if (gk20a_channel_get(c) != NULL) {
+        if (nvgpu_channel_get(c) != NULL) {
             if (nvgpu_atomic_read(&c->bound) != 0) {
                 nvgpu_cond_broadcast_interruptible(
                         &c->semaphore_wq);
@@ -2483,7 +2484,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
                     gk20a_channel_update(c);
                 }
             }
-            gk20a_channel_put(c);
+            nvgpu_channel_put(c);
         }
     }
 }
@@ -2513,7 +2514,7 @@ struct nvgpu_channel *nvgpu_channel_refch_from_inst_ptr(struct gk20a *g,
             return ch;
         }
-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);
     }

     return NULL;
 }
@@ -2567,7 +2568,7 @@ void nvgpu_channel_debug_dump_all(struct gk20a *g,
         * successful allocs
         */
        if (info == NULL) {
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
        } else {
            infos[chid] = info;
        }
@@ -2600,7 +2601,7 @@ void nvgpu_channel_debug_dump_all(struct gk20a *g,
         g->ops.channel.read_state(g, ch, &info->hw_state);
         g->ops.ramfc.capture_ram_dump(g, ch, info);
-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);
     }

     gk20a_debug_output(o, "Channel Status - chip %-5s", g->name);


@@ -319,7 +319,7 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
         ch = nvgpu_channel_from_id(g, pbdma_chid);
         if (ch != NULL) {
             err = g->ops.fifo.preempt_channel(g, ch);
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
         }
         if (err != 0) {
             goto clean_up;
@@ -341,7 +341,7 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
         ch = nvgpu_channel_from_id(g, engine_chid);
         if (ch != NULL) {
             err = g->ops.fifo.preempt_channel(g, ch);
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
         }
         if (err != 0) {
             goto clean_up;


@@ -398,11 +398,11 @@ bool nvgpu_tsg_mark_error(struct gk20a *g,
     nvgpu_rwsem_down_read(&tsg->ch_list_lock);
     nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-        if (gk20a_channel_get(ch) != NULL) {
+        if (nvgpu_channel_get(ch) != NULL) {
             if (nvgpu_channel_mark_error(g, ch)) {
                 verbose = true;
             }
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
         }
     }
     nvgpu_rwsem_up_read(&tsg->ch_list_lock);
@@ -417,9 +417,9 @@ void nvgpu_tsg_set_ctxsw_timeout_accumulated_ms(struct nvgpu_tsg *tsg, u32 ms)
     nvgpu_rwsem_down_read(&tsg->ch_list_lock);
     nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-        if (gk20a_channel_get(ch) != NULL) {
+        if (nvgpu_channel_get(ch) != NULL) {
             ch->ctxsw_timeout_accumulated_ms = ms;
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
         }
     }
     nvgpu_rwsem_up_read(&tsg->ch_list_lock);
@@ -432,11 +432,11 @@ bool nvgpu_tsg_ctxsw_timeout_debug_dump_state(struct nvgpu_tsg *tsg)
     nvgpu_rwsem_down_read(&tsg->ch_list_lock);
     nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-        if (gk20a_channel_get(ch) != NULL) {
+        if (nvgpu_channel_get(ch) != NULL) {
             if (ch->ctxsw_timeout_debug_dump) {
                 verbose = true;
             }
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
         }
     }
     nvgpu_rwsem_up_read(&tsg->ch_list_lock);
@@ -451,9 +451,9 @@ void nvgpu_tsg_set_error_notifier(struct gk20a *g, struct nvgpu_tsg *tsg,
     nvgpu_rwsem_down_read(&tsg->ch_list_lock);
     nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-        if (gk20a_channel_get(ch) != NULL) {
+        if (nvgpu_channel_get(ch) != NULL) {
             nvgpu_channel_set_error_notifier(g, ch, error_notifier);
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
         }
     }
     nvgpu_rwsem_up_read(&tsg->ch_list_lock);
@@ -485,13 +485,13 @@ bool nvgpu_tsg_check_ctxsw_timeout(struct nvgpu_tsg *tsg,
     * maximum timeout without progress (update in gpfifo pointers).
     */
    nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-        if (gk20a_channel_get(ch) != NULL) {
+        if (nvgpu_channel_get(ch) != NULL) {
             recover = nvgpu_channel_update_and_check_ctxsw_timeout(ch,
                     *ms, &progress);
             if (progress || recover) {
                 break;
             }
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
         }
     }
@@ -503,7 +503,7 @@ bool nvgpu_tsg_check_ctxsw_timeout(struct nvgpu_tsg *tsg,
         * notifier for all channels.
         */
        *ms = ch->ctxsw_timeout_accumulated_ms;
-       gk20a_channel_put(ch);
+       nvgpu_channel_put(ch);

        *debug_dump = nvgpu_tsg_ctxsw_timeout_debug_dump_state(tsg);
     } else {
@@ -516,7 +516,7 @@ bool nvgpu_tsg_check_ctxsw_timeout(struct nvgpu_tsg *tsg,
         if (progress) {
             nvgpu_log_info(g, "progress on tsg=%d ch=%d",
                     tsg->tsgid, ch->chid);
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
             *ms = g->ctxsw_timeout_period_ms;
             nvgpu_tsg_set_ctxsw_timeout_accumulated_ms(tsg, *ms);
         }
@@ -860,12 +860,12 @@ void nvgpu_tsg_abort(struct gk20a *g, struct nvgpu_tsg *tsg, bool preempt)
     nvgpu_rwsem_down_read(&tsg->ch_list_lock);
     nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-        if (gk20a_channel_get(ch) != NULL) {
+        if (nvgpu_channel_get(ch) != NULL) {
             gk20a_channel_set_unserviceable(ch);
             if (g->ops.channel.abort_clean_up != NULL) {
                 g->ops.channel.abort_clean_up(ch);
             }
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
         }
     }
     nvgpu_rwsem_up_read(&tsg->ch_list_lock);


@@ -201,7 +201,7 @@ static void gr_intr_report_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
     ch = nvgpu_gr_intr_get_channel_from_ctx(g, curr_ctx, &tsgid);
     chid = ch != NULL ? ch->chid : NVGPU_INVALID_CHANNEL_ID;
     if (ch != NULL) {
-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);
     }

     (void) memset(&err_info, 0, sizeof(err_info));
@@ -231,7 +231,7 @@
  * curr_ctx should be the value read from gr falcon get_current_ctx op
  * A small tlb is used here to cache translation.
  *
- * Returned channel must be freed with gk20a_channel_put() */
+ * Returned channel must be freed with nvgpu_channel_put() */
 struct nvgpu_channel *nvgpu_gr_intr_get_channel_from_ctx(struct gk20a *g,
             u32 curr_ctx, u32 *curr_tsgid)
 {
@@ -275,7 +275,7 @@ struct nvgpu_channel *nvgpu_gr_intr_get_channel_from_ctx(struct gk20a *g,
             ret_ch = ch;
             break;
         }
-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);
     }

     if (ret_ch == NULL) {
@@ -327,7 +327,7 @@ void nvgpu_gr_intr_report_exception(struct gk20a *g, u32 inst,
     ch = nvgpu_gr_intr_get_channel_from_ctx(g, curr_ctx, &tsgid);
     chid = ch != NULL ? ch->chid : NVGPU_INVALID_CHANNEL_ID;
     if (ch != NULL) {
-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);
     }

     (void) memset(&err_info, 0, sizeof(err_info));
@@ -872,7 +872,7 @@ int nvgpu_gr_intr_stall_isr(struct gk20a *g)
     }

     if (ch != NULL) {
-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);
     }

     return 0;


@@ -107,7 +107,7 @@ void nvgpu_rc_pbdma_fault(struct gk20a *g, struct nvgpu_fifo *f,
             nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
         }
-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);
     } else {
         nvgpu_err(g, "Invalid pbdma_status.id_type");
     }


@@ -177,7 +177,7 @@ static void channel_sync_syncpt_update(void *priv, int nr_completed)
     gk20a_channel_update(ch);

     /* note: channel_get() is in channel_sync_syncpt_incr_common() */
-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);
 }

 static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
@@ -210,7 +210,7 @@ static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
             c->g->ops.sync.syncpt.get_incr_per_release());

     if (register_irq) {
-        struct nvgpu_channel *referenced = gk20a_channel_get(c);
+        struct nvgpu_channel *referenced = nvgpu_channel_get(c);

         WARN_ON(!referenced);
@@ -223,7 +223,7 @@ static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
                 sp->id, thresh,
                 channel_sync_syncpt_update, c);
         if (err != 0) {
-            gk20a_channel_put(referenced);
+            nvgpu_channel_put(referenced);
         }

         /* Adding interrupt action should


@@ -375,11 +375,11 @@ int vgpu_tsg_force_reset_ch(struct nvgpu_channel *ch,
         nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
                 channel_gk20a, ch_entry) {
-            if (gk20a_channel_get(ch_tsg)) {
+            if (nvgpu_channel_get(ch_tsg)) {
                 nvgpu_channel_set_error_notifier(g, ch_tsg,
                         err_code);
                 gk20a_channel_set_unserviceable(ch_tsg);
-                gk20a_channel_put(ch_tsg);
+                nvgpu_channel_put(ch_tsg);
             }
         }
@@ -431,9 +431,9 @@ static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g,
         nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
                 channel_gk20a, ch_entry) {
-            if (gk20a_channel_get(ch_tsg)) {
+            if (nvgpu_channel_get(ch_tsg)) {
                 vgpu_fifo_set_ctx_mmu_error_ch(g, ch_tsg);
-                gk20a_channel_put(ch_tsg);
+                nvgpu_channel_put(ch_tsg);
             }
         }
@@ -475,7 +475,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
         break;
     }

-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);
     return 0;
 }
@@ -530,7 +530,7 @@ void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
     gk20a_channel_set_unserviceable(ch);
     g->ops.channel.abort_clean_up(ch);
-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);
 }

 void vgpu_set_error_notifier(struct gk20a *g,


@@ -816,7 +816,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
         break;
     }

-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);
     return 0;
 }


@@ -134,7 +134,7 @@ static void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
     gk20a_channel_set_unserviceable(ch);
     g->ops.fifo.ch_abort_clean_up(ch);
-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);
 }

 static void vgpu_set_error_notifier(struct gk20a *g,


@@ -531,7 +531,7 @@ void gv11b_fb_handle_bar2_fault(struct gk20a *g,
     g->ops.bus.bar2_bind(g, &g->mm.bar2.inst_block);

     if (mmufault->refch != NULL) {
-        gk20a_channel_put(mmufault->refch);
+        nvgpu_channel_put(mmufault->refch);
         mmufault->refch = NULL;
     }
 }


@@ -112,7 +112,7 @@ bool gk20a_fifo_handle_ctxsw_timeout(struct gk20a *g)
         ch = nvgpu_channel_from_id(g, id);
         if (ch != NULL) {
             tsg = tsg_gk20a_from_ch(ch);
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
         }
     }


@@ -316,7 +316,7 @@ bool gk20a_fifo_handle_mmu_fault_locked(
             tsg = nvgpu_tsg_get_from_id(g, id);
         } else if (type == ENGINE_STATUS_CTX_ID_TYPE_CHID) {
             ch = &g->fifo.channel[id];
-            refch = gk20a_channel_get(ch);
+            refch = nvgpu_channel_get(ch);
             if (refch != NULL) {
                 tsg = tsg_gk20a_from_ch(refch);
             }
@@ -375,12 +375,12 @@ bool gk20a_fifo_handle_mmu_fault_locked(
             /* put back the ref taken early above */
             if (refch != NULL) {
-                gk20a_channel_put(ch);
+                nvgpu_channel_put(ch);
             }
         } else if (refch != NULL) {
             nvgpu_err(g, "mmu error in unbound channel %d",
                     ch->chid);
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
         } else if (mmfault_info.inst_ptr ==
                 nvgpu_inst_block_addr(g,
                     &g->mm.bar1.inst_block)) {


@@ -1373,7 +1373,7 @@ bool gk20a_is_channel_ctx_resident(struct nvgpu_channel *ch)
         ret = true;
     }

-    gk20a_channel_put(curr_ch);
+    nvgpu_channel_put(curr_ch);
     return ret;
 }


@@ -1024,7 +1024,7 @@ static int gr_gv11b_handle_warp_esr_error_mmu_nack(struct gk20a *g,
     u32 offset;
     int err = 0;

-    fault_ch = gk20a_channel_get(fault_ch);
+    fault_ch = nvgpu_channel_get(fault_ch);
     if (fault_ch != NULL) {
         if (!fault_ch->mmu_nack_handled) {
             /* recovery is not done for the channel implying mmu
@@ -1044,7 +1044,7 @@ static int gr_gv11b_handle_warp_esr_error_mmu_nack(struct gk20a *g,
     * for teardown value in mmu fault handler.
     */
    if (err == 0 && fault_ch != NULL) {
-        gk20a_channel_put(fault_ch);
+        nvgpu_channel_put(fault_ch);
    }

    /* clear interrupt */


@@ -88,7 +88,7 @@ static int gp10b_gr_intr_get_cilp_preempt_pending_chid(struct gk20a *g,
     tsg = tsg_gk20a_from_ch(ch);
     if (tsg == NULL) {
-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);
         return -EINVAL;
     }
@@ -99,7 +99,7 @@ static int gp10b_gr_intr_get_cilp_preempt_pending_chid(struct gk20a *g,
         ret = 0;
     }

-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);
     return ret;
 }
@@ -150,7 +150,7 @@ int gp10b_gr_intr_handle_fecs_error(struct gk20a *g,
         ret = gp10b_gr_intr_clear_cilp_preempt_pending(g, ch);
         if (ret != 0) {
             nvgpu_err(g, "CILP: error while unsetting CILP preempt pending!");
-            gk20a_channel_put(ch);
+            nvgpu_channel_put(ch);
             goto clean_up;
         }
@@ -164,7 +164,7 @@ int gp10b_gr_intr_handle_fecs_error(struct gk20a *g,
         g->ops.tsg.post_event_id(tsg,
             NVGPU_EVENT_ID_CILP_PREEMPTION_COMPLETE);

-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);
     }

 clean_up:


@@ -315,7 +315,7 @@ void gv11b_mm_mmu_fault_handle_mmu_fault_common(struct gk20a *g,
         nvgpu_log(g, gpu_dbg_intr, "CE Page Fault Fixed");
         *invalidate_replay_val = 0;
         if (mmufault->refch != NULL) {
-            gk20a_channel_put(mmufault->refch);
+            nvgpu_channel_put(mmufault->refch);
             mmufault->refch = NULL;
         }
         return;
@@ -349,13 +349,13 @@ void gv11b_mm_mmu_fault_handle_mmu_fault_common(struct gk20a *g,
             * closing the channel by userspace. Decrement
             * channel reference.
             */
-            gk20a_channel_put(mmufault->refch);
+            nvgpu_channel_put(mmufault->refch);
             /*
             * refch in mmufault is assigned at the time
             * of copying fault info from snap reg or bar2
             * fault buf.
             */
-            gk20a_channel_put(mmufault->refch);
+            nvgpu_channel_put(mmufault->refch);
             return;
         } else {
             /*
@@ -390,7 +390,7 @@ void gv11b_mm_mmu_fault_handle_mmu_fault_common(struct gk20a *g,
         * fault info from snap reg or bar2 fault buf
         */
        if (mmufault->refch != NULL) {
-           gk20a_channel_put(mmufault->refch);
+           nvgpu_channel_put(mmufault->refch);
            mmufault->refch = NULL;
        }
@@ -419,7 +419,7 @@ void gv11b_mm_mmu_fault_handle_mmu_fault_common(struct gk20a *g,
         * fault info from snap reg or bar2 fault buf
         */
        if (mmufault->refch != NULL) {
-           gk20a_channel_put(mmufault->refch);
+           nvgpu_channel_put(mmufault->refch);
            mmufault->refch = NULL;
        }
     }
@@ -492,7 +492,7 @@ void gv11b_mm_mmu_fault_handle_nonreplay_replay_fault(struct gk20a *g,
                 nvgpu_log(g, gpu_dbg_intr,
                         "pte already scanned");
                 if (mmufault->refch != NULL) {
-                    gk20a_channel_put(mmufault->refch);
+                    nvgpu_channel_put(mmufault->refch);
                     mmufault->refch = NULL;
                 }
                 continue;


@@ -447,13 +447,13 @@ struct nvgpu_channel *gk20a_get_channel_from_file(int fd);
 void gk20a_channel_update(struct nvgpu_channel *c);

 /* returns ch if reference was obtained */
-struct nvgpu_channel *__must_check _gk20a_channel_get(struct nvgpu_channel *ch,
-        const char *caller);
-#define gk20a_channel_get(ch) _gk20a_channel_get(ch, __func__)
-void _gk20a_channel_put(struct nvgpu_channel *ch, const char *caller);
-#define gk20a_channel_put(ch) _gk20a_channel_put(ch, __func__)
+struct nvgpu_channel *__must_check nvgpu_channel_get__func(
+        struct nvgpu_channel *ch, const char *caller);
+#define nvgpu_channel_get(ch) nvgpu_channel_get__func(ch, __func__)
+void nvgpu_channel_put__func(struct nvgpu_channel *ch, const char *caller);
+#define nvgpu_channel_put(ch) nvgpu_channel_put__func(ch, __func__)

 /* returns NULL if could not take a ref to the channel */
 struct nvgpu_channel *__must_check nvgpu_channel_from_id__func(
@@ -480,9 +480,6 @@ void channel_gk20a_joblist_unlock(struct nvgpu_channel *c);
 bool channel_gk20a_joblist_is_empty(struct nvgpu_channel *c);
 int channel_gk20a_update_runlist(struct nvgpu_channel *c, bool add);

-void gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
-        unsigned int timeslice_period,
-        unsigned int *__timeslice_timeout, unsigned int *__timeslice_scale);
 void gk20a_wait_until_counter_is_N(
     struct nvgpu_channel *ch, nvgpu_atomic_t *counter, int wait_value,
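As a usage note, a minimal caller sketch (example_use is a hypothetical
function, not part of this commit) of the get/put pattern that the call
sites in this diff follow:

    static void example_use(struct nvgpu_channel *ch)
    {
        /* Expands to nvgpu_channel_get__func(ch, __func__), so the
         * trace_nvgpu_channel_get event records "example_use" as the
         * caller. NULL means no reference could be taken (the channel
         * is being torn down). */
        if (nvgpu_channel_get(ch) == NULL) {
            return;
        }

        /* ... work with the channel while holding the reference ... */

        /* Drops the reference; the ref_count decrement may wake
         * waiters on ref_count_dec_wq. */
        nvgpu_channel_put(ch);
    }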


@@ -86,7 +86,7 @@ static int gk20a_fifo_sched_debugfs_seq_show(
     if (!test_bit(ch->chid, runlist->active_channels))
         return ret;

-    if (gk20a_channel_get(ch)) {
+    if (nvgpu_channel_get(ch)) {
         tsg = tsg_gk20a_from_ch(ch);

         if (tsg)
@@ -99,7 +99,7 @@ static int gk20a_fifo_sched_debugfs_seq_show(
                 tsg->interleave_level,
                 nvgpu_gr_ctx_get_graphics_preemption_mode(tsg->gr_ctx),
                 nvgpu_gr_ctx_get_compute_preemption_mode(tsg->gr_ctx));
-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);
     }

     return 0;
 }


@@ -68,7 +68,7 @@ static int gk20a_as_ioctl_bind_channel(
     err = ch->g->ops.mm.vm_bind_channel(as_share->vm, ch);

 out:
-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);
     return err;
 }


@@ -372,7 +372,7 @@ static int gk20a_init_error_notifier(struct nvgpu_channel *ch,
 /*
  * This returns the channel with a reference. The caller must
- * gk20a_channel_put() the ref back after use.
+ * nvgpu_channel_put() the ref back after use.
  *
  * NULL is returned if the channel was not found.
  */
@@ -391,7 +391,7 @@ struct nvgpu_channel *gk20a_get_channel_from_file(int fd)
     }

     priv = (struct channel_priv *)f->private_data;
-    ch = gk20a_channel_get(priv->c);
+    ch = nvgpu_channel_get(priv->c);
     fput(f);
     return ch;
 }
@@ -1090,7 +1090,7 @@ long gk20a_channel_ioctl(struct file *filp,
     }

     /* take a ref or return timeout if channel refs can't be taken */
-    ch = gk20a_channel_get(ch);
+    ch = nvgpu_channel_get(ch);
     if (!ch)
         return -ETIMEDOUT;
@@ -1398,7 +1398,7 @@ long gk20a_channel_ioctl(struct file *filp,
     nvgpu_mutex_release(&ch->ioctl_lock);

-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);

     nvgpu_log_fn(g, "end");


@@ -681,7 +681,7 @@ static int nvgpu_gpu_ioctl_set_debug_mode(
         err = -ENOSYS;
     nvgpu_mutex_release(&g->dbg_sessions_lock);

-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);
     return err;
 }
@@ -1640,7 +1640,7 @@ static int nvgpu_gpu_set_deterministic_opts(struct gk20a *g,
         err = nvgpu_gpu_set_deterministic_ch(ch, args->flags);

-        gk20a_channel_put(ch);
+        nvgpu_channel_put(ch);

         if (err)
             break;


@@ -563,14 +563,14 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
     nvgpu_mutex_release(&ch->dbg_s_lock);
     nvgpu_mutex_release(&g->dbg_sessions_lock);

-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);

     return 0;

 out_kfree:
     nvgpu_kfree(g, ch_data_linux);
 out_chput:
-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);
     nvgpu_mutex_release(&ch->dbg_s_lock);
     nvgpu_mutex_release(&g->dbg_sessions_lock);
 out_fput:
@@ -1815,7 +1815,7 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
     nvgpu_mutex_release(&g->dbg_sessions_lock);
 out:
-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);
     return err;
 }


@@ -53,7 +53,7 @@ static int nvgpu_tsg_bind_channel_fd(struct nvgpu_tsg *tsg, int ch_fd)
     err = nvgpu_tsg_bind_channel(tsg, ch);

-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);
     return err;
 }
@@ -116,7 +116,7 @@ static int gk20a_tsg_ioctl_bind_channel_ex(struct gk20a *g,
     err = nvgpu_tsg_bind_channel(tsg, ch);
 ch_put:
-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);
 idle:
     gk20a_idle(g);
 mutex_release:
@@ -148,7 +148,7 @@ static int nvgpu_tsg_unbind_channel_fd(struct nvgpu_tsg *tsg, int ch_fd)
     gk20a_channel_set_unserviceable(ch);
 out:
-    gk20a_channel_put(ch);
+    nvgpu_channel_put(ch);
     return err;
 }


@@ -159,11 +159,11 @@ DECLARE_EVENT_CLASS(gk20a_channel_getput,
     ),
     TP_printk("channel %d caller %s", __entry->channel, __entry->caller)
 );

-DEFINE_EVENT(gk20a_channel_getput, gk20a_channel_get,
+DEFINE_EVENT(gk20a_channel_getput, nvgpu_channel_get,
     TP_PROTO(int channel, const char *caller),
     TP_ARGS(channel, caller)
 );

-DEFINE_EVENT(gk20a_channel_getput, gk20a_channel_put,
+DEFINE_EVENT(gk20a_channel_getput, nvgpu_channel_put,
     TP_PROTO(int channel, const char *caller),
     TP_ARGS(channel, caller)
 );
@@ -630,8 +630,8 @@ DEFINE_EVENT(gk20a_cde, gk20a_cde_finished_ctx_cb,
 #define trace_gk20a_mmu_fault(arg...) ((void)(NULL))
 #define trace_gk20a_release_used_channel(arg...) ((void)(NULL))
 #define trace_gk20a_free_channel(arg...) ((void)(NULL))
-#define trace_gk20a_channel_get(arg...) ((void)(NULL))
-#define trace_gk20a_channel_put(arg...) ((void)(NULL))
+#define trace_nvgpu_channel_get(arg...) ((void)(NULL))
+#define trace_nvgpu_channel_put(arg...) ((void)(NULL))
 #define trace_gk20a_open_new_channel(arg...) ((void)(NULL))
 #define trace_gk20a_channel_update(arg...) ((void)(NULL))
 #define trace_gk20a_channel_reset(arg...) ((void)(NULL))