diff --git a/drivers/gpu/nvgpu/common/ce/ce.c b/drivers/gpu/nvgpu/common/ce/ce.c
index 8675be834..3985cbfa9 100644
--- a/drivers/gpu/nvgpu/common/ce/ce.c
+++ b/drivers/gpu/nvgpu/common/ce/ce.c
@@ -203,9 +203,9 @@ static void nvgpu_ce_delete_gpu_context_locked(struct nvgpu_ce_gpu_ctx *ce_ctx)
 
 	/*
 	 * free the channel
-	 * gk20a_channel_close() will also unbind the channel from TSG
+	 * nvgpu_channel_close() will also unbind the channel from TSG
 	 */
-	gk20a_channel_close(ce_ctx->ch);
+	nvgpu_channel_close(ce_ctx->ch);
 	nvgpu_ref_put(&ce_ctx->tsg->refcount, nvgpu_tsg_release);
 
 	/* housekeeping on app */
diff --git a/drivers/gpu/nvgpu/common/fence/fence.c b/drivers/gpu/nvgpu/common/fence/fence.c
index a0d67929c..76f7f48fa 100644
--- a/drivers/gpu/nvgpu/common/fence/fence.c
+++ b/drivers/gpu/nvgpu/common/fence/fence.c
@@ -160,7 +160,7 @@ struct nvgpu_fence_type *nvgpu_fence_alloc(struct nvgpu_channel *ch)
 {
 	struct nvgpu_fence_type *fence = NULL;
 
-	if (channel_gk20a_is_prealloc_enabled(ch)) {
+	if (nvgpu_channel_is_prealloc_enabled(ch)) {
 		if (nvgpu_alloc_initialized(&ch->fence_allocator)) {
 			fence = (struct nvgpu_fence_type *)(uintptr_t)
 				nvgpu_alloc(&ch->fence_allocator,
diff --git a/drivers/gpu/nvgpu/common/fifo/channel.c b/drivers/gpu/nvgpu/common/fifo/channel.c
index c2f50291a..cfc6e6425 100644
--- a/drivers/gpu/nvgpu/common/fifo/channel.c
+++ b/drivers/gpu/nvgpu/common/fifo/channel.c
@@ -79,7 +79,7 @@ static struct nvgpu_channel *allocate_channel(struct nvgpu_fifo *f)
 
 	nvgpu_mutex_acquire(&f->free_chs_mutex);
 	if (!nvgpu_list_empty(&f->free_chs)) {
-		ch = nvgpu_list_first_entry(&f->free_chs, channel_gk20a,
+		ch = nvgpu_list_first_entry(&f->free_chs, nvgpu_channel,
 				free_chs);
 		nvgpu_list_del(&ch->free_chs);
 		WARN_ON(nvgpu_atomic_read(&ch->ref_count) != 0);
@@ -123,7 +123,7 @@ static void free_channel(struct nvgpu_fifo *f,
 	}
 }
 
-int channel_gk20a_commit_va(struct nvgpu_channel *c)
+int nvgpu_channel_commit_va(struct nvgpu_channel *c)
 {
 	struct gk20a *g = c->g;
 
@@ -135,7 +135,7 @@ int channel_gk20a_commit_va(struct nvgpu_channel *c)
 	return 0;
 }
 
-int channel_gk20a_update_runlist(struct nvgpu_channel *c, bool add)
+int nvgpu_channel_update_runlist(struct nvgpu_channel *c, bool add)
 {
 	return c->g->ops.runlist.update_for_channel(c->g, c->runlist_id,
 			c, add, true);
@@ -190,17 +190,17 @@ void nvgpu_channel_abort_clean_up(struct nvgpu_channel *ch)
 	 * When closing the channel, this scheduled update holds one ref which
 	 * is waited for before advancing with freeing.
 	 */
-	gk20a_channel_update(ch);
+	nvgpu_channel_update(ch);
 }
 
-void gk20a_channel_set_unserviceable(struct nvgpu_channel *ch)
+void nvgpu_channel_set_unserviceable(struct nvgpu_channel *ch)
 {
 	nvgpu_spinlock_acquire(&ch->unserviceable_lock);
 	ch->unserviceable = true;
 	nvgpu_spinlock_release(&ch->unserviceable_lock);
 }
 
-bool gk20a_channel_check_unserviceable(struct nvgpu_channel *ch)
+bool nvgpu_channel_check_unserviceable(struct nvgpu_channel *ch)
 {
 	bool unserviceable_status;
 
@@ -211,7 +211,7 @@ bool gk20a_channel_check_unserviceable(struct nvgpu_channel *ch)
 	return unserviceable_status;
 }
 
-void gk20a_channel_abort(struct nvgpu_channel *ch, bool channel_preempt)
+void nvgpu_channel_abort(struct nvgpu_channel *ch, bool channel_preempt)
 {
 	struct nvgpu_tsg *tsg = nvgpu_tsg_from_ch(ch);
 
@@ -224,39 +224,6 @@ void gk20a_channel_abort(struct nvgpu_channel *ch, bool channel_preempt)
 	}
 }
 
-int gk20a_wait_channel_idle(struct nvgpu_channel *ch)
-{
-	bool channel_idle = false;
-	struct nvgpu_timeout timeout;
-	int ret;
-
-	ret = nvgpu_timeout_init(ch->g, &timeout, nvgpu_get_poll_timeout(ch->g),
-			NVGPU_TIMER_CPU_TIMER);
-	if (ret != 0) {
-		nvgpu_err(ch->g, "timeout_init failed: %d", ret);
-		return ret;
-	}
-
-	do {
-		channel_gk20a_joblist_lock(ch);
-		channel_idle = channel_gk20a_joblist_is_empty(ch);
-		channel_gk20a_joblist_unlock(ch);
-		if (channel_idle) {
-			break;
-		}
-
-		nvgpu_usleep_range(1000, 3000);
-	} while (nvgpu_timeout_expired(&timeout) == 0);
-
-	if (!channel_idle) {
-		nvgpu_err(ch->g, "jobs not freed for channel %d",
-				ch->chid);
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
 void gk20a_wait_until_counter_is_N(
 	struct nvgpu_channel *ch, nvgpu_atomic_t *counter, int wait_value,
 	struct nvgpu_cond *c, const char *caller, const char *counter_name)
@@ -386,7 +353,7 @@ static void gk20a_free_channel(struct nvgpu_channel *ch, bool force)
 		nvgpu_mutex_release(&g->fifo.engines_reset_mutex);
 	}
 
-	if (!gk20a_channel_as_bound(ch)) {
+	if (!nvgpu_channel_as_bound(ch)) {
 		goto unbind;
 	}
 
@@ -404,7 +371,7 @@ static void gk20a_free_channel(struct nvgpu_channel *ch, bool force)
 	}
 
 	if (ch->usermode_submit_enabled) {
-		gk20a_channel_free_usermode_buffers(ch);
+		nvgpu_channel_free_usermode_buffers(ch);
 		(void) nvgpu_userd_init_channel(g, ch);
 		ch->usermode_submit_enabled = false;
 	}
@@ -428,7 +395,7 @@ static void gk20a_free_channel(struct nvgpu_channel *ch, bool force)
 	 * Set user managed syncpoint to safe state
 	 * But it's already done if channel is recovered
 	 */
-	if (gk20a_channel_check_unserviceable(ch)) {
+	if (nvgpu_channel_check_unserviceable(ch)) {
 		nvgpu_channel_sync_destroy(ch->user_sync, false);
 	} else {
 		nvgpu_channel_sync_destroy(ch->user_sync, true);
@@ -500,7 +467,7 @@ unbind:
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
 
 	/* free pre-allocated resources, if applicable */
-	if (channel_gk20a_is_prealloc_enabled(ch)) {
+	if (nvgpu_channel_is_prealloc_enabled(ch)) {
 		channel_gk20a_free_prealloc_resources(ch);
 	}
 
@@ -562,7 +529,7 @@ static void gk20a_channel_dump_ref_actions(struct nvgpu_channel *ch)
 }
 
 static void gk20a_channel_save_ref_source(struct nvgpu_channel *ch,
-		enum channel_gk20a_ref_action_type type)
+		enum nvgpu_channel_ref_action_type type)
 {
 #if GK20A_CHANNEL_REFCOUNT_TRACKING
 	struct nvgpu_channel_ref_action *act;
@@ -587,7 +554,7 @@ static void gk20a_channel_save_ref_source(struct nvgpu_channel *ch,
 /* Try to get a reference to the channel. Return nonzero on success. If fails,
  * the channel is dead or being freed elsewhere and you must not touch it.
  *
- * Always when a channel_gk20a pointer is seen and about to be used, a
+ * Always when a nvgpu_channel pointer is seen and about to be used, a
  * reference must be held to it - either by you or the caller, which should be
  * documented well or otherwise clearly seen. This usually boils down to the
  * file from ioctls directly, or an explicit get in exception handlers when the
@@ -649,7 +616,7 @@ struct nvgpu_channel *nvgpu_channel_from_id__func(struct gk20a *g,
 	return nvgpu_channel_get__func(&g->fifo.channel[chid], caller);
 }
 
-void gk20a_channel_close(struct nvgpu_channel *ch)
+void nvgpu_channel_close(struct nvgpu_channel *ch)
 {
 	gk20a_free_channel(ch, false);
 }
@@ -856,7 +823,7 @@ static void nvgpu_channel_free_priv_cmd_q(struct nvgpu_channel *ch)
 }
 
 /* allocate a cmd buffer with given size. size is number of u32 entries */
-int gk20a_channel_alloc_priv_cmdbuf(struct nvgpu_channel *c, u32 orig_size,
+int nvgpu_channel_alloc_priv_cmdbuf(struct nvgpu_channel *c, u32 orig_size,
 			     struct priv_cmd_entry *e)
 {
 	struct priv_cmd_queue *q = &c->priv_cmd_q;
@@ -925,26 +892,26 @@ int gk20a_channel_alloc_priv_cmdbuf(struct nvgpu_channel *c, u32 orig_size,
 void nvgpu_channel_free_priv_cmd_entry(struct nvgpu_channel *c,
 			     struct priv_cmd_entry *e)
 {
-	if (channel_gk20a_is_prealloc_enabled(c)) {
+	if (nvgpu_channel_is_prealloc_enabled(c)) {
 		(void) memset(e, 0, sizeof(struct priv_cmd_entry));
 	} else {
 		nvgpu_kfree(c->g, e);
 	}
 }
 
-int channel_gk20a_alloc_job(struct nvgpu_channel *c,
+int nvgpu_channel_alloc_job(struct nvgpu_channel *c,
 		struct nvgpu_channel_job **job_out)
 {
 	int err = 0;
 
-	if (channel_gk20a_is_prealloc_enabled(c)) {
+	if (nvgpu_channel_is_prealloc_enabled(c)) {
 		unsigned int put = c->joblist.pre_alloc.put;
 		unsigned int get = c->joblist.pre_alloc.get;
 
 		/*
 		 * ensure all subsequent reads happen after reading get.
 		 * see corresponding nvgpu_smp_wmb in
-		 * gk20a_channel_clean_up_jobs()
+		 * nvgpu_channel_clean_up_jobs()
 		 */
 		nvgpu_smp_rmb();
 
@@ -966,7 +933,7 @@ int channel_gk20a_alloc_job(struct nvgpu_channel *c,
 	return err;
 }
 
-void channel_gk20a_free_job(struct nvgpu_channel *c,
+void nvgpu_channel_free_job(struct nvgpu_channel *c,
 		struct nvgpu_channel_job *job)
 {
 	/*
@@ -974,7 +941,7 @@ void channel_gk20a_free_job(struct nvgpu_channel *c,
 	 * the job but maintain the pointers to the priv_cmd_entry,
 	 * since they're inherently tied to the job node.
 	 */
-	if (channel_gk20a_is_prealloc_enabled(c)) {
+	if (nvgpu_channel_is_prealloc_enabled(c)) {
 		struct priv_cmd_entry *wait_cmd = job->wait_cmd;
 		struct priv_cmd_entry *incr_cmd = job->incr_cmd;
 		(void) memset(job, 0, sizeof(*job));
@@ -985,18 +952,18 @@ void channel_gk20a_free_job(struct nvgpu_channel *c,
 	}
 }
 
-void channel_gk20a_joblist_lock(struct nvgpu_channel *c)
+void nvgpu_channel_joblist_lock(struct nvgpu_channel *c)
 {
-	if (channel_gk20a_is_prealloc_enabled(c)) {
+	if (nvgpu_channel_is_prealloc_enabled(c)) {
 		nvgpu_mutex_acquire(&c->joblist.pre_alloc.read_lock);
 	} else {
 		nvgpu_spinlock_acquire(&c->joblist.dynamic.lock);
 	}
 }
 
-void channel_gk20a_joblist_unlock(struct nvgpu_channel *c)
+void nvgpu_channel_joblist_unlock(struct nvgpu_channel *c)
 {
-	if (channel_gk20a_is_prealloc_enabled(c)) {
+	if (nvgpu_channel_is_prealloc_enabled(c)) {
 		nvgpu_mutex_release(&c->joblist.pre_alloc.read_lock);
 	} else {
 		nvgpu_spinlock_release(&c->joblist.dynamic.lock);
 	}
@@ -1009,8 +976,8 @@ static struct nvgpu_channel_job *channel_gk20a_joblist_peek(
 	u32 get;
 	struct nvgpu_channel_job *job = NULL;
 
-	if (channel_gk20a_is_prealloc_enabled(c)) {
-		if (!channel_gk20a_joblist_is_empty(c)) {
+	if (nvgpu_channel_is_prealloc_enabled(c)) {
+		if (!nvgpu_channel_joblist_is_empty(c)) {
 			get = c->joblist.pre_alloc.get;
 			job = &c->joblist.pre_alloc.jobs[get];
 		}
@@ -1027,7 +994,7 @@ static struct nvgpu_channel_job *channel_gk20a_joblist_peek(
 static void channel_gk20a_joblist_add(struct nvgpu_channel *c,
 		struct nvgpu_channel_job *job)
 {
-	if (channel_gk20a_is_prealloc_enabled(c)) {
+	if (nvgpu_channel_is_prealloc_enabled(c)) {
 		c->joblist.pre_alloc.put = (c->joblist.pre_alloc.put + 1U) %
 				(c->joblist.pre_alloc.length);
 	} else {
@@ -1038,7 +1005,7 @@ static void channel_gk20a_joblist_add(struct nvgpu_channel *c,
 static void channel_gk20a_joblist_delete(struct nvgpu_channel *c,
 		struct nvgpu_channel_job *job)
 {
-	if (channel_gk20a_is_prealloc_enabled(c)) {
+	if (nvgpu_channel_is_prealloc_enabled(c)) {
 		c->joblist.pre_alloc.get = (c->joblist.pre_alloc.get + 1U) %
 				(c->joblist.pre_alloc.length);
 	} else {
@@ -1046,9 +1013,9 @@ static void channel_gk20a_joblist_delete(struct nvgpu_channel *c,
 	}
 }
 
-bool channel_gk20a_joblist_is_empty(struct nvgpu_channel *c)
+bool nvgpu_channel_joblist_is_empty(struct nvgpu_channel *c)
 {
-	if (channel_gk20a_is_prealloc_enabled(c)) {
+	if (nvgpu_channel_is_prealloc_enabled(c)) {
 		unsigned int get = c->joblist.pre_alloc.get;
 		unsigned int put = c->joblist.pre_alloc.put;
 
@@ -1059,7 +1026,7 @@ bool channel_gk20a_joblist_is_empty(struct nvgpu_channel *c)
 	return nvgpu_list_empty(&c->joblist.dynamic.jobs);
 }
 
-bool channel_gk20a_is_prealloc_enabled(struct nvgpu_channel *c)
+bool nvgpu_channel_is_prealloc_enabled(struct nvgpu_channel *c)
 {
 	bool pre_alloc_enabled = c->joblist.pre_alloc.enabled;
 
@@ -1075,7 +1042,7 @@ static int channel_gk20a_prealloc_resources(struct nvgpu_channel *ch,
 	size_t size;
 	struct priv_cmd_entry *entries = NULL;
 
-	if ((channel_gk20a_is_prealloc_enabled(ch)) || (num_jobs == 0U)) {
+	if ((nvgpu_channel_is_prealloc_enabled(ch)) || (num_jobs == 0U)) {
 		return -EINVAL;
 	}
 
@@ -1129,7 +1096,7 @@ static int channel_gk20a_prealloc_resources(struct nvgpu_channel *ch,
 	/*
 	 * commit the previous writes before setting the flag.
 	 * see corresponding nvgpu_smp_rmb in
-	 * channel_gk20a_is_prealloc_enabled()
+	 * nvgpu_channel_is_prealloc_enabled()
 	 */
 	nvgpu_smp_wmb();
 	ch->joblist.pre_alloc.enabled = true;
@@ -1154,7 +1121,7 @@ static void channel_gk20a_free_prealloc_resources(struct nvgpu_channel *c)
 	/*
 	 * commit the previous writes before disabling the flag.
 	 * see corresponding nvgpu_smp_rmb in
-	 * channel_gk20a_is_prealloc_enabled()
+	 * nvgpu_channel_is_prealloc_enabled()
 	 */
 	nvgpu_smp_wmb();
 	c->joblist.pre_alloc.enabled = false;
@@ -1253,7 +1220,7 @@ int nvgpu_channel_setup_bind(struct nvgpu_channel *c,
 	}
 
 	/* an address space needs to have been bound at this point. */
-	if (!gk20a_channel_as_bound(c)) {
+	if (!nvgpu_channel_as_bound(c)) {
 		nvgpu_err(g,
 			"not bound to an address space at time of setup_bind");
 		err = -EINVAL;
@@ -1366,7 +1333,7 @@ int nvgpu_channel_setup_bind(struct nvgpu_channel *c,
 		goto clean_up_prealloc;
 	}
 
-	err = channel_gk20a_update_runlist(c, true);
+	err = nvgpu_channel_update_runlist(c, true);
 	if (err != 0) {
 		goto clean_up_priv_cmd;
 	}
@@ -1391,7 +1358,7 @@ clean_up_unmap:
 	nvgpu_big_free(g, c->gpfifo.pipe);
 	nvgpu_dma_unmap_free(ch_vm, &c->gpfifo.mem);
 	if (c->usermode_submit_enabled) {
-		gk20a_channel_free_usermode_buffers(c);
+		nvgpu_channel_free_usermode_buffers(c);
 		(void) nvgpu_userd_init_channel(g, c);
 		c->usermode_submit_enabled = false;
 	}
@@ -1408,7 +1375,7 @@ clean_up_idle:
 	return err;
 }
 
-void gk20a_channel_free_usermode_buffers(struct nvgpu_channel *c)
+void nvgpu_channel_free_usermode_buffers(struct nvgpu_channel *c)
 {
 	if (nvgpu_mem_is_valid(&c->usermode_userd)) {
 		nvgpu_dma_free(c->g, &c->usermode_userd);
@@ -1453,7 +1420,7 @@ static void nvgpu_channel_set_has_timedout_and_wakeup_wqs(struct gk20a *g,
 		struct nvgpu_channel *ch)
 {
 	/* mark channel as faulted */
-	gk20a_channel_set_unserviceable(ch);
+	nvgpu_channel_set_unserviceable(ch);
 
 	/* unblock pending waits */
 	if (nvgpu_cond_broadcast_interruptible(&ch->semaphore_wq) != 0) {
@@ -1523,7 +1490,7 @@ static void nvgpu_channel_wdt_init(struct nvgpu_channel *ch)
 	struct gk20a *g = ch->g;
 	int ret;
 
-	if (gk20a_channel_check_unserviceable(ch)) {
+	if (nvgpu_channel_check_unserviceable(ch)) {
 		ch->wdt.running = false;
 		return;
 	}
@@ -1650,7 +1617,7 @@ void nvgpu_channel_wdt_restart_all_channels(struct gk20a *g)
 		struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);
 
 		if (ch != NULL) {
-			if (!gk20a_channel_check_unserviceable(ch)) {
+			if (!nvgpu_channel_check_unserviceable(ch)) {
 				nvgpu_channel_wdt_rewind(ch);
 			}
 			nvgpu_channel_put(ch);
@@ -1678,7 +1645,7 @@ static void nvgpu_channel_wdt_handler(struct nvgpu_channel *ch)
 
 	nvgpu_log_fn(g, " ");
 
-	if (gk20a_channel_check_unserviceable(ch)) {
+	if (nvgpu_channel_check_unserviceable(ch)) {
 		/* channel is already recovered */
 		if (nvgpu_channel_wdt_stop(ch) == true) {
 			nvgpu_info(g, "chid: %d unserviceable but wdt was ON",
@@ -1755,7 +1722,7 @@ static void nvgpu_channel_poll_wdt(struct gk20a *g)
 		struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);
 
 		if (ch != NULL) {
-			if (!gk20a_channel_check_unserviceable(ch)) {
+			if (!nvgpu_channel_check_unserviceable(ch)) {
 				nvgpu_channel_wdt_check(ch);
 			}
 			nvgpu_channel_put(ch);
@@ -1823,13 +1790,13 @@ static u32 nvgpu_channel_worker_poll_wakeup_condition_get_timeout(
 static void nvgpu_channel_worker_poll_wakeup_process_item(
 		struct nvgpu_list_node *work_item)
 {
-	struct nvgpu_channel *ch = channel_gk20a_from_worker_item(work_item);
+	struct nvgpu_channel *ch = nvgpu_channel_from_worker_item(work_item);
 
 	nvgpu_assert(ch != NULL);
 
 	nvgpu_log_fn(ch->g, " ");
 
-	gk20a_channel_clean_up_jobs(ch, true);
+	nvgpu_channel_clean_up_jobs(ch, true);
 
 	/* ref taken when enqueued */
 	nvgpu_channel_put(ch);
@@ -1927,14 +1894,14 @@ void nvgpu_channel_update_priv_cmd_q_and_free_entry(
 	nvgpu_channel_free_priv_cmd_entry(ch, e);
 }
 
-int gk20a_channel_add_job(struct nvgpu_channel *c,
+int nvgpu_channel_add_job(struct nvgpu_channel *c,
 	struct nvgpu_channel_job *job,
 	bool skip_buffer_refcounting)
 {
 	struct vm_gk20a *vm = c->vm;
 	struct nvgpu_mapped_buf **mapped_buffers = NULL;
 	int err = 0, num_mapped_buffers = 0;
-	bool pre_alloc_enabled = channel_gk20a_is_prealloc_enabled(c);
+	bool pre_alloc_enabled = nvgpu_channel_is_prealloc_enabled(c);
 
 	if (!skip_buffer_refcounting) {
 		err = nvgpu_vm_get_buffers(vm, &mapped_buffers,
@@ -1959,19 +1926,19 @@ int gk20a_channel_add_job(struct nvgpu_channel *c,
 #endif
 
 		if (!pre_alloc_enabled) {
-			channel_gk20a_joblist_lock(c);
+			nvgpu_channel_joblist_lock(c);
 		}
 
 		/*
 		 * ensure all pending write complete before adding to the list.
 		 * see corresponding nvgpu_smp_rmb in
-		 * gk20a_channel_clean_up_jobs()
+		 * nvgpu_channel_clean_up_jobs()
 		 */
 		nvgpu_smp_wmb();
 		channel_gk20a_joblist_add(c, job);
 
 		if (!pre_alloc_enabled) {
-			channel_gk20a_joblist_unlock(c);
+			nvgpu_channel_joblist_unlock(c);
 		}
 	} else {
 		err = -ETIMEDOUT;
@@ -1996,7 +1963,7 @@ err_put_buffers:
 * per-job memory for completed jobs; in case of preallocated resources, this
 * opens up slots for new jobs to be submitted.
 */
-void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,
+void nvgpu_channel_clean_up_jobs(struct nvgpu_channel *c,
 					bool clean_all)
 {
 	struct vm_gk20a *vm;
@@ -2036,24 +2003,24 @@ void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,
 	while (true) {
 		bool completed;
 
-		channel_gk20a_joblist_lock(c);
-		if (channel_gk20a_joblist_is_empty(c)) {
+		nvgpu_channel_joblist_lock(c);
+		if (nvgpu_channel_joblist_is_empty(c)) {
 			/*
 			 * No jobs in flight, timeout will remain stopped until
 			 * new jobs are submitted.
 			 */
-			channel_gk20a_joblist_unlock(c);
+			nvgpu_channel_joblist_unlock(c);
 			break;
 		}
 
 		/*
 		 * ensure that all subsequent reads occur after checking
 		 * that we have a valid node. see corresponding nvgpu_smp_wmb in
-		 * gk20a_channel_add_job().
+		 * nvgpu_channel_add_job().
 		 */
 		nvgpu_smp_rmb();
 		job = channel_gk20a_joblist_peek(c);
-		channel_gk20a_joblist_unlock(c);
+		nvgpu_channel_joblist_unlock(c);
 
 		completed = nvgpu_fence_is_expired(job->post_fence);
 		if (!completed) {
@@ -2099,12 +2066,12 @@ void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,
 
 		/*
 		 * Remove job from channel's job list before we close the
-		 * fences, to prevent other callers (gk20a_channel_abort) from
+		 * fences, to prevent other callers (nvgpu_channel_abort) from
 		 * trying to dereference post_fence when it no longer exists.
 		 */
-		channel_gk20a_joblist_lock(c);
+		nvgpu_channel_joblist_lock(c);
 		channel_gk20a_joblist_delete(c, job);
-		channel_gk20a_joblist_unlock(c);
+		nvgpu_channel_joblist_unlock(c);
 
 		/* Close the fence (this will unref the semaphore and release
 		 * it to the pool). */
@@ -2127,11 +2094,11 @@ void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,
 
 		/*
 		 * ensure all pending writes complete before freeing up the job.
-		 * see corresponding nvgpu_smp_rmb in channel_gk20a_alloc_job().
+		 * see corresponding nvgpu_smp_rmb in nvgpu_channel_alloc_job().
 		 */
 		nvgpu_smp_wmb();
 
-		channel_gk20a_free_job(c, job);
+		nvgpu_channel_free_job(c, job);
 		job_finished = true;
 
 		/*
@@ -2167,13 +2134,13 @@ void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,
 * safe to call even if there is nothing to clean up. Any visible actions on
 * jobs just before calling this are guaranteed to be processed.
 */
-void gk20a_channel_update(struct nvgpu_channel *c)
+void nvgpu_channel_update(struct nvgpu_channel *c)
 {
 	if (!c->g->power_on) { /* shutdown case */
 		return;
 	}
 
-	trace_gk20a_channel_update(c->chid);
+	trace_nvgpu_channel_update(c->chid);
 	/* A queued channel is always checked for job cleanup. */
 	gk20a_channel_worker_enqueue(c);
 }
@@ -2299,7 +2266,7 @@ void nvgpu_channel_cleanup_sw(struct gk20a *g)
 	nvgpu_mutex_destroy(&f->free_chs_mutex);
 }
 
-int gk20a_init_channel_support(struct gk20a *g, u32 chid)
+int nvgpu_channel_init_support(struct gk20a *g, u32 chid)
 {
 	struct nvgpu_channel *c = g->fifo.channel+chid;
 	int err;
@@ -2407,7 +2374,7 @@ int nvgpu_channel_setup_sw(struct gk20a *g)
 	nvgpu_init_list_node(&f->free_chs);
 
 	for (chid = 0; chid < f->num_channels; chid++) {
-		err = gk20a_init_channel_support(g, chid);
+		err = nvgpu_channel_init_support(g, chid);
 		if (err != 0) {
 			nvgpu_err(g, "channel init failed, chid=%u", chid);
 			goto clean_up;
@@ -2446,7 +2413,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
 		if (ch == NULL) {
 			continue;
 		}
-		if (gk20a_channel_check_unserviceable(ch)) {
+		if (nvgpu_channel_check_unserviceable(ch)) {
 			nvgpu_log_info(g, "do not suspend recovered "
 						"channel %d", chid);
 		} else {
@@ -2479,7 +2446,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
 				nvgpu_channel_from_id(g, chid);
 
 		if (ch != NULL) {
-			if (gk20a_channel_check_unserviceable(ch)) {
+			if (nvgpu_channel_check_unserviceable(ch)) {
 				nvgpu_log_info(g, "do not unbind "
 						"recovered channel %d",
 						chid);
@@ -2510,7 +2477,7 @@ void nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g)
 		if (ch == NULL) {
 			continue;
 		}
-		if (gk20a_channel_check_unserviceable(ch)) {
+		if (nvgpu_channel_check_unserviceable(ch)) {
 			nvgpu_log_info(g, "do not resume recovered "
 						"channel %d", chid);
 		} else {
@@ -2573,7 +2540,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
 			 * semaphore.
 			 */
 			if (!c->deterministic) {
-				gk20a_channel_update(c);
+				nvgpu_channel_update(c);
 			}
 		}
 		nvgpu_channel_put(c);
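The channel.c hunks above rename both halves of a barrier pair: the nvgpu_smp_wmb() in nvgpu_channel_add_job() and nvgpu_channel_clean_up_jobs(), and the matching nvgpu_smp_rmb() in nvgpu_channel_alloc_job() and nvgpu_channel_is_prealloc_enabled(). A minimal standalone sketch of the same single-producer/single-consumer ring discipline used by the pre-alloc joblist, with C11 atomics standing in for the nvgpu barrier wrappers (all names here are illustrative, not driver API):

/* SPSC ring mirroring the pre-alloc joblist put/get pattern. */
#include <stdatomic.h>
#include <stdbool.h>

#define RING_LEN 8U

struct job { int payload; };

struct joblist {
	struct job jobs[RING_LEN];
	_Atomic unsigned int put;	/* producer index */
	_Atomic unsigned int get;	/* consumer index */
};

static bool joblist_add(struct joblist *jl, struct job j)
{
	unsigned int put = atomic_load_explicit(&jl->put, memory_order_relaxed);
	unsigned int get = atomic_load_explicit(&jl->get, memory_order_acquire);

	if (((put + 1U) % RING_LEN) == get)
		return false;	/* full; the driver reports -EAGAIN here */

	jl->jobs[put] = j;
	/* release pairs with the consumer's acquire, like wmb/rmb */
	atomic_store_explicit(&jl->put, (put + 1U) % RING_LEN,
			      memory_order_release);
	return true;
}

static bool joblist_del(struct joblist *jl, struct job *out)
{
	unsigned int get = atomic_load_explicit(&jl->get, memory_order_relaxed);
	unsigned int put = atomic_load_explicit(&jl->put, memory_order_acquire);

	if (get == put)
		return false;	/* empty: no jobs in flight */

	*out = jl->jobs[get];
	atomic_store_explicit(&jl->get, (get + 1U) % RING_LEN,
			      memory_order_release);
	return true;
}

The point of the pairing is the same as in the driver: the producer must publish the job slot contents before advancing put, and the consumer must read put before it dereferences the slot.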
diff --git a/drivers/gpu/nvgpu/common/fifo/runlist.c b/drivers/gpu/nvgpu/common/fifo/runlist.c
index 1e6f541ae..a243e933d 100644
--- a/drivers/gpu/nvgpu/common/fifo/runlist.c
+++ b/drivers/gpu/nvgpu/common/fifo/runlist.c
@@ -96,7 +96,7 @@ static u32 nvgpu_runlist_append_tsg(struct gk20a *g,
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	/* add runnable channels bound to this TSG */
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list,
-			channel_gk20a, ch_entry) {
+			nvgpu_channel, ch_entry) {
 		if (!test_bit((int)ch->chid,
 				runlist->active_channels)) {
 			continue;
diff --git a/drivers/gpu/nvgpu/common/fifo/submit.c b/drivers/gpu/nvgpu/common/fifo/submit.c
index fe33ece16..e0f1aa925 100644
--- a/drivers/gpu/nvgpu/common/fifo/submit.c
+++ b/drivers/gpu/nvgpu/common/fifo/submit.c
@@ -51,7 +51,7 @@ static int nvgpu_submit_prepare_syncs(struct nvgpu_channel *c,
 	int wait_fence_fd = -1;
 	int err = 0;
 	bool need_wfi = (flags & NVGPU_SUBMIT_FLAGS_SUPPRESS_WFI) == 0U;
-	bool pre_alloc_enabled = channel_gk20a_is_prealloc_enabled(c);
+	bool pre_alloc_enabled = nvgpu_channel_is_prealloc_enabled(c);
 	struct nvgpu_channel_sync_syncpt *sync_syncpt = NULL;
 	bool flag_fence_get = (flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) != 0U;
 	bool flag_sync_fence = (flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE) != 0U;
@@ -349,7 +349,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 		return -ENODEV;
 	}
 
-	if (gk20a_channel_check_unserviceable(c)) {
+	if (nvgpu_channel_check_unserviceable(c)) {
 		return -ETIMEDOUT;
 	}
 
@@ -375,7 +375,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 	}
 
 	/* an address space needs to have been bound at this point. */
-	if (!gk20a_channel_as_bound(c)) {
+	if (!nvgpu_channel_as_bound(c)) {
 		nvgpu_err(g,
 			"not bound to an address space at time of gpfifo"
 			" submission.");
@@ -418,7 +418,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 	 * job tracking is required, the channel must have
 	 * pre-allocated resources. Otherwise, we fail the submit here
 	 */
-	if (c->deterministic && !channel_gk20a_is_prealloc_enabled(c)) {
+	if (c->deterministic && !nvgpu_channel_is_prealloc_enabled(c)) {
 		return -EINVAL;
 	}
 
@@ -460,7 +460,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 		/*
 		 * Get a power ref unless this is a deterministic
 		 * channel that holds them during the channel lifetime.
-		 * This one is released by gk20a_channel_clean_up_jobs,
+		 * This one is released by nvgpu_channel_clean_up_jobs,
 		 * via syncpt or sema interrupt, whichever is used.
 		 */
 		err = gk20a_busy(g);
@@ -474,7 +474,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 
 		if (!need_deferred_cleanup) {
 			/* clean up a single job */
-			gk20a_channel_clean_up_jobs(c, false);
+			nvgpu_channel_clean_up_jobs(c, false);
 		}
 	}
 
@@ -519,13 +519,13 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 		}
 	}
 
-	if (gk20a_channel_check_unserviceable(c)) {
+	if (nvgpu_channel_check_unserviceable(c)) {
 		err = -ETIMEDOUT;
 		goto clean_up;
 	}
 
 	if (need_job_tracking) {
-		err = channel_gk20a_alloc_job(c, &job);
+		err = nvgpu_channel_alloc_job(c, &job);
 		if (err != 0) {
 			goto clean_up;
 		}
@@ -565,7 +565,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 	}
 
 	if (need_job_tracking) {
-		err = gk20a_channel_add_job(c, job, skip_buffer_refcounting);
+		err = nvgpu_channel_add_job(c, job, skip_buffer_refcounting);
 		if (err != 0) {
 			goto clean_up_job;
 		}
@@ -595,7 +595,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 	return err;
 
 clean_up_job:
-	channel_gk20a_free_job(c, job);
+	nvgpu_channel_free_job(c, job);
clean_up:
 	nvgpu_log_fn(g, "fail");
 	nvgpu_fence_put(post_fence);
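The submit path renamed above unwinds through the clean_up_job/clean_up labels so that a job allocated by nvgpu_channel_alloc_job() is always released on failure. A toy reduction of that goto-unwind shape, with stub helpers (toy_* names are hypothetical, not driver API):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_job { int id; };

/* Stubs standing in for the real allocation/submission helpers. */
static int toy_alloc_job(struct toy_job **job_out)
{
	static struct toy_job job;
	*job_out = &job;
	return 0;
}
static void toy_free_job(struct toy_job *job) { (void)job; }
static int toy_add_job(struct toy_job *job) { (void)job; return 0; }
static int toy_hw_submit(struct toy_job *job) { (void)job; return 0; }

static int toy_submit(bool unserviceable, bool need_job_tracking)
{
	struct toy_job *job = NULL;
	int err;

	/* mirrors the nvgpu_channel_check_unserviceable() early-outs */
	if (unserviceable)
		return -ETIMEDOUT;

	if (need_job_tracking) {
		err = toy_alloc_job(&job);
		if (err != 0)
			goto clean_up;
	}

	err = toy_hw_submit(job);
	if (err != 0)
		goto clean_up_job;

	if (need_job_tracking) {
		err = toy_add_job(job);
		if (err != 0)
			goto clean_up_job;
	}
	return 0;

clean_up_job:
	if (job != NULL)
		toy_free_job(job);	/* the job never reached the joblist */
clean_up:
	return err;
}

Once a job has been added to the joblist, ownership passes to the cleanup worker, which is why the real code only calls nvgpu_channel_free_job() on the pre-add failure paths.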
diff --git a/drivers/gpu/nvgpu/common/fifo/tsg.c b/drivers/gpu/nvgpu/common/fifo/tsg.c
index c26348248..f3329a5b9 100644
--- a/drivers/gpu/nvgpu/common/fifo/tsg.c
+++ b/drivers/gpu/nvgpu/common/fifo/tsg.c
@@ -40,7 +40,7 @@ void nvgpu_tsg_disable(struct nvgpu_tsg *tsg)
 	struct nvgpu_channel *ch;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		g->ops.channel.disable(ch);
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
@@ -147,7 +147,7 @@ int nvgpu_tsg_unbind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
 		nvgpu_tsg_abort(g, tsg, true);
 
 		/* If channel unbind fails, channel is still part of runlist */
-		if (channel_gk20a_update_runlist(ch, false) != 0) {
+		if (nvgpu_channel_update_runlist(ch, false) != 0) {
 			nvgpu_err(g, "remove ch %u from runlist failed",
 				ch->chid);
 		}
@@ -176,7 +176,7 @@ int nvgpu_tsg_unbind_channel_common(struct nvgpu_tsg *tsg,
 
 	/* If one channel in TSG times out, we disable all channels */
 	nvgpu_rwsem_down_write(&tsg->ch_list_lock);
-	tsg_timedout = gk20a_channel_check_unserviceable(ch);
+	tsg_timedout = nvgpu_channel_check_unserviceable(ch);
 	nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 
 	/* Disable TSG and examine status before unbinding channel */
@@ -197,7 +197,7 @@ int nvgpu_tsg_unbind_channel_common(struct nvgpu_tsg *tsg,
 	}
 
 	/* Channel should be seen as TSG channel while updating runlist */
-	err = channel_gk20a_update_runlist(ch, false);
+	err = nvgpu_channel_update_runlist(ch, false);
 	if (err != 0) {
 		nvgpu_err(g, "update runlist failed ch:%u tsg:%u",
 			ch->chid, tsg->tsgid);
@@ -275,7 +275,7 @@ void nvgpu_tsg_unbind_channel_check_ctx_reload(struct nvgpu_tsg *tsg,
 	if (hw_state->ctx_reload) {
 		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 		nvgpu_list_for_each_entry(temp_ch, &tsg->ch_list,
-				channel_gk20a, ch_entry) {
+				nvgpu_channel, ch_entry) {
 			if (temp_ch->chid != ch->chid) {
 				g->ops.channel.force_ctx_reload(temp_ch);
 				break;
@@ -397,7 +397,7 @@ bool nvgpu_tsg_mark_error(struct gk20a *g,
 	bool verbose = false;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		if (nvgpu_channel_get(ch) != NULL) {
 			if (nvgpu_channel_mark_error(g, ch)) {
 				verbose = true;
@@ -416,7 +416,7 @@ void nvgpu_tsg_set_ctxsw_timeout_accumulated_ms(struct nvgpu_tsg *tsg, u32 ms)
 	struct nvgpu_channel *ch = NULL;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		if (nvgpu_channel_get(ch) != NULL) {
 			ch->ctxsw_timeout_accumulated_ms = ms;
 			nvgpu_channel_put(ch);
@@ -431,7 +431,7 @@ bool nvgpu_tsg_ctxsw_timeout_debug_dump_state(struct nvgpu_tsg *tsg)
 	bool verbose = false;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		if (nvgpu_channel_get(ch) != NULL) {
 			if (ch->ctxsw_timeout_debug_dump) {
 				verbose = true;
@@ -450,7 +450,7 @@ void nvgpu_tsg_set_error_notifier(struct gk20a *g, struct nvgpu_tsg *tsg,
 	struct nvgpu_channel *ch = NULL;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		if (nvgpu_channel_get(ch) != NULL) {
 			nvgpu_channel_set_error_notifier(g, ch, error_notifier);
 			nvgpu_channel_put(ch);
@@ -484,7 +484,7 @@ bool nvgpu_tsg_check_ctxsw_timeout(struct nvgpu_tsg *tsg,
 	 * fifo recovery is needed if at least one channel reached the
 	 * maximum timeout without progress (update in gpfifo pointers).
 	 */
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		if (nvgpu_channel_get(ch) != NULL) {
 			recover = nvgpu_channel_update_and_check_ctxsw_timeout(ch,
 					*ms, &progress);
@@ -859,9 +859,9 @@ void nvgpu_tsg_abort(struct gk20a *g, struct nvgpu_tsg *tsg, bool preempt)
 	}
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		if (nvgpu_channel_get(ch) != NULL) {
-			gk20a_channel_set_unserviceable(ch);
+			nvgpu_channel_set_unserviceable(ch);
 			if (g->ops.channel.abort_clean_up != NULL) {
 				g->ops.channel.abort_clean_up(ch);
 			}
@@ -887,7 +887,7 @@ void nvgpu_tsg_reset_faulted_eng_pbdma(struct gk20a *g, struct nvgpu_tsg *tsg,
 	nvgpu_log(g, gpu_dbg_info, "reset faulted eng and pbdma bits in ccsr");
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		g->ops.channel.reset_faulted(g, ch, eng, pbdma);
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
diff --git a/drivers/gpu/nvgpu/common/fifo/userd.c b/drivers/gpu/nvgpu/common/fifo/userd.c
index 8d7cae6e9..11ed1d96d 100644
--- a/drivers/gpu/nvgpu/common/fifo/userd.c
+++ b/drivers/gpu/nvgpu/common/fifo/userd.c
@@ -110,13 +110,13 @@ int nvgpu_userd_init_channel(struct gk20a *g, struct nvgpu_channel *c)
 	c->userd_mem = mem;
 	c->userd_offset = (c->chid % f->num_channels_per_slab) *
 				f->userd_entry_size;
-	c->userd_iova = gk20a_channel_userd_addr(c);
+	c->userd_iova = nvgpu_channel_userd_addr(c);
 
 	nvgpu_log(g, gpu_dbg_info,
 		"chid=%u slab=%u mem=%p offset=%u addr=%llx gpu_va=%llx",
 		c->chid, slab, mem, c->userd_offset,
-		gk20a_channel_userd_addr(c),
-		gk20a_channel_userd_gpu_va(c));
+		nvgpu_channel_userd_addr(c),
+		nvgpu_channel_userd_gpu_va(c));
 
 done:
 	nvgpu_mutex_release(&f->userd_mutex);
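Every nvgpu_list_for_each_entry() hunk above changes only the type token, and that token is not a struct tag: nvgpu's list helpers appear to resolve the container by token-pasting it into a function name (type##_from_##member), which is why these hunks only build together with the channel.h hunk further down that renames channel_gk20a_from_ch_entry() and friends to match. A standalone sketch of the pattern, with toy names rather than the real nvgpu/list.h contents:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct list_node { struct list_node *next; };

/* The macro resolves the container via token pasting, not via a cast. */
#define list_entry(ptr, type, member) type##_from_##member(ptr)

struct item {
	int value;
	struct list_node link;
};

/* Must exist with exactly this name for list_entry(n, item, link). */
static inline struct item *item_from_link(struct list_node *node)
{
	return (struct item *)((uintptr_t)node - offsetof(struct item, link));
}

int main(void)
{
	struct item it = { .value = 42 };

	/* expands to item_from_link(&it.link) */
	printf("%d\n", list_entry(&it.link, item, link)->value);
	return 0;
}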
diff --git a/drivers/gpu/nvgpu/common/gr/gr_setup.c b/drivers/gpu/nvgpu/common/gr/gr_setup.c
index 3ba232ce4..3bf89a64e 100644
--- a/drivers/gpu/nvgpu/common/gr/gr_setup.c
+++ b/drivers/gpu/nvgpu/common/gr/gr_setup.c
@@ -106,7 +106,7 @@ int nvgpu_gr_setup_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num,
 	nvgpu_log_fn(g, " ");
 
 	/* an address space needs to have been bound at this point.*/
-	if (!gk20a_channel_as_bound(c) && (c->vm == NULL)) {
+	if (!nvgpu_channel_as_bound(c) && (c->vm == NULL)) {
 		nvgpu_err(g,
 			   "not bound to address space at time"
 			   " of grctx allocation");
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 19d89e15e..0edecb8df 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -70,7 +70,7 @@ int nvgpu_vm_bind_channel(struct vm_gk20a *vm, struct nvgpu_channel *ch)
 	nvgpu_vm_get(vm);
 	ch->vm = vm;
-	err = channel_gk20a_commit_va(ch);
+	err = nvgpu_channel_commit_va(ch);
 	if (err != 0) {
 		ch->vm = NULL;
 	}
diff --git a/drivers/gpu/nvgpu/common/sync/channel_sync_semaphore.c b/drivers/gpu/nvgpu/common/sync/channel_sync_semaphore.c
index 80ffa13e3..710c06826 100644
--- a/drivers/gpu/nvgpu/common/sync/channel_sync_semaphore.c
+++ b/drivers/gpu/nvgpu/common/sync/channel_sync_semaphore.c
@@ -154,7 +154,7 @@ static int channel_sync_semaphore_wait_fd(
 	}
 
 	wait_cmd_size = c->g->ops.sync.sema.get_wait_cmd_size();
-	err = gk20a_channel_alloc_priv_cmdbuf(c,
+	err = nvgpu_channel_alloc_priv_cmdbuf(c,
 		wait_cmd_size * num_fences, entry);
 	if (err != 0) {
 		nvgpu_err(c->g, "not enough priv cmd buffer space");
@@ -195,7 +195,7 @@ static int channel_sync_semaphore_incr_common(
 	}
 
 	incr_cmd_size = c->g->ops.sync.sema.get_incr_cmd_size();
-	err = gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, incr_cmd);
+	err = nvgpu_channel_alloc_priv_cmdbuf(c, incr_cmd_size, incr_cmd);
 	if (err != 0) {
 		nvgpu_err(c->g,
 			"not enough priv cmd buffer space");
diff --git a/drivers/gpu/nvgpu/common/sync/channel_sync_syncpt.c b/drivers/gpu/nvgpu/common/sync/channel_sync_syncpt.c
index 4e0e872e8..68280cd0b 100644
--- a/drivers/gpu/nvgpu/common/sync/channel_sync_syncpt.c
+++ b/drivers/gpu/nvgpu/common/sync/channel_sync_syncpt.c
@@ -70,7 +70,7 @@ static int channel_sync_syncpt_gen_wait_cmd(struct nvgpu_channel *c,
 		}
 	} else {
 		if (!preallocated) {
-			err = gk20a_channel_alloc_priv_cmdbuf(c,
+			err = nvgpu_channel_alloc_priv_cmdbuf(c,
 				c->g->ops.sync.syncpt.get_wait_cmd_size(),
 				wait_cmd);
 			if (err != 0) {
@@ -150,7 +150,7 @@ static int channel_sync_syncpt_wait_fd(struct nvgpu_channel_sync *s, int fd,
 	}
 
 	wait_cmd_size = c->g->ops.sync.syncpt.get_wait_cmd_size();
-	err = gk20a_channel_alloc_priv_cmdbuf(c,
+	err = nvgpu_channel_alloc_priv_cmdbuf(c,
 			wait_cmd_size * num_fences, wait_cmd);
 	if (err != 0) {
 		nvgpu_err(c->g, "not enough priv cmd buffer space");
@@ -174,7 +174,7 @@ static void channel_sync_syncpt_update(void *priv, int nr_completed)
 {
 	struct nvgpu_channel *ch = priv;
 
-	gk20a_channel_update(ch);
+	nvgpu_channel_update(ch);
 
 	/* note: channel_get() is in channel_sync_syncpt_incr_common() */
 	nvgpu_channel_put(ch);
@@ -194,7 +194,7 @@ static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
 	struct nvgpu_channel *c = sp->c;
 	struct nvgpu_os_fence os_fence = {0};
 
-	err = gk20a_channel_alloc_priv_cmdbuf(c,
+	err = nvgpu_channel_alloc_priv_cmdbuf(c,
 			c->g->ops.sync.syncpt.get_incr_cmd_size(wfi_cmd),
 			incr_cmd);
 	if (err != 0) {
diff --git a/drivers/gpu/nvgpu/common/vgpu/fifo/fifo_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/fifo/fifo_vgpu.c
index 91d94ec5c..47fc7a5d3 100644
--- a/drivers/gpu/nvgpu/common/vgpu/fifo/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/common/vgpu/fifo/fifo_vgpu.c
@@ -374,11 +374,11 @@ int vgpu_tsg_force_reset_ch(struct nvgpu_channel *ch,
 
 		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 		nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
-				channel_gk20a, ch_entry) {
+				nvgpu_channel, ch_entry) {
 			if (nvgpu_channel_get(ch_tsg)) {
 				nvgpu_channel_set_error_notifier(g, ch_tsg,
 						err_code);
-				gk20a_channel_set_unserviceable(ch_tsg);
+				nvgpu_channel_set_unserviceable(ch_tsg);
 				nvgpu_channel_put(ch_tsg);
 			}
 		}
@@ -394,7 +394,7 @@ int vgpu_tsg_force_reset_ch(struct nvgpu_channel *ch,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 	if (!err) {
-		gk20a_channel_abort(ch, false);
+		nvgpu_channel_abort(ch, false);
 	}
 	return err ? err : msg.ret;
 }
@@ -412,7 +412,7 @@ static void vgpu_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
 			NVGPU_ERR_NOTIFIER_FIFO_ERROR_MMU_ERR_FLT);
 
 	/* mark channel as faulted */
-	gk20a_channel_set_unserviceable(ch);
+	nvgpu_channel_set_unserviceable(ch);
 
 	/* unblock pending waits */
 	nvgpu_cond_broadcast_interruptible(&ch->semaphore_wq);
@@ -430,7 +430,7 @@ static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g,
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
-			channel_gk20a, ch_entry) {
+			nvgpu_channel, ch_entry) {
 		if (nvgpu_channel_get(ch_tsg)) {
 			vgpu_fifo_set_ctx_mmu_error_ch(g, ch_tsg);
 			nvgpu_channel_put(ch_tsg);
@@ -468,7 +468,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
 		break;
 	case TEGRA_VGPU_FIFO_INTR_MMU_FAULT:
 		vgpu_fifo_set_ctx_mmu_error_ch_tsg(g, ch);
-		gk20a_channel_abort(ch, false);
+		nvgpu_channel_abort(ch, false);
 		break;
 	default:
 		WARN_ON(1);
@@ -528,7 +528,7 @@ void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
 		return;
 	}
 
-	gk20a_channel_set_unserviceable(ch);
+	nvgpu_channel_set_unserviceable(ch);
 	g->ops.channel.abort_clean_up(ch);
 	nvgpu_channel_put(ch);
 }
diff --git a/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c
index 8987714fa..2f6bb9f1a 100644
--- a/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c
@@ -224,7 +224,7 @@ int vgpu_gr_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num, u32 flags)
 	nvgpu_log_fn(g, " ");
 
 	/* an address space needs to have been bound at this point.*/
-	if (!gk20a_channel_as_bound(c)) {
+	if (!nvgpu_channel_as_bound(c)) {
 		nvgpu_err(g, "not bound to address space at time"
 			" of grctx allocation");
 		return -EINVAL;
diff --git a/drivers/gpu/nvgpu/common/vgpu/tsg_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/tsg_vgpu.c
index c7666b0d9..18fe54a89 100644
--- a/drivers/gpu/nvgpu/common/vgpu/tsg_vgpu.c
+++ b/drivers/gpu/nvgpu/common/vgpu/tsg_vgpu.c
@@ -82,7 +82,7 @@ void vgpu_tsg_enable(struct nvgpu_tsg *tsg)
 	struct nvgpu_channel *ch;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		g->ops.channel.enable(ch);
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
diff --git a/drivers/gpu/nvgpu/common/vgpu/vgpu.c b/drivers/gpu/nvgpu/common/vgpu/vgpu.c
index f5b55e9b5..d1fb3eb8c 100644
--- a/drivers/gpu/nvgpu/common/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/common/vgpu/vgpu.c
@@ -132,7 +132,7 @@ static void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
 		return;
 	}
 
-	gk20a_channel_set_unserviceable(ch);
+	nvgpu_channel_set_unserviceable(ch);
 	g->ops.fifo.ch_abort_clean_up(ch);
 	nvgpu_channel_put(ch);
 }
diff --git a/drivers/gpu/nvgpu/hal/fifo/tsg_gk20a.c b/drivers/gpu/nvgpu/hal/fifo/tsg_gk20a.c
index cc9a514ff..99d84c0e1 100644
--- a/drivers/gpu/nvgpu/hal/fifo/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/hal/fifo/tsg_gk20a.c
@@ -40,7 +40,7 @@ void gk20a_tsg_enable(struct nvgpu_tsg *tsg)
 	 * and then rest of the channels should be enabled
 	 */
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		struct nvgpu_channel_hw_state hw_state;
 
 		g->ops.channel.read_state(g, ch, &hw_state);
@@ -50,7 +50,7 @@ void gk20a_tsg_enable(struct nvgpu_tsg *tsg)
 		}
 	}
 
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		struct nvgpu_channel_hw_state hw_state;
 
 		g->ops.channel.read_state(g, ch, &hw_state);
diff --git a/drivers/gpu/nvgpu/hal/fifo/tsg_gv11b.c b/drivers/gpu/nvgpu/hal/fifo/tsg_gv11b.c
index ec0ff8efb..bdb892e37 100644
--- a/drivers/gpu/nvgpu/hal/fifo/tsg_gv11b.c
+++ b/drivers/gpu/nvgpu/hal/fifo/tsg_gv11b.c
@@ -42,7 +42,7 @@ void gv11b_tsg_enable(struct nvgpu_tsg *tsg)
 	struct nvgpu_channel *last_ch = NULL;
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		g->ops.channel.enable(ch);
 		last_ch = ch;
 	}
diff --git a/drivers/gpu/nvgpu/hal/fifo/userd_gk20a.c b/drivers/gpu/nvgpu/hal/fifo/userd_gk20a.c
index 7e6282d96..ea2ab1eed 100644
--- a/drivers/gpu/nvgpu/hal/fifo/userd_gk20a.c
+++ b/drivers/gpu/nvgpu/hal/fifo/userd_gk20a.c
@@ -52,7 +52,7 @@ void gk20a_userd_init_mem(struct gk20a *g, struct nvgpu_channel *c)
 
 u32 gk20a_userd_gp_get(struct gk20a *g, struct nvgpu_channel *c)
 {
-	u64 userd_gpu_va = gk20a_channel_userd_gpu_va(c);
+	u64 userd_gpu_va = nvgpu_channel_userd_gpu_va(c);
 	u64 addr = userd_gpu_va + sizeof(u32) * ram_userd_gp_get_w();
 
 	BUG_ON(u64_hi32(addr) != 0U);
@@ -62,7 +62,7 @@ u32 gk20a_userd_gp_get(struct gk20a *g, struct nvgpu_channel *c)
 
 u64 gk20a_userd_pb_get(struct gk20a *g, struct nvgpu_channel *c)
 {
-	u64 userd_gpu_va = gk20a_channel_userd_gpu_va(c);
+	u64 userd_gpu_va = nvgpu_channel_userd_gpu_va(c);
 	u64 lo_addr = userd_gpu_va + sizeof(u32) * ram_userd_get_w();
 	u64 hi_addr = userd_gpu_va + sizeof(u32) * ram_userd_get_hi_w();
 	u32 lo, hi;
@@ -76,7 +76,7 @@ u64 gk20a_userd_pb_get(struct gk20a *g, struct nvgpu_channel *c)
 
 void gk20a_userd_gp_put(struct gk20a *g, struct nvgpu_channel *c)
 {
-	u64 userd_gpu_va = gk20a_channel_userd_gpu_va(c);
+	u64 userd_gpu_va = nvgpu_channel_userd_gpu_va(c);
 	u64 addr = userd_gpu_va + sizeof(u32) * ram_userd_gp_put_w();
 
 	BUG_ON(u64_hi32(addr) != 0U);
diff --git a/drivers/gpu/nvgpu/hal/gr/gr/gr_gk20a.c b/drivers/gpu/nvgpu/hal/gr/gr/gr_gk20a.c
index 6c80f403a..640e0cda4 100644
--- a/drivers/gpu/nvgpu/hal/gr/gr/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/hal/gr/gr/gr_gk20a.c
@@ -168,7 +168,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		err = nvgpu_gr_ctx_set_hwpm_mode(g, gr_ctx, false);
 		if (err != 0) {
 			nvgpu_err(g, "chid: %d set_hwpm_mode failed",
diff --git a/drivers/gpu/nvgpu/include/nvgpu/channel.h b/drivers/gpu/nvgpu/include/nvgpu/channel.h
index db5722988..f7c5e54b4 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/channel.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/channel.h
@@ -247,7 +247,7 @@ struct nvgpu_channel_wdt {
 * These are zeroed when a channel is closed, so a new one starts fresh.
 */
-enum channel_gk20a_ref_action_type {
+enum nvgpu_channel_ref_action_type {
	channel_gk20a_ref_action_get,
	channel_gk20a_ref_action_put
};
@@ -257,7 +257,7 @@ enum channel_gk20a_ref_action_type {
 #include
 
 struct nvgpu_channel_ref_action {
-	enum channel_gk20a_ref_action_type type;
+	enum nvgpu_channel_ref_action_type type;
 	s64 timestamp_ms;
 	/*
 	 * Many of these traces will be similar. Simpler to just capture
@@ -383,39 +383,38 @@ struct nvgpu_channel {
 	bool is_privileged_channel;
 };
 
-
 static inline struct nvgpu_channel *
-channel_gk20a_from_free_chs(struct nvgpu_list_node *node)
+nvgpu_channel_from_free_chs(struct nvgpu_list_node *node)
 {
-	return (struct nvgpu_channel *)
-	   ((uintptr_t)node - offsetof(struct nvgpu_channel, free_chs));
+	return (struct nvgpu_channel *)
+		((uintptr_t)node - offsetof(struct nvgpu_channel, free_chs));
 };
 
 static inline struct nvgpu_channel *
-channel_gk20a_from_ch_entry(struct nvgpu_list_node *node)
+nvgpu_channel_from_ch_entry(struct nvgpu_list_node *node)
 {
-	return (struct nvgpu_channel *)
-	   ((uintptr_t)node - offsetof(struct nvgpu_channel, ch_entry));
+	return (struct nvgpu_channel *)
+		((uintptr_t)node - offsetof(struct nvgpu_channel, ch_entry));
 };
 
 static inline struct nvgpu_channel *
-channel_gk20a_from_worker_item(struct nvgpu_list_node *node)
+nvgpu_channel_from_worker_item(struct nvgpu_list_node *node)
 {
 	return (struct nvgpu_channel *)
 		((uintptr_t)node - offsetof(struct nvgpu_channel, worker_item));
 };
 
-static inline bool gk20a_channel_as_bound(struct nvgpu_channel *ch)
+static inline bool nvgpu_channel_as_bound(struct nvgpu_channel *ch)
 {
 	return (ch->vm != NULL);
 }
-int channel_gk20a_commit_va(struct nvgpu_channel *c);
-int gk20a_init_channel_support(struct gk20a *g, u32 chid);
+int nvgpu_channel_commit_va(struct nvgpu_channel *c);
+int nvgpu_channel_init_support(struct gk20a *g, u32 chid);
 int nvgpu_channel_setup_sw(struct gk20a *g);
 void nvgpu_channel_cleanup_sw(struct gk20a *g);
 
 /* must be inside gk20a_busy()..gk20a_idle() */
-void gk20a_channel_close(struct nvgpu_channel *ch);
+void nvgpu_channel_close(struct nvgpu_channel *ch);
 void nvgpu_channel_kill(struct nvgpu_channel *ch);
 
 void nvgpu_channel_set_ctx_mmu_error(struct gk20a *g,
@@ -428,10 +427,10 @@ bool nvgpu_channel_update_and_check_ctxsw_timeout(struct nvgpu_channel *ch,
 void nvgpu_channel_recover(struct gk20a *g, struct nvgpu_channel *ch,
 	bool verbose, u32 rc_type);
 
-void gk20a_channel_abort(struct nvgpu_channel *ch, bool channel_preempt);
+void nvgpu_channel_abort(struct nvgpu_channel *ch, bool channel_preempt);
 void nvgpu_channel_abort_clean_up(struct nvgpu_channel *ch);
 void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events);
-int gk20a_channel_alloc_priv_cmdbuf(struct nvgpu_channel *c, u32 orig_size,
+int nvgpu_channel_alloc_priv_cmdbuf(struct nvgpu_channel *c, u32 orig_size,
 			     struct priv_cmd_entry *e);
 void nvgpu_channel_update_priv_cmd_q_and_free_entry(
 		struct nvgpu_channel *ch, struct priv_cmd_entry *e);
@@ -448,8 +447,8 @@ void gk20a_channel_deterministic_unidle(struct gk20a *g);
 int nvgpu_channel_worker_init(struct gk20a *g);
 void nvgpu_channel_worker_deinit(struct gk20a *g);
 
-struct nvgpu_channel *gk20a_get_channel_from_file(int fd);
-void gk20a_channel_update(struct nvgpu_channel *c);
+struct nvgpu_channel *nvgpu_channel_get_from_file(int fd);
+void nvgpu_channel_update(struct nvgpu_channel *c);
 
 /* returns ch if reference was obtained */
 struct nvgpu_channel *__must_check nvgpu_channel_get__func(
@@ -466,8 +465,6 @@ struct nvgpu_channel *__must_check nvgpu_channel_from_id__func(
 #define nvgpu_channel_from_id(g, chid) \
 	nvgpu_channel_from_id__func(g, chid, __func__)
 
-int gk20a_wait_channel_idle(struct nvgpu_channel *ch);
-
 /* runlist_id -1 is synonym for NVGPU_ENGINE_GR runlist id */
 struct nvgpu_channel *gk20a_open_new_channel(struct gk20a *g,
 		u32 runlist_id,
@@ -479,32 +476,32 @@ int nvgpu_channel_setup_bind(struct nvgpu_channel *c,
 
 void nvgpu_channel_wdt_restart_all_channels(struct gk20a *g);
 
-bool channel_gk20a_is_prealloc_enabled(struct nvgpu_channel *c);
-void channel_gk20a_joblist_lock(struct nvgpu_channel *c);
-void channel_gk20a_joblist_unlock(struct nvgpu_channel *c);
-bool channel_gk20a_joblist_is_empty(struct nvgpu_channel *c);
+bool nvgpu_channel_is_prealloc_enabled(struct nvgpu_channel *c);
+void nvgpu_channel_joblist_lock(struct nvgpu_channel *c);
+void nvgpu_channel_joblist_unlock(struct nvgpu_channel *c);
+bool nvgpu_channel_joblist_is_empty(struct nvgpu_channel *c);
 
-int channel_gk20a_update_runlist(struct nvgpu_channel *c, bool add);
+int nvgpu_channel_update_runlist(struct nvgpu_channel *c, bool add);
 
 void gk20a_wait_until_counter_is_N(
 	struct nvgpu_channel *ch, nvgpu_atomic_t *counter, int wait_value,
 	struct nvgpu_cond *c, const char *caller, const char *counter_name);
-int channel_gk20a_alloc_job(struct nvgpu_channel *c,
+int nvgpu_channel_alloc_job(struct nvgpu_channel *c,
 		struct nvgpu_channel_job **job_out);
-void channel_gk20a_free_job(struct nvgpu_channel *c,
+void nvgpu_channel_free_job(struct nvgpu_channel *c,
 		struct nvgpu_channel_job *job);
 u32 nvgpu_channel_update_gpfifo_get_and_get_free_count(
 		struct nvgpu_channel *ch);
 u32 nvgpu_channel_get_gpfifo_free_count(struct nvgpu_channel *ch);
-int gk20a_channel_add_job(struct nvgpu_channel *c,
+int nvgpu_channel_add_job(struct nvgpu_channel *c,
 				  struct nvgpu_channel_job *job,
 				  bool skip_buffer_refcounting);
 void nvgpu_channel_free_priv_cmd_entry(struct nvgpu_channel *c,
 			     struct priv_cmd_entry *e);
-void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,
+void nvgpu_channel_clean_up_jobs(struct nvgpu_channel *c,
 					bool clean_all);
 
-void gk20a_channel_free_usermode_buffers(struct nvgpu_channel *c);
+void nvgpu_channel_free_usermode_buffers(struct nvgpu_channel *c);
 u32 nvgpu_get_gpfifo_entry_size(void);
 
 int nvgpu_submit_channel_gpfifo_user(struct nvgpu_channel *c,
@@ -530,15 +527,15 @@ static inline void trace_write_pushbuffers(struct nvgpu_channel *c, u32 count)
 }
 #endif
 
-void gk20a_channel_set_unserviceable(struct nvgpu_channel *ch);
-bool gk20a_channel_check_unserviceable(struct nvgpu_channel *ch);
+void nvgpu_channel_set_unserviceable(struct nvgpu_channel *ch);
+bool nvgpu_channel_check_unserviceable(struct nvgpu_channel *ch);
 
-static inline u64 gk20a_channel_userd_addr(struct nvgpu_channel *c)
+static inline u64 nvgpu_channel_userd_addr(struct nvgpu_channel *c)
 {
 	return nvgpu_mem_get_addr(c->g, c->userd_mem) + c->userd_offset;
 }
 
-static inline u64 gk20a_channel_userd_gpu_va(struct nvgpu_channel *c)
+static inline u64 nvgpu_channel_userd_gpu_va(struct nvgpu_channel *c)
 {
 	struct nvgpu_mem *mem = c->userd_mem;
 
 	return (mem->gpu_va != 0ULL) ? mem->gpu_va + c->userd_offset : 0ULL;
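The header above pairs the __must_check nvgpu_channel_get__func()/nvgpu_channel_from_id() lookups with nvgpu_channel_put(), and the comment block in channel.c spells out the rule: a looked-up channel pointer may only be used while a reference is held. A sketch of the resulting caller pattern, using the real functions from this header but a hypothetical caller (it builds only inside the driver tree):

#include <nvgpu/channel.h>

static void example_poke_channel(struct gk20a *g, u32 chid)
{
	/* takes a reference, or returns NULL if the channel is dying */
	struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);

	if (ch == NULL) {
		return;	/* dead or being freed elsewhere: must not touch */
	}

	/* ... safe to use ch here, while the reference is held ... */

	nvgpu_channel_put(ch);	/* pairs with the get inside from_id */
}

This is exactly the shape of the watchdog loops in channel.c above, where every nvgpu_channel_from_id() in the per-chid loop is balanced by an nvgpu_channel_put() before moving on.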
diff --git a/drivers/gpu/nvgpu/os/linux/cde.c b/drivers/gpu/nvgpu/os/linux/cde.c
index 8588e7544..9768454d9 100644
--- a/drivers/gpu/nvgpu/os/linux/cde.c
+++ b/drivers/gpu/nvgpu/os/linux/cde.c
@@ -111,9 +111,9 @@ __must_hold(&cde_app->mutex)
 
 	/*
 	 * free the channel
-	 * gk20a_channel_close() will also unbind the channel from TSG
+	 * nvgpu_channel_close() will also unbind the channel from TSG
 	 */
-	gk20a_channel_close(ch);
+	nvgpu_channel_close(ch);
 	nvgpu_ref_put(&cde_ctx->tsg->refcount, nvgpu_tsg_release);
 
 	/* housekeeping on app */
@@ -1258,9 +1258,9 @@ __releases(&cde_app->mutex)
 	struct gk20a_cde_app *cde_app = &l->cde_app;
 	bool channel_idle;
 
-	channel_gk20a_joblist_lock(ch);
-	channel_idle = channel_gk20a_joblist_is_empty(ch);
-	channel_gk20a_joblist_unlock(ch);
+	nvgpu_channel_joblist_lock(ch);
+	channel_idle = nvgpu_channel_joblist_is_empty(ch);
+	nvgpu_channel_joblist_unlock(ch);
 
 	if (!channel_idle)
 		return;
@@ -1271,7 +1271,7 @@ __releases(&cde_app->mutex)
 	nvgpu_log_info(g, "double finish cde context %p on channel %p",
 			cde_ctx, ch);
 
-	if (gk20a_channel_check_unserviceable(ch)) {
+	if (nvgpu_channel_check_unserviceable(ch)) {
 		if (cde_ctx->is_temporary) {
 			nvgpu_warn(g,
 					"cde: channel had timed out"
@@ -1298,7 +1298,7 @@ __releases(&cde_app->mutex)
 				msecs_to_jiffies(CTX_DELETE_TIME));
 	}
 
-	if (!gk20a_channel_check_unserviceable(ch)) {
+	if (!nvgpu_channel_check_unserviceable(ch)) {
 		gk20a_cde_ctx_release(cde_ctx);
 	}
 }
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_as.c b/drivers/gpu/nvgpu/os/linux/ioctl_as.c
index e518a74df..dc91251d4 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_as.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_as.c
@@ -55,16 +55,16 @@ static int gk20a_as_ioctl_bind_channel(
 
 	nvgpu_log_fn(g, " ");
 
-	ch = gk20a_get_channel_from_file(args->channel_fd);
+	ch = nvgpu_channel_get_from_file(args->channel_fd);
 	if (!ch)
 		return -EINVAL;
 
-	if (gk20a_channel_as_bound(ch)) {
+	if (nvgpu_channel_as_bound(ch)) {
 		err = -EINVAL;
 		goto out;
 	}
 
-	/* this will set channel_gk20a->vm */
+	/* this will set nvgpu_channel->vm */
 	err = ch->g->ops.mm.vm_bind_channel(as_share->vm, ch);
 
 out:
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_channel.c b/drivers/gpu/nvgpu/os/linux/ioctl_channel.c
index ae564c82e..e7fdf3d11 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_channel.c
@@ -380,7 +380,7 @@ static int gk20a_init_error_notifier(struct nvgpu_channel *ch,
 *
 * NULL is returned if the channel was not found.
 */
-struct nvgpu_channel *gk20a_get_channel_from_file(int fd)
+struct nvgpu_channel *nvgpu_channel_get_from_file(int fd)
 {
 	struct nvgpu_channel *ch;
 	struct channel_priv *priv;
@@ -425,7 +425,7 @@ int gk20a_channel_release(struct inode *inode, struct file *filp)
 
 	trace_gk20a_channel_release(dev_name(dev_from_gk20a(g)));
 
-	gk20a_channel_close(ch);
+	nvgpu_channel_close(ch);
 	gk20a_channel_free_error_notifiers(ch);
 
 	gk20a_idle(g);
@@ -636,7 +636,7 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 	int ret = 0;
 
 	/* do not wait if channel has timed out */
-	if (gk20a_channel_check_unserviceable(ch)) {
+	if (nvgpu_channel_check_unserviceable(ch)) {
 		return -ETIMEDOUT;
 	}
 
@@ -658,7 +658,7 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 		ret = NVGPU_COND_WAIT_INTERRUPTIBLE(
 				&ch->semaphore_wq,
 				*semaphore == payload ||
-				gk20a_channel_check_unserviceable(ch),
+				nvgpu_channel_check_unserviceable(ch),
 				timeout);
 
 	dma_buf_kunmap(dmabuf, offset >> PAGE_SHIFT, data);
@@ -682,7 +682,7 @@ static int gk20a_channel_wait(struct nvgpu_channel *ch,
 
 	nvgpu_log_fn(g, " ");
 
-	if (gk20a_channel_check_unserviceable(ch)) {
+	if (nvgpu_channel_check_unserviceable(ch)) {
 		return -ETIMEDOUT;
 	}
 
@@ -720,7 +720,7 @@ static int gk20a_channel_wait(struct nvgpu_channel *ch,
 		remain = NVGPU_COND_WAIT_INTERRUPTIBLE(
 				&ch->notifier_wq,
 				notif->status == 0 ||
-				gk20a_channel_check_unserviceable(ch),
+				nvgpu_channel_check_unserviceable(ch),
 				args->timeout);
 
 		if (remain == 0 && notif->status != 0) {
@@ -789,7 +789,7 @@ static int gk20a_ioctl_channel_submit_gpfifo(
 	profile = nvgpu_profile_acquire(ch->g);
 	nvgpu_profile_snapshot(profile, PROFILE_IOCTL_ENTRY);
 
-	if (gk20a_channel_check_unserviceable(ch)) {
+	if (nvgpu_channel_check_unserviceable(ch)) {
 		return -ETIMEDOUT;
 	}
 
@@ -1278,7 +1278,7 @@ long gk20a_channel_ioctl(struct file *filp,
 		}
 	case NVGPU_IOCTL_CHANNEL_GET_TIMEDOUT:
 		((struct nvgpu_get_param_args *)buf)->value =
-			gk20a_channel_check_unserviceable(ch);
+			nvgpu_channel_check_unserviceable(ch);
 		break;
 	case NVGPU_IOCTL_CHANNEL_ENABLE:
 		err = gk20a_busy(ch->g);
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_ctrl.c b/drivers/gpu/nvgpu/os/linux/ioctl_ctrl.c
index 2541f804a..3ed945bf5 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_ctrl.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_ctrl.c
@@ -670,7 +670,7 @@ static int nvgpu_gpu_ioctl_set_debug_mode(
 	struct nvgpu_channel *ch;
 	int err;
 
-	ch = gk20a_get_channel_from_file(args->channel_fd);
+	ch = nvgpu_channel_get_from_file(args->channel_fd);
 	if (!ch)
 		return -EINVAL;
 
@@ -1633,7 +1633,7 @@ static int nvgpu_gpu_set_deterministic_opts(struct gk20a *g,
 			break;
 		}
 
-		ch = gk20a_get_channel_from_file(ch_fd);
+		ch = nvgpu_channel_get_from_file(ch_fd);
 		if (!ch) {
 			err = -EINVAL;
 			break;
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c
index af87a640c..648a9f549 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c
@@ -518,7 +518,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 		   g->name, args->channel_fd);
 
 	/*
-	 * Although gk20a_get_channel_from_file gives us a channel ref, need to
+	 * Although nvgpu_channel_get_from_file gives us a channel ref, need to
 	 * hold a ref to the file during the session lifetime. See comment in
 	 * struct dbg_session_channel_data.
 	 */
@@ -526,7 +526,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 	if (!f)
 		return -ENODEV;
 
-	ch = gk20a_get_channel_from_file(args->channel_fd);
+	ch = nvgpu_channel_get_from_file(args->channel_fd);
 	if (!ch) {
 		nvgpu_log_fn(g, "no channel found for fd");
 		err = -EINVAL;
@@ -1835,7 +1835,7 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 	nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d", g->name,
 		   args->channel_fd);
 
-	ch = gk20a_get_channel_from_file(args->channel_fd);
+	ch = nvgpu_channel_get_from_file(args->channel_fd);
 	if (!ch) {
 		nvgpu_log_fn(g, "no channel found for fd");
 		return -EINVAL;
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c b/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c
index 2733c5b5b..a73688982 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c
@@ -48,7 +48,7 @@ static int nvgpu_tsg_bind_channel_fd(struct nvgpu_tsg *tsg, int ch_fd)
 	struct nvgpu_channel *ch;
 	int err;
 
-	ch = gk20a_get_channel_from_file(ch_fd);
+	ch = nvgpu_channel_get_from_file(ch_fd);
 	if (!ch)
 		return -EINVAL;
 
@@ -80,7 +80,7 @@ static int gk20a_tsg_ioctl_bind_channel_ex(struct gk20a *g,
 		goto mutex_release;
 	}
 
-	ch = gk20a_get_channel_from_file(arg->channel_fd);
+	ch = nvgpu_channel_get_from_file(arg->channel_fd);
 	if (!ch) {
 		err = -EINVAL;
 		goto idle;
@@ -130,7 +130,7 @@ static int nvgpu_tsg_unbind_channel_fd(struct nvgpu_tsg *tsg, int ch_fd)
 	struct nvgpu_channel *ch;
 	int err = 0;
 
-	ch = gk20a_get_channel_from_file(ch_fd);
+	ch = nvgpu_channel_get_from_file(ch_fd);
 	if (!ch) {
 		return -EINVAL;
 	}
@@ -146,7 +146,7 @@ static int nvgpu_tsg_unbind_channel_fd(struct nvgpu_tsg *tsg, int ch_fd)
 	 * Mark the channel unserviceable since channel unbound from TSG
 	 * has no context of its own so it can't serve any job
 	 */
-	gk20a_channel_set_unserviceable(ch);
+	nvgpu_channel_set_unserviceable(ch);
 
 out:
 	nvgpu_channel_put(ch);
diff --git a/drivers/gpu/nvgpu/os/linux/linux-channel.c b/drivers/gpu/nvgpu/os/linux/linux-channel.c
index 1b5215630..9870dd9b4 100644
--- a/drivers/gpu/nvgpu/os/linux/linux-channel.c
+++ b/drivers/gpu/nvgpu/os/linux/linux-channel.c
@@ -440,7 +440,7 @@ put_dmabuf:
 	return err;
 }
 
-void nvgpu_channel_free_usermode_buffers(struct nvgpu_channel *c)
+void nvgpu_os_channel_free_usermode_buffers(struct nvgpu_channel *c)
 {
 	struct nvgpu_channel_linux *priv = c->os_priv;
 	struct gk20a *g = c->g;
@@ -564,7 +564,7 @@ int nvgpu_channel_init_support_linux(struct nvgpu_os_linux *l)
 		nvgpu_channel_alloc_usermode_buffers;
 
 	g->os_channel.free_usermode_buffers =
-		nvgpu_channel_free_usermode_buffers;
+		nvgpu_os_channel_free_usermode_buffers;
 
 	return 0;
diff --git a/include/trace/events/gk20a.h b/include/trace/events/gk20a.h
index 7a81c1dfd..48dffed3e 100644
--- a/include/trace/events/gk20a.h
+++ b/include/trace/events/gk20a.h
@@ -129,7 +129,7 @@ DECLARE_EVENT_CLASS(gk20a_channel,
 	TP_fast_assign(__entry->channel = channel;),
 	TP_printk("ch id %d", __entry->channel)
 );
-DEFINE_EVENT(gk20a_channel, gk20a_channel_update,
+DEFINE_EVENT(gk20a_channel, nvgpu_channel_update,
 	TP_PROTO(int channel),
 	TP_ARGS(channel)
 );
@@ -633,7 +633,7 @@ DEFINE_EVENT(gk20a_cde, gk20a_cde_finished_ctx_cb,
 #define trace_nvgpu_channel_get(arg...) ((void)(NULL))
 #define trace_nvgpu_channel_put(arg...) ((void)(NULL))
 #define trace_gk20a_open_new_channel(arg...) ((void)(NULL))
-#define trace_gk20a_channel_update(arg...) ((void)(NULL))
+#define trace_nvgpu_channel_update(arg...) ((void)(NULL))
 #define trace_gk20a_channel_reset(arg...) ((void)(NULL))
 #define trace_gk20a_mm_fb_flush(arg...) ((void)(NULL))
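The trace hunk renames both definitions of the event: the DEFINE_EVENT used when tracepoints are compiled in, and the no-op #define fallback, so every caller of trace_nvgpu_channel_update() builds without #ifdefs either way. The pattern in isolation, with a toy printf backend rather than the kernel tracepoint machinery (HAVE_TRACEPOINTS is a made-up switch; the named-variadic arg... macro is the same GNU extension the header above uses):

#include <stdio.h>

#ifdef HAVE_TRACEPOINTS
static inline void trace_nvgpu_channel_update(int chid)
{
	printf("channel update: ch id %d\n", chid);	/* toy backend */
}
#else
/* stubbed out: the call site compiles to nothing */
#define trace_nvgpu_channel_update(arg...)	((void)(NULL))
#endif

int main(void)
{
	trace_nvgpu_channel_update(3);	/* no-op unless HAVE_TRACEPOINTS */
	return 0;
}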