Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-23 01:50:07 +03:00)
gpu: nvgpu: rename gk20a_channel_* APIs
Renamed gk20a_channel_* APIs to nvgpu_channel_* APIs. Removed the unused channel API gk20a_wait_channel_idle(). Renamed nvgpu_channel_free_usermode_buffers in os/linux-channel.c to nvgpu_os_channel_free_usermode_buffers to avoid a conflict with the API of the same name in the channel unit.

Jira NVGPU-3248

Change-Id: I21379bd79e64da7e987ddaf5d19ff3804348acca
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2121902
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 1bf55ec715
Commit: f39a5c4ead
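The rename is mechanical: call sites keep their arguments and semantics and only swap the gk20a_channel_ prefix for nvgpu_channel_ (for example, gk20a_channel_close() becomes nvgpu_channel_close(), as the first hunk below shows). The following minimal, self-contained C sketch illustrates the pattern only; the stubbed struct, the printf body, and the transitional alias macro are assumptions for the example and are not part of this patch.

/* Illustrative sketch of the prefix change (not from the patch).
 * The real functions live inside the nvgpu driver; here they are
 * stubbed only to show how a call site reads before and after.
 */
#include <stdio.h>

struct nvgpu_channel { int chid; };

/* New-style API name used after this change. */
static void nvgpu_channel_close(struct nvgpu_channel *ch)
{
	printf("closing channel %d\n", ch->chid);
}

/* Hypothetical transitional alias for out-of-tree callers (an
 * assumption for this example; the patch does not provide one):
 * the old name simply forwards to the new one.
 */
#define gk20a_channel_close(ch) nvgpu_channel_close(ch)

int main(void)
{
	struct nvgpu_channel ch = { .chid = 511 };

	nvgpu_channel_close(&ch);  /* preferred, post-rename spelling */
	gk20a_channel_close(&ch);  /* still compiles via the alias    */
	return 0;
}

The patch itself does not keep any compatibility wrappers: it renames the definitions and updates every in-tree caller, as the hunks below show.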
@@ -203,9 +203,9 @@ static void nvgpu_ce_delete_gpu_context_locked(struct nvgpu_ce_gpu_ctx *ce_ctx)

 /*
 * free the channel
-* gk20a_channel_close() will also unbind the channel from TSG
+* nvgpu_channel_close() will also unbind the channel from TSG
 */
-gk20a_channel_close(ce_ctx->ch);
+nvgpu_channel_close(ce_ctx->ch);
 nvgpu_ref_put(&ce_ctx->tsg->refcount, nvgpu_tsg_release);

 /* housekeeping on app */
@@ -160,7 +160,7 @@ struct nvgpu_fence_type *nvgpu_fence_alloc(struct nvgpu_channel *ch)
 {
 struct nvgpu_fence_type *fence = NULL;

-if (channel_gk20a_is_prealloc_enabled(ch)) {
+if (nvgpu_channel_is_prealloc_enabled(ch)) {
 if (nvgpu_alloc_initialized(&ch->fence_allocator)) {
 fence = (struct nvgpu_fence_type *)(uintptr_t)
 nvgpu_alloc(&ch->fence_allocator,
@@ -79,7 +79,7 @@ static struct nvgpu_channel *allocate_channel(struct nvgpu_fifo *f)

 nvgpu_mutex_acquire(&f->free_chs_mutex);
 if (!nvgpu_list_empty(&f->free_chs)) {
-ch = nvgpu_list_first_entry(&f->free_chs, channel_gk20a,
+ch = nvgpu_list_first_entry(&f->free_chs, nvgpu_channel,
 free_chs);
 nvgpu_list_del(&ch->free_chs);
 WARN_ON(nvgpu_atomic_read(&ch->ref_count) != 0);
@@ -123,7 +123,7 @@ static void free_channel(struct nvgpu_fifo *f,
 }
 }

-int channel_gk20a_commit_va(struct nvgpu_channel *c)
+int nvgpu_channel_commit_va(struct nvgpu_channel *c)
 {
 struct gk20a *g = c->g;

@@ -135,7 +135,7 @@ int channel_gk20a_commit_va(struct nvgpu_channel *c)
 return 0;
 }

-int channel_gk20a_update_runlist(struct nvgpu_channel *c, bool add)
+int nvgpu_channel_update_runlist(struct nvgpu_channel *c, bool add)
 {
 return c->g->ops.runlist.update_for_channel(c->g, c->runlist_id,
 c, add, true);
@@ -190,17 +190,17 @@ void nvgpu_channel_abort_clean_up(struct nvgpu_channel *ch)
 * When closing the channel, this scheduled update holds one ref which
 * is waited for before advancing with freeing.
 */
-gk20a_channel_update(ch);
+nvgpu_channel_update(ch);
 }

-void gk20a_channel_set_unserviceable(struct nvgpu_channel *ch)
+void nvgpu_channel_set_unserviceable(struct nvgpu_channel *ch)
 {
 nvgpu_spinlock_acquire(&ch->unserviceable_lock);
 ch->unserviceable = true;
 nvgpu_spinlock_release(&ch->unserviceable_lock);
 }

-bool gk20a_channel_check_unserviceable(struct nvgpu_channel *ch)
+bool nvgpu_channel_check_unserviceable(struct nvgpu_channel *ch)
 {
 bool unserviceable_status;

@@ -211,7 +211,7 @@ bool gk20a_channel_check_unserviceable(struct nvgpu_channel *ch)
 return unserviceable_status;
 }

-void gk20a_channel_abort(struct nvgpu_channel *ch, bool channel_preempt)
+void nvgpu_channel_abort(struct nvgpu_channel *ch, bool channel_preempt)
 {
 struct nvgpu_tsg *tsg = nvgpu_tsg_from_ch(ch);

@@ -224,39 +224,6 @@ void gk20a_channel_abort(struct nvgpu_channel *ch, bool channel_preempt)
 }
 }

-int gk20a_wait_channel_idle(struct nvgpu_channel *ch)
-{
-bool channel_idle = false;
-struct nvgpu_timeout timeout;
-int ret;
-
-ret = nvgpu_timeout_init(ch->g, &timeout, nvgpu_get_poll_timeout(ch->g),
-NVGPU_TIMER_CPU_TIMER);
-if (ret != 0) {
-nvgpu_err(ch->g, "timeout_init failed: %d", ret);
-return ret;
-}
-
-do {
-channel_gk20a_joblist_lock(ch);
-channel_idle = channel_gk20a_joblist_is_empty(ch);
-channel_gk20a_joblist_unlock(ch);
-if (channel_idle) {
-break;
-}
-
-nvgpu_usleep_range(1000, 3000);
-} while (nvgpu_timeout_expired(&timeout) == 0);
-
-if (!channel_idle) {
-nvgpu_err(ch->g, "jobs not freed for channel %d",
-ch->chid);
-return -EBUSY;
-}
-
-return 0;
-}
-
 void gk20a_wait_until_counter_is_N(
 struct nvgpu_channel *ch, nvgpu_atomic_t *counter, int wait_value,
 struct nvgpu_cond *c, const char *caller, const char *counter_name)
@@ -386,7 +353,7 @@ static void gk20a_free_channel(struct nvgpu_channel *ch, bool force)
 nvgpu_mutex_release(&g->fifo.engines_reset_mutex);
 }

-if (!gk20a_channel_as_bound(ch)) {
+if (!nvgpu_channel_as_bound(ch)) {
 goto unbind;
 }

@@ -404,7 +371,7 @@ static void gk20a_free_channel(struct nvgpu_channel *ch, bool force)
 }

 if (ch->usermode_submit_enabled) {
-gk20a_channel_free_usermode_buffers(ch);
+nvgpu_channel_free_usermode_buffers(ch);
 (void) nvgpu_userd_init_channel(g, ch);
 ch->usermode_submit_enabled = false;
 }
@@ -428,7 +395,7 @@ static void gk20a_free_channel(struct nvgpu_channel *ch, bool force)
 * Set user managed syncpoint to safe state
 * But it's already done if channel is recovered
 */
-if (gk20a_channel_check_unserviceable(ch)) {
+if (nvgpu_channel_check_unserviceable(ch)) {
 nvgpu_channel_sync_destroy(ch->user_sync, false);
 } else {
 nvgpu_channel_sync_destroy(ch->user_sync, true);
@@ -500,7 +467,7 @@ unbind:
 nvgpu_mutex_release(&g->dbg_sessions_lock);

 /* free pre-allocated resources, if applicable */
-if (channel_gk20a_is_prealloc_enabled(ch)) {
+if (nvgpu_channel_is_prealloc_enabled(ch)) {
 channel_gk20a_free_prealloc_resources(ch);
 }

@@ -562,7 +529,7 @@ static void gk20a_channel_dump_ref_actions(struct nvgpu_channel *ch)
 }

 static void gk20a_channel_save_ref_source(struct nvgpu_channel *ch,
-enum channel_gk20a_ref_action_type type)
+enum nvgpu_channel_ref_action_type type)
 {
 #if GK20A_CHANNEL_REFCOUNT_TRACKING
 struct nvgpu_channel_ref_action *act;
@@ -587,7 +554,7 @@ static void gk20a_channel_save_ref_source(struct nvgpu_channel *ch,
 /* Try to get a reference to the channel. Return nonzero on success. If fails,
 * the channel is dead or being freed elsewhere and you must not touch it.
 *
-* Always when a channel_gk20a pointer is seen and about to be used, a
+* Always when a nvgpu_channel pointer is seen and about to be used, a
 * reference must be held to it - either by you or the caller, which should be
 * documented well or otherwise clearly seen. This usually boils down to the
 * file from ioctls directly, or an explicit get in exception handlers when the
@@ -649,7 +616,7 @@ struct nvgpu_channel *nvgpu_channel_from_id__func(struct gk20a *g,
 return nvgpu_channel_get__func(&g->fifo.channel[chid], caller);
 }

-void gk20a_channel_close(struct nvgpu_channel *ch)
+void nvgpu_channel_close(struct nvgpu_channel *ch)
 {
 gk20a_free_channel(ch, false);
 }
@@ -856,7 +823,7 @@ static void nvgpu_channel_free_priv_cmd_q(struct nvgpu_channel *ch)
 }

 /* allocate a cmd buffer with given size. size is number of u32 entries */
-int gk20a_channel_alloc_priv_cmdbuf(struct nvgpu_channel *c, u32 orig_size,
+int nvgpu_channel_alloc_priv_cmdbuf(struct nvgpu_channel *c, u32 orig_size,
 struct priv_cmd_entry *e)
 {
 struct priv_cmd_queue *q = &c->priv_cmd_q;
@@ -925,26 +892,26 @@ int gk20a_channel_alloc_priv_cmdbuf(struct nvgpu_channel *c, u32 orig_size,
 void nvgpu_channel_free_priv_cmd_entry(struct nvgpu_channel *c,
 struct priv_cmd_entry *e)
 {
-if (channel_gk20a_is_prealloc_enabled(c)) {
+if (nvgpu_channel_is_prealloc_enabled(c)) {
 (void) memset(e, 0, sizeof(struct priv_cmd_entry));
 } else {
 nvgpu_kfree(c->g, e);
 }
 }

-int channel_gk20a_alloc_job(struct nvgpu_channel *c,
+int nvgpu_gk20a_alloc_job(struct nvgpu_channel *c,
 struct nvgpu_channel_job **job_out)
 {
 int err = 0;

-if (channel_gk20a_is_prealloc_enabled(c)) {
+if (nvgpu_channel_is_prealloc_enabled(c)) {
 unsigned int put = c->joblist.pre_alloc.put;
 unsigned int get = c->joblist.pre_alloc.get;

 /*
 * ensure all subsequent reads happen after reading get.
 * see corresponding nvgpu_smp_wmb in
-* gk20a_channel_clean_up_jobs()
+* nvgpu_channel_clean_up_jobs()
 */
 nvgpu_smp_rmb();

@@ -966,7 +933,7 @@ int channel_gk20a_alloc_job(struct nvgpu_channel *c,
 return err;
 }

-void channel_gk20a_free_job(struct nvgpu_channel *c,
+void nvgpu_channel_free_job(struct nvgpu_channel *c,
 struct nvgpu_channel_job *job)
 {
 /*
@@ -974,7 +941,7 @@ void channel_gk20a_free_job(struct nvgpu_channel *c,
 * the job but maintain the pointers to the priv_cmd_entry,
 * since they're inherently tied to the job node.
 */
-if (channel_gk20a_is_prealloc_enabled(c)) {
+if (nvgpu_channel_is_prealloc_enabled(c)) {
 struct priv_cmd_entry *wait_cmd = job->wait_cmd;
 struct priv_cmd_entry *incr_cmd = job->incr_cmd;
 (void) memset(job, 0, sizeof(*job));
@@ -985,18 +952,18 @@ void channel_gk20a_free_job(struct nvgpu_channel *c,
 }
 }

-void channel_gk20a_joblist_lock(struct nvgpu_channel *c)
+void nvgpu_channel_joblist_lock(struct nvgpu_channel *c)
 {
-if (channel_gk20a_is_prealloc_enabled(c)) {
+if (nvgpu_channel_is_prealloc_enabled(c)) {
 nvgpu_mutex_acquire(&c->joblist.pre_alloc.read_lock);
 } else {
 nvgpu_spinlock_acquire(&c->joblist.dynamic.lock);
 }
 }

-void channel_gk20a_joblist_unlock(struct nvgpu_channel *c)
+void nvgpu_channel_joblist_unlock(struct nvgpu_channel *c)
 {
-if (channel_gk20a_is_prealloc_enabled(c)) {
+if (nvgpu_channel_is_prealloc_enabled(c)) {
 nvgpu_mutex_release(&c->joblist.pre_alloc.read_lock);
 } else {
 nvgpu_spinlock_release(&c->joblist.dynamic.lock);
@@ -1009,8 +976,8 @@ static struct nvgpu_channel_job *channel_gk20a_joblist_peek(
 u32 get;
 struct nvgpu_channel_job *job = NULL;

-if (channel_gk20a_is_prealloc_enabled(c)) {
-if (!channel_gk20a_joblist_is_empty(c)) {
+if (nvgpu_channel_is_prealloc_enabled(c)) {
+if (!nvgpu_channel_joblist_is_empty(c)) {
 get = c->joblist.pre_alloc.get;
 job = &c->joblist.pre_alloc.jobs[get];
 }
@@ -1027,7 +994,7 @@ static struct nvgpu_channel_job *channel_gk20a_joblist_peek(
 static void channel_gk20a_joblist_add(struct nvgpu_channel *c,
 struct nvgpu_channel_job *job)
 {
-if (channel_gk20a_is_prealloc_enabled(c)) {
+if (nvgpu_channel_is_prealloc_enabled(c)) {
 c->joblist.pre_alloc.put = (c->joblist.pre_alloc.put + 1U) %
 (c->joblist.pre_alloc.length);
 } else {
@@ -1038,7 +1005,7 @@ static void channel_gk20a_joblist_add(struct nvgpu_channel *c,
 static void channel_gk20a_joblist_delete(struct nvgpu_channel *c,
 struct nvgpu_channel_job *job)
 {
-if (channel_gk20a_is_prealloc_enabled(c)) {
+if (nvgpu_channel_is_prealloc_enabled(c)) {
 c->joblist.pre_alloc.get = (c->joblist.pre_alloc.get + 1U) %
 (c->joblist.pre_alloc.length);
 } else {
@@ -1046,9 +1013,9 @@ static void channel_gk20a_joblist_delete(struct nvgpu_channel *c,
 }
 }

-bool channel_gk20a_joblist_is_empty(struct nvgpu_channel *c)
+bool nvgpu_channel_joblist_is_empty(struct nvgpu_channel *c)
 {
-if (channel_gk20a_is_prealloc_enabled(c)) {
+if (nvgpu_channel_is_prealloc_enabled(c)) {

 unsigned int get = c->joblist.pre_alloc.get;
 unsigned int put = c->joblist.pre_alloc.put;
@@ -1059,7 +1026,7 @@ bool channel_gk20a_joblist_is_empty(struct nvgpu_channel *c)
 return nvgpu_list_empty(&c->joblist.dynamic.jobs);
 }

-bool channel_gk20a_is_prealloc_enabled(struct nvgpu_channel *c)
+bool nvgpu_channel_is_prealloc_enabled(struct nvgpu_channel *c)
 {
 bool pre_alloc_enabled = c->joblist.pre_alloc.enabled;

@@ -1075,7 +1042,7 @@ static int channel_gk20a_prealloc_resources(struct nvgpu_channel *ch,
 size_t size;
 struct priv_cmd_entry *entries = NULL;

-if ((channel_gk20a_is_prealloc_enabled(ch)) || (num_jobs == 0U)) {
+if ((nvgpu_channel_is_prealloc_enabled(ch)) || (num_jobs == 0U)) {
 return -EINVAL;
 }

@@ -1129,7 +1096,7 @@ static int channel_gk20a_prealloc_resources(struct nvgpu_channel *ch,
 /*
 * commit the previous writes before setting the flag.
 * see corresponding nvgpu_smp_rmb in
-* channel_gk20a_is_prealloc_enabled()
+* nvgpu_channel_is_prealloc_enabled()
 */
 nvgpu_smp_wmb();
 ch->joblist.pre_alloc.enabled = true;
@@ -1154,7 +1121,7 @@ static void channel_gk20a_free_prealloc_resources(struct nvgpu_channel *c)
 /*
 * commit the previous writes before disabling the flag.
 * see corresponding nvgpu_smp_rmb in
-* channel_gk20a_is_prealloc_enabled()
+* nvgpu_channel_is_prealloc_enabled()
 */
 nvgpu_smp_wmb();
 c->joblist.pre_alloc.enabled = false;
@@ -1253,7 +1220,7 @@ int nvgpu_channel_setup_bind(struct nvgpu_channel *c,
 }

 /* an address space needs to have been bound at this point. */
-if (!gk20a_channel_as_bound(c)) {
+if (!nvgpu_channel_as_bound(c)) {
 nvgpu_err(g,
 "not bound to an address space at time of setup_bind");
 err = -EINVAL;
@@ -1366,7 +1333,7 @@ int nvgpu_channel_setup_bind(struct nvgpu_channel *c,
 goto clean_up_prealloc;
 }

-err = channel_gk20a_update_runlist(c, true);
+err = nvgpu_channel_update_runlist(c, true);
 if (err != 0) {
 goto clean_up_priv_cmd;
 }
@@ -1391,7 +1358,7 @@ clean_up_unmap:
 nvgpu_big_free(g, c->gpfifo.pipe);
 nvgpu_dma_unmap_free(ch_vm, &c->gpfifo.mem);
 if (c->usermode_submit_enabled) {
-gk20a_channel_free_usermode_buffers(c);
+nvgpu_channel_free_usermode_buffers(c);
 (void) nvgpu_userd_init_channel(g, c);
 c->usermode_submit_enabled = false;
 }
@@ -1408,7 +1375,7 @@ clean_up_idle:
 return err;
 }

-void gk20a_channel_free_usermode_buffers(struct nvgpu_channel *c)
+void nvgpu_channel_free_usermode_buffers(struct nvgpu_channel *c)
 {
 if (nvgpu_mem_is_valid(&c->usermode_userd)) {
 nvgpu_dma_free(c->g, &c->usermode_userd);
@@ -1453,7 +1420,7 @@ static void nvgpu_channel_set_has_timedout_and_wakeup_wqs(struct gk20a *g,
 struct nvgpu_channel *ch)
 {
 /* mark channel as faulted */
-gk20a_channel_set_unserviceable(ch);
+nvgpu_channel_set_unserviceable(ch);

 /* unblock pending waits */
 if (nvgpu_cond_broadcast_interruptible(&ch->semaphore_wq) != 0) {
@@ -1523,7 +1490,7 @@ static void nvgpu_channel_wdt_init(struct nvgpu_channel *ch)
 struct gk20a *g = ch->g;
 int ret;

-if (gk20a_channel_check_unserviceable(ch)) {
+if (nvgpu_channel_check_unserviceable(ch)) {
 ch->wdt.running = false;
 return;
 }
@@ -1650,7 +1617,7 @@ void nvgpu_channel_wdt_restart_all_channels(struct gk20a *g)
 struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);

 if (ch != NULL) {
-if (!gk20a_channel_check_unserviceable(ch)) {
+if (!nvgpu_channel_check_unserviceable(ch)) {
 nvgpu_channel_wdt_rewind(ch);
 }
 nvgpu_channel_put(ch);
@@ -1678,7 +1645,7 @@ static void nvgpu_channel_wdt_handler(struct nvgpu_channel *ch)

 nvgpu_log_fn(g, " ");

-if (gk20a_channel_check_unserviceable(ch)) {
+if (nvgpu_channel_check_unserviceable(ch)) {
 /* channel is already recovered */
 if (nvgpu_channel_wdt_stop(ch) == true) {
 nvgpu_info(g, "chid: %d unserviceable but wdt was ON",
@@ -1755,7 +1722,7 @@ static void nvgpu_channel_poll_wdt(struct gk20a *g)
 struct nvgpu_channel *ch = nvgpu_channel_from_id(g, chid);

 if (ch != NULL) {
-if (!gk20a_channel_check_unserviceable(ch)) {
+if (!nvgpu_channel_check_unserviceable(ch)) {
 nvgpu_channel_wdt_check(ch);
 }
 nvgpu_channel_put(ch);
@@ -1823,13 +1790,13 @@ static u32 nvgpu_channel_worker_poll_wakeup_condition_get_timeout(
 static void nvgpu_channel_worker_poll_wakeup_process_item(
 struct nvgpu_list_node *work_item)
 {
-struct nvgpu_channel *ch = channel_gk20a_from_worker_item(work_item);
+struct nvgpu_channel *ch = nvgpu_channel_from_worker_item(work_item);

 nvgpu_assert(ch != NULL);

 nvgpu_log_fn(ch->g, " ");

-gk20a_channel_clean_up_jobs(ch, true);
+nvgpu_channel_clean_up_jobs(ch, true);

 /* ref taken when enqueued */
 nvgpu_channel_put(ch);
@@ -1927,14 +1894,14 @@ void nvgpu_channel_update_priv_cmd_q_and_free_entry(
 nvgpu_channel_free_priv_cmd_entry(ch, e);
 }

-int gk20a_channel_add_job(struct nvgpu_channel *c,
+int nvgpu_channel_add_job(struct nvgpu_channel *c,
 struct nvgpu_channel_job *job,
 bool skip_buffer_refcounting)
 {
 struct vm_gk20a *vm = c->vm;
 struct nvgpu_mapped_buf **mapped_buffers = NULL;
 int err = 0, num_mapped_buffers = 0;
-bool pre_alloc_enabled = channel_gk20a_is_prealloc_enabled(c);
+bool pre_alloc_enabled = nvgpu_channel_is_prealloc_enabled(c);

 if (!skip_buffer_refcounting) {
 err = nvgpu_vm_get_buffers(vm, &mapped_buffers,
@@ -1959,19 +1926,19 @@ int gk20a_channel_add_job(struct nvgpu_channel *c,
 #endif

 if (!pre_alloc_enabled) {
-channel_gk20a_joblist_lock(c);
+nvgpu_channel_joblist_lock(c);
 }

 /*
 * ensure all pending write complete before adding to the list.
 * see corresponding nvgpu_smp_rmb in
-* gk20a_channel_clean_up_jobs()
+* nvgpu_channel_clean_up_jobs()
 */
 nvgpu_smp_wmb();
 channel_gk20a_joblist_add(c, job);

 if (!pre_alloc_enabled) {
-channel_gk20a_joblist_unlock(c);
+nvgpu_channel_joblist_unlock(c);
 }
 } else {
 err = -ETIMEDOUT;
@@ -1996,7 +1963,7 @@ err_put_buffers:
 * per-job memory for completed jobs; in case of preallocated resources, this
 * opens up slots for new jobs to be submitted.
 */
-void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,
+void nvgpu_channel_clean_up_jobs(struct nvgpu_channel *c,
 bool clean_all)
 {
 struct vm_gk20a *vm;
@@ -2036,24 +2003,24 @@ void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,
 while (true) {
 bool completed;

-channel_gk20a_joblist_lock(c);
-if (channel_gk20a_joblist_is_empty(c)) {
+nvgpu_channel_joblist_lock(c);
+if (nvgpu_channel_joblist_is_empty(c)) {
 /*
 * No jobs in flight, timeout will remain stopped until
 * new jobs are submitted.
 */
-channel_gk20a_joblist_unlock(c);
+nvgpu_channel_joblist_unlock(c);
 break;
 }

 /*
 * ensure that all subsequent reads occur after checking
 * that we have a valid node. see corresponding nvgpu_smp_wmb in
-* gk20a_channel_add_job().
+* nvgpu_channel_add_job().
 */
 nvgpu_smp_rmb();
 job = channel_gk20a_joblist_peek(c);
-channel_gk20a_joblist_unlock(c);
+nvgpu_channel_joblist_unlock(c);

 completed = nvgpu_fence_is_expired(job->post_fence);
 if (!completed) {
@@ -2099,12 +2066,12 @@ void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,

 /*
 * Remove job from channel's job list before we close the
-* fences, to prevent other callers (gk20a_channel_abort) from
+* fences, to prevent other callers (nvgpu_channel_abort) from
 * trying to dereference post_fence when it no longer exists.
 */
-channel_gk20a_joblist_lock(c);
+nvgpu_channel_joblist_lock(c);
 channel_gk20a_joblist_delete(c, job);
-channel_gk20a_joblist_unlock(c);
+nvgpu_channel_joblist_unlock(c);

 /* Close the fence (this will unref the semaphore and release
 * it to the pool). */
@@ -2127,11 +2094,11 @@ void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,

 /*
 * ensure all pending writes complete before freeing up the job.
-* see corresponding nvgpu_smp_rmb in channel_gk20a_alloc_job().
+* see corresponding nvgpu_smp_rmb in nvgpu_gk20a_alloc_job().
 */
 nvgpu_smp_wmb();

-channel_gk20a_free_job(c, job);
+nvgpu_channel_free_job(c, job);
 job_finished = true;

 /*
@@ -2167,13 +2134,13 @@ void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,
 * safe to call even if there is nothing to clean up. Any visible actions on
 * jobs just before calling this are guaranteed to be processed.
 */
-void gk20a_channel_update(struct nvgpu_channel *c)
+void nvgpu_channel_update(struct nvgpu_channel *c)
 {
 if (!c->g->power_on) { /* shutdown case */
 return;
 }

-trace_gk20a_channel_update(c->chid);
+trace_nvgpu_channel_update(c->chid);
 /* A queued channel is always checked for job cleanup. */
 gk20a_channel_worker_enqueue(c);
 }
@@ -2299,7 +2266,7 @@ void nvgpu_channel_cleanup_sw(struct gk20a *g)
 nvgpu_mutex_destroy(&f->free_chs_mutex);
 }

-int gk20a_init_channel_support(struct gk20a *g, u32 chid)
+int nvgpu_channel_init_support(struct gk20a *g, u32 chid)
 {
 struct nvgpu_channel *c = g->fifo.channel+chid;
 int err;
@@ -2407,7 +2374,7 @@ int nvgpu_channel_setup_sw(struct gk20a *g)
 nvgpu_init_list_node(&f->free_chs);

 for (chid = 0; chid < f->num_channels; chid++) {
-err = gk20a_init_channel_support(g, chid);
+err = nvgpu_channel_init_support(g, chid);
 if (err != 0) {
 nvgpu_err(g, "channel init failed, chid=%u", chid);
 goto clean_up;
@@ -2446,7 +2413,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
 if (ch == NULL) {
 continue;
 }
-if (gk20a_channel_check_unserviceable(ch)) {
+if (nvgpu_channel_check_unserviceable(ch)) {
 nvgpu_log_info(g, "do not suspend recovered "
 "channel %d", chid);
 } else {
@@ -2479,7 +2446,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
 nvgpu_channel_from_id(g, chid);

 if (ch != NULL) {
-if (gk20a_channel_check_unserviceable(ch)) {
+if (nvgpu_channel_check_unserviceable(ch)) {
 nvgpu_log_info(g, "do not unbind "
 "recovered channel %d",
 chid);
@@ -2510,7 +2477,7 @@ void nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g)
 if (ch == NULL) {
 continue;
 }
-if (gk20a_channel_check_unserviceable(ch)) {
+if (nvgpu_channel_check_unserviceable(ch)) {
 nvgpu_log_info(g, "do not resume recovered "
 "channel %d", chid);
 } else {
@@ -2573,7 +2540,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
 * semaphore.
 */
 if (!c->deterministic) {
-gk20a_channel_update(c);
+nvgpu_channel_update(c);
 }
 }
 nvgpu_channel_put(c);
@@ -96,7 +96,7 @@ static u32 nvgpu_runlist_append_tsg(struct gk20a *g,
 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 /* add runnable channels bound to this TSG */
 nvgpu_list_for_each_entry(ch, &tsg->ch_list,
-channel_gk20a, ch_entry) {
+nvgpu_channel, ch_entry) {
 if (!test_bit((int)ch->chid,
 runlist->active_channels)) {
 continue;
@@ -51,7 +51,7 @@ static int nvgpu_submit_prepare_syncs(struct nvgpu_channel *c,
 int wait_fence_fd = -1;
 int err = 0;
 bool need_wfi = (flags & NVGPU_SUBMIT_FLAGS_SUPPRESS_WFI) == 0U;
-bool pre_alloc_enabled = channel_gk20a_is_prealloc_enabled(c);
+bool pre_alloc_enabled = nvgpu_channel_is_prealloc_enabled(c);
 struct nvgpu_channel_sync_syncpt *sync_syncpt = NULL;
 bool flag_fence_get = (flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) != 0U;
 bool flag_sync_fence = (flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE) != 0U;
@@ -349,7 +349,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 return -ENODEV;
 }

-if (gk20a_channel_check_unserviceable(c)) {
+if (nvgpu_channel_check_unserviceable(c)) {
 return -ETIMEDOUT;
 }

@@ -375,7 +375,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 }

 /* an address space needs to have been bound at this point. */
-if (!gk20a_channel_as_bound(c)) {
+if (!nvgpu_channel_as_bound(c)) {
 nvgpu_err(g,
 "not bound to an address space at time of gpfifo"
 " submission.");
@@ -418,7 +418,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 * job tracking is required, the channel must have
 * pre-allocated resources. Otherwise, we fail the submit here
 */
-if (c->deterministic && !channel_gk20a_is_prealloc_enabled(c)) {
+if (c->deterministic && !nvgpu_channel_is_prealloc_enabled(c)) {
 return -EINVAL;
 }

@@ -460,7 +460,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 /*
 * Get a power ref unless this is a deterministic
 * channel that holds them during the channel lifetime.
-* This one is released by gk20a_channel_clean_up_jobs,
+* This one is released by nvgpu_channel_clean_up_jobs,
 * via syncpt or sema interrupt, whichever is used.
 */
 err = gk20a_busy(g);
@@ -474,7 +474,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,

 if (!need_deferred_cleanup) {
 /* clean up a single job */
-gk20a_channel_clean_up_jobs(c, false);
+nvgpu_channel_clean_up_jobs(c, false);
 }
 }

@@ -519,13 +519,13 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 }
 }

-if (gk20a_channel_check_unserviceable(c)) {
+if (nvgpu_channel_check_unserviceable(c)) {
 err = -ETIMEDOUT;
 goto clean_up;
 }

 if (need_job_tracking) {
-err = channel_gk20a_alloc_job(c, &job);
+err = nvgpu_gk20a_alloc_job(c, &job);
 if (err != 0) {
 goto clean_up;
 }
@@ -565,7 +565,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 }

 if (need_job_tracking) {
-err = gk20a_channel_add_job(c, job, skip_buffer_refcounting);
+err = nvgpu_channel_add_job(c, job, skip_buffer_refcounting);
 if (err != 0) {
 goto clean_up_job;
 }
@@ -595,7 +595,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 return err;

 clean_up_job:
-channel_gk20a_free_job(c, job);
+nvgpu_channel_free_job(c, job);
 clean_up:
 nvgpu_log_fn(g, "fail");
 nvgpu_fence_put(post_fence);
@@ -40,7 +40,7 @@ void nvgpu_tsg_disable(struct nvgpu_tsg *tsg)
 struct nvgpu_channel *ch;

 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 g->ops.channel.disable(ch);
 }
 nvgpu_rwsem_up_read(&tsg->ch_list_lock);
@@ -147,7 +147,7 @@ int nvgpu_tsg_unbind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)

 nvgpu_tsg_abort(g, tsg, true);
 /* If channel unbind fails, channel is still part of runlist */
-if (channel_gk20a_update_runlist(ch, false) != 0) {
+if (nvgpu_channel_update_runlist(ch, false) != 0) {
 nvgpu_err(g,
 "remove ch %u from runlist failed", ch->chid);
 }
@@ -176,7 +176,7 @@ int nvgpu_tsg_unbind_channel_common(struct nvgpu_tsg *tsg,

 /* If one channel in TSG times out, we disable all channels */
 nvgpu_rwsem_down_write(&tsg->ch_list_lock);
-tsg_timedout = gk20a_channel_check_unserviceable(ch);
+tsg_timedout = nvgpu_channel_check_unserviceable(ch);
 nvgpu_rwsem_up_write(&tsg->ch_list_lock);

 /* Disable TSG and examine status before unbinding channel */
@@ -197,7 +197,7 @@ int nvgpu_tsg_unbind_channel_common(struct nvgpu_tsg *tsg,
 }

 /* Channel should be seen as TSG channel while updating runlist */
-err = channel_gk20a_update_runlist(ch, false);
+err = nvgpu_channel_update_runlist(ch, false);
 if (err != 0) {
 nvgpu_err(g, "update runlist failed ch:%u tsg:%u",
 ch->chid, tsg->tsgid);
@@ -275,7 +275,7 @@ void nvgpu_tsg_unbind_channel_check_ctx_reload(struct nvgpu_tsg *tsg,
 if (hw_state->ctx_reload) {
 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 nvgpu_list_for_each_entry(temp_ch, &tsg->ch_list,
-channel_gk20a, ch_entry) {
+nvgpu_channel, ch_entry) {
 if (temp_ch->chid != ch->chid) {
 g->ops.channel.force_ctx_reload(temp_ch);
 break;
@@ -397,7 +397,7 @@ bool nvgpu_tsg_mark_error(struct gk20a *g,
 bool verbose = false;

 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 if (nvgpu_channel_get(ch) != NULL) {
 if (nvgpu_channel_mark_error(g, ch)) {
 verbose = true;
@@ -416,7 +416,7 @@ void nvgpu_tsg_set_ctxsw_timeout_accumulated_ms(struct nvgpu_tsg *tsg, u32 ms)
 struct nvgpu_channel *ch = NULL;

 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 if (nvgpu_channel_get(ch) != NULL) {
 ch->ctxsw_timeout_accumulated_ms = ms;
 nvgpu_channel_put(ch);
@@ -431,7 +431,7 @@ bool nvgpu_tsg_ctxsw_timeout_debug_dump_state(struct nvgpu_tsg *tsg)
 bool verbose = false;

 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 if (nvgpu_channel_get(ch) != NULL) {
 if (ch->ctxsw_timeout_debug_dump) {
 verbose = true;
@@ -450,7 +450,7 @@ void nvgpu_tsg_set_error_notifier(struct gk20a *g, struct nvgpu_tsg *tsg,
 struct nvgpu_channel *ch = NULL;

 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 if (nvgpu_channel_get(ch) != NULL) {
 nvgpu_channel_set_error_notifier(g, ch, error_notifier);
 nvgpu_channel_put(ch);
@@ -484,7 +484,7 @@ bool nvgpu_tsg_check_ctxsw_timeout(struct nvgpu_tsg *tsg,
 * fifo recovery is needed if at least one channel reached the
 * maximum timeout without progress (update in gpfifo pointers).
 */
-nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 if (nvgpu_channel_get(ch) != NULL) {
 recover = nvgpu_channel_update_and_check_ctxsw_timeout(ch,
 *ms, &progress);
@@ -859,9 +859,9 @@ void nvgpu_tsg_abort(struct gk20a *g, struct nvgpu_tsg *tsg, bool preempt)
 }

 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 if (nvgpu_channel_get(ch) != NULL) {
-gk20a_channel_set_unserviceable(ch);
+nvgpu_channel_set_unserviceable(ch);
 if (g->ops.channel.abort_clean_up != NULL) {
 g->ops.channel.abort_clean_up(ch);
 }
@@ -887,7 +887,7 @@ void nvgpu_tsg_reset_faulted_eng_pbdma(struct gk20a *g, struct nvgpu_tsg *tsg,
 nvgpu_log(g, gpu_dbg_info, "reset faulted eng and pbdma bits in ccsr");

 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 g->ops.channel.reset_faulted(g, ch, eng, pbdma);
 }
 nvgpu_rwsem_up_read(&tsg->ch_list_lock);
@@ -110,13 +110,13 @@ int nvgpu_userd_init_channel(struct gk20a *g, struct nvgpu_channel *c)
 c->userd_mem = mem;
 c->userd_offset = (c->chid % f->num_channels_per_slab) *
 f->userd_entry_size;
-c->userd_iova = gk20a_channel_userd_addr(c);
+c->userd_iova = nvgpu_channel_userd_addr(c);

 nvgpu_log(g, gpu_dbg_info,
 "chid=%u slab=%u mem=%p offset=%u addr=%llx gpu_va=%llx",
 c->chid, slab, mem, c->userd_offset,
-gk20a_channel_userd_addr(c),
-gk20a_channel_userd_gpu_va(c));
+nvgpu_channel_userd_addr(c),
+nvgpu_channel_userd_gpu_va(c));

 done:
 nvgpu_mutex_release(&f->userd_mutex);
@@ -106,7 +106,7 @@ int nvgpu_gr_setup_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num,
 nvgpu_log_fn(g, " ");

 /* an address space needs to have been bound at this point.*/
-if (!gk20a_channel_as_bound(c) && (c->vm == NULL)) {
+if (!nvgpu_channel_as_bound(c) && (c->vm == NULL)) {
 nvgpu_err(g,
 "not bound to address space at time"
 " of grctx allocation");
@@ -70,7 +70,7 @@ int nvgpu_vm_bind_channel(struct vm_gk20a *vm, struct nvgpu_channel *ch)

 nvgpu_vm_get(vm);
 ch->vm = vm;
-err = channel_gk20a_commit_va(ch);
+err = nvgpu_channel_commit_va(ch);
 if (err != 0) {
 ch->vm = NULL;
 }
@@ -154,7 +154,7 @@ static int channel_sync_semaphore_wait_fd(
 }

 wait_cmd_size = c->g->ops.sync.sema.get_wait_cmd_size();
-err = gk20a_channel_alloc_priv_cmdbuf(c,
+err = nvgpu_channel_alloc_priv_cmdbuf(c,
 wait_cmd_size * num_fences, entry);
 if (err != 0) {
 nvgpu_err(c->g, "not enough priv cmd buffer space");
@@ -195,7 +195,7 @@ static int channel_sync_semaphore_incr_common(
 }

 incr_cmd_size = c->g->ops.sync.sema.get_incr_cmd_size();
-err = gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, incr_cmd);
+err = nvgpu_channel_alloc_priv_cmdbuf(c, incr_cmd_size, incr_cmd);
 if (err != 0) {
 nvgpu_err(c->g,
 "not enough priv cmd buffer space");
@@ -70,7 +70,7 @@ static int channel_sync_syncpt_gen_wait_cmd(struct nvgpu_channel *c,
 }
 } else {
 if (!preallocated) {
-err = gk20a_channel_alloc_priv_cmdbuf(c,
+err = nvgpu_channel_alloc_priv_cmdbuf(c,
 c->g->ops.sync.syncpt.get_wait_cmd_size(),
 wait_cmd);
 if (err != 0) {
@@ -150,7 +150,7 @@ static int channel_sync_syncpt_wait_fd(struct nvgpu_channel_sync *s, int fd,
 }

 wait_cmd_size = c->g->ops.sync.syncpt.get_wait_cmd_size();
-err = gk20a_channel_alloc_priv_cmdbuf(c,
+err = nvgpu_channel_alloc_priv_cmdbuf(c,
 wait_cmd_size * num_fences, wait_cmd);
 if (err != 0) {
 nvgpu_err(c->g, "not enough priv cmd buffer space");
@@ -174,7 +174,7 @@ static void channel_sync_syncpt_update(void *priv, int nr_completed)
 {
 struct nvgpu_channel *ch = priv;

-gk20a_channel_update(ch);
+nvgpu_channel_update(ch);

 /* note: channel_get() is in channel_sync_syncpt_incr_common() */
 nvgpu_channel_put(ch);
@@ -194,7 +194,7 @@ static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
 struct nvgpu_channel *c = sp->c;
 struct nvgpu_os_fence os_fence = {0};

-err = gk20a_channel_alloc_priv_cmdbuf(c,
+err = nvgpu_channel_alloc_priv_cmdbuf(c,
 c->g->ops.sync.syncpt.get_incr_cmd_size(wfi_cmd),
 incr_cmd);
 if (err != 0) {
@@ -374,11 +374,11 @@ int vgpu_tsg_force_reset_ch(struct nvgpu_channel *ch,
 nvgpu_rwsem_down_read(&tsg->ch_list_lock);

 nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
-channel_gk20a, ch_entry) {
+nvgpu_channel, ch_entry) {
 if (nvgpu_channel_get(ch_tsg)) {
 nvgpu_channel_set_error_notifier(g, ch_tsg,
 err_code);
-gk20a_channel_set_unserviceable(ch_tsg);
+nvgpu_channel_set_unserviceable(ch_tsg);
 nvgpu_channel_put(ch_tsg);
 }
 }
@@ -394,7 +394,7 @@ int vgpu_tsg_force_reset_ch(struct nvgpu_channel *ch,
 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 WARN_ON(err || msg.ret);
 if (!err) {
-gk20a_channel_abort(ch, false);
+nvgpu_channel_abort(ch, false);
 }
 return err ? err : msg.ret;
 }
@@ -412,7 +412,7 @@ static void vgpu_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
 NVGPU_ERR_NOTIFIER_FIFO_ERROR_MMU_ERR_FLT);

 /* mark channel as faulted */
-gk20a_channel_set_unserviceable(ch);
+nvgpu_channel_set_unserviceable(ch);

 /* unblock pending waits */
 nvgpu_cond_broadcast_interruptible(&ch->semaphore_wq);
@@ -430,7 +430,7 @@ static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g,
 nvgpu_rwsem_down_read(&tsg->ch_list_lock);

 nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
-channel_gk20a, ch_entry) {
+nvgpu_channel, ch_entry) {
 if (nvgpu_channel_get(ch_tsg)) {
 vgpu_fifo_set_ctx_mmu_error_ch(g, ch_tsg);
 nvgpu_channel_put(ch_tsg);
@@ -468,7 +468,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
 break;
 case TEGRA_VGPU_FIFO_INTR_MMU_FAULT:
 vgpu_fifo_set_ctx_mmu_error_ch_tsg(g, ch);
-gk20a_channel_abort(ch, false);
+nvgpu_channel_abort(ch, false);
 break;
 default:
 WARN_ON(1);
@@ -528,7 +528,7 @@ void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
 return;
 }

-gk20a_channel_set_unserviceable(ch);
+nvgpu_channel_set_unserviceable(ch);
 g->ops.channel.abort_clean_up(ch);
 nvgpu_channel_put(ch);
 }
@@ -224,7 +224,7 @@ int vgpu_gr_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num, u32 flags)
 	nvgpu_log_fn(g, " ");

 	/* an address space needs to have been bound at this point.*/
-	if (!gk20a_channel_as_bound(c)) {
+	if (!nvgpu_channel_as_bound(c)) {
 		nvgpu_err(g, "not bound to address space at time"
 			" of grctx allocation");
 		return -EINVAL;
@@ -82,7 +82,7 @@ void vgpu_tsg_enable(struct nvgpu_tsg *tsg)
 	struct nvgpu_channel *ch;

 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		g->ops.channel.enable(ch);
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
@@ -132,7 +132,7 @@ static void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
 		return;
 	}

-	gk20a_channel_set_unserviceable(ch);
+	nvgpu_channel_set_unserviceable(ch);
 	g->ops.fifo.ch_abort_clean_up(ch);
 	nvgpu_channel_put(ch);
 }
@@ -40,7 +40,7 @@ void gk20a_tsg_enable(struct nvgpu_tsg *tsg)
 	 * and then rest of the channels should be enabled
 	 */
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		struct nvgpu_channel_hw_state hw_state;

 		g->ops.channel.read_state(g, ch, &hw_state);
@@ -50,7 +50,7 @@ void gk20a_tsg_enable(struct nvgpu_tsg *tsg)
 		}
 	}

-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		struct nvgpu_channel_hw_state hw_state;

 		g->ops.channel.read_state(g, ch, &hw_state);
@@ -42,7 +42,7 @@ void gv11b_tsg_enable(struct nvgpu_tsg *tsg)
 	struct nvgpu_channel *last_ch = NULL;

 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		g->ops.channel.enable(ch);
 		last_ch = ch;
 	}
@@ -52,7 +52,7 @@ void gk20a_userd_init_mem(struct gk20a *g, struct nvgpu_channel *c)

 u32 gk20a_userd_gp_get(struct gk20a *g, struct nvgpu_channel *c)
 {
-	u64 userd_gpu_va = gk20a_channel_userd_gpu_va(c);
+	u64 userd_gpu_va = nvgpu_channel_userd_gpu_va(c);
 	u64 addr = userd_gpu_va + sizeof(u32) * ram_userd_gp_get_w();

 	BUG_ON(u64_hi32(addr) != 0U);
@@ -62,7 +62,7 @@ u32 gk20a_userd_gp_get(struct gk20a *g, struct nvgpu_channel *c)

 u64 gk20a_userd_pb_get(struct gk20a *g, struct nvgpu_channel *c)
 {
-	u64 userd_gpu_va = gk20a_channel_userd_gpu_va(c);
+	u64 userd_gpu_va = nvgpu_channel_userd_gpu_va(c);
 	u64 lo_addr = userd_gpu_va + sizeof(u32) * ram_userd_get_w();
 	u64 hi_addr = userd_gpu_va + sizeof(u32) * ram_userd_get_hi_w();
 	u32 lo, hi;
@@ -76,7 +76,7 @@ u64 gk20a_userd_pb_get(struct gk20a *g, struct nvgpu_channel *c)

 void gk20a_userd_gp_put(struct gk20a *g, struct nvgpu_channel *c)
 {
-	u64 userd_gpu_va = gk20a_channel_userd_gpu_va(c);
+	u64 userd_gpu_va = nvgpu_channel_userd_gpu_va(c);
 	u64 addr = userd_gpu_va + sizeof(u32) * ram_userd_gp_put_w();

 	BUG_ON(u64_hi32(addr) != 0U);
@@ -168,7 +168,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,

 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);

-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
+	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
 		err = nvgpu_gr_ctx_set_hwpm_mode(g, gr_ctx, false);
 		if (err != 0) {
 			nvgpu_err(g, "chid: %d set_hwpm_mode failed",
@@ -247,7 +247,7 @@ struct nvgpu_channel_wdt {
  * These are zeroed when a channel is closed, so a new one starts fresh.
  */

-enum channel_gk20a_ref_action_type {
+enum nvgpu_channel_ref_action_type {
 	channel_gk20a_ref_action_get,
 	channel_gk20a_ref_action_put
 };
@@ -257,7 +257,7 @@ enum channel_gk20a_ref_action_type {
 #include <linux/stacktrace.h>

 struct nvgpu_channel_ref_action {
-	enum channel_gk20a_ref_action_type type;
+	enum nvgpu_channel_ref_action_type type;
 	s64 timestamp_ms;
 	/*
 	 * Many of these traces will be similar. Simpler to just capture
@@ -383,39 +383,38 @@ struct nvgpu_channel {

 	bool is_privileged_channel;
 };

 static inline struct nvgpu_channel *
-channel_gk20a_from_free_chs(struct nvgpu_list_node *node)
+nvgpu_channel_from_free_chs(struct nvgpu_list_node *node)
 {
 	return (struct nvgpu_channel *)
 		((uintptr_t)node - offsetof(struct nvgpu_channel, free_chs));
 };

 static inline struct nvgpu_channel *
-channel_gk20a_from_ch_entry(struct nvgpu_list_node *node)
+nvgpu_channel_from_ch_entry(struct nvgpu_list_node *node)
 {
 	return (struct nvgpu_channel *)
 		((uintptr_t)node - offsetof(struct nvgpu_channel, ch_entry));
 };

 static inline struct nvgpu_channel *
-channel_gk20a_from_worker_item(struct nvgpu_list_node *node)
+nvgpu_channel_from_worker_item(struct nvgpu_list_node *node)
 {
 	return (struct nvgpu_channel *)
 		((uintptr_t)node - offsetof(struct nvgpu_channel, worker_item));
 };

-static inline bool gk20a_channel_as_bound(struct nvgpu_channel *ch)
+static inline bool nvgpu_channel_as_bound(struct nvgpu_channel *ch)
 {
 	return (ch->vm != NULL);
 }
-int channel_gk20a_commit_va(struct nvgpu_channel *c);
-int gk20a_init_channel_support(struct gk20a *g, u32 chid);
+int nvgpu_channel_commit_va(struct nvgpu_channel *c);
+int nvgpu_channel_init_support(struct gk20a *g, u32 chid);
 int nvgpu_channel_setup_sw(struct gk20a *g);
 void nvgpu_channel_cleanup_sw(struct gk20a *g);

 /* must be inside gk20a_busy()..gk20a_idle() */
-void gk20a_channel_close(struct nvgpu_channel *ch);
+void nvgpu_channel_close(struct nvgpu_channel *ch);
 void nvgpu_channel_kill(struct nvgpu_channel *ch);

 void nvgpu_channel_set_ctx_mmu_error(struct gk20a *g,
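The container helpers above are what the list iteration macros resolve to; nvgpu's list macros are assumed to paste the type and member arguments together to form the helper name, which would explain why every nvgpu_list_for_each_entry() call site in this change swaps its type argument from channel_gk20a to nvgpu_channel. A minimal sketch of that assumed convention (example_list_entry is a hypothetical name, not nvgpu's macro):

/* Hypothetical illustration of the assumed token-pasting convention:
 * the macro builds the helper name from its type and member arguments,
 * so renaming the helpers above forces the loop sites to change too. */
#define example_list_entry(ptr, type, member) \
	type ## _from_ ## member(ptr)

/* example_list_entry(node, nvgpu_channel, ch_entry)
 *	expands to nvgpu_channel_from_ch_entry(node) */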
@@ -428,10 +427,10 @@ bool nvgpu_channel_update_and_check_ctxsw_timeout(struct nvgpu_channel *ch,
 void nvgpu_channel_recover(struct gk20a *g, struct nvgpu_channel *ch,
 			bool verbose, u32 rc_type);

-void gk20a_channel_abort(struct nvgpu_channel *ch, bool channel_preempt);
+void nvgpu_channel_abort(struct nvgpu_channel *ch, bool channel_preempt);
 void nvgpu_channel_abort_clean_up(struct nvgpu_channel *ch);
 void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events);
-int gk20a_channel_alloc_priv_cmdbuf(struct nvgpu_channel *c, u32 orig_size,
+int nvgpu_channel_alloc_priv_cmdbuf(struct nvgpu_channel *c, u32 orig_size,
 			struct priv_cmd_entry *e);
 void nvgpu_channel_update_priv_cmd_q_and_free_entry(
 		struct nvgpu_channel *ch, struct priv_cmd_entry *e);
@@ -448,8 +447,8 @@ void gk20a_channel_deterministic_unidle(struct gk20a *g);
 int nvgpu_channel_worker_init(struct gk20a *g);
 void nvgpu_channel_worker_deinit(struct gk20a *g);

-struct nvgpu_channel *gk20a_get_channel_from_file(int fd);
-void gk20a_channel_update(struct nvgpu_channel *c);
+struct nvgpu_channel *nvgpu_channel_get_from_file(int fd);
+void nvgpu_channel_update(struct nvgpu_channel *c);

 /* returns ch if reference was obtained */
 struct nvgpu_channel *__must_check nvgpu_channel_get__func(
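The fd-based lookup keeps its ref-counted contract across the rename: it returns a referenced channel or NULL, and the caller must drop the reference with nvgpu_channel_put(). A minimal caller-side sketch, assuming the <nvgpu/channel.h> include path; example_channel_fd_is_serviceable is a hypothetical helper that mirrors the TSG and dbg ioctl call sites updated later in this change:

#include <nvgpu/channel.h>	/* include path assumed */

/* Hypothetical helper: translate an fd into a channel reference, query
 * its state, and drop the reference again. */
static int example_channel_fd_is_serviceable(int ch_fd)
{
	struct nvgpu_channel *ch;
	int err = 0;

	ch = nvgpu_channel_get_from_file(ch_fd);	/* was gk20a_get_channel_from_file() */
	if (!ch)
		return -EINVAL;

	if (nvgpu_channel_check_unserviceable(ch))	/* was gk20a_channel_check_unserviceable() */
		err = -ETIMEDOUT;

	nvgpu_channel_put(ch);	/* release the lookup's reference */
	return err;
}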
@@ -466,8 +465,6 @@ struct nvgpu_channel *__must_check nvgpu_channel_from_id__func(
 #define nvgpu_channel_from_id(g, chid) \
 	nvgpu_channel_from_id__func(g, chid, __func__)

-int gk20a_wait_channel_idle(struct nvgpu_channel *ch);
-
 /* runlist_id -1 is synonym for NVGPU_ENGINE_GR runlist id */
 struct nvgpu_channel *gk20a_open_new_channel(struct gk20a *g,
 		u32 runlist_id,
@@ -479,32 +476,32 @@ int nvgpu_channel_setup_bind(struct nvgpu_channel *c,

 void nvgpu_channel_wdt_restart_all_channels(struct gk20a *g);

-bool channel_gk20a_is_prealloc_enabled(struct nvgpu_channel *c);
-void channel_gk20a_joblist_lock(struct nvgpu_channel *c);
-void channel_gk20a_joblist_unlock(struct nvgpu_channel *c);
-bool channel_gk20a_joblist_is_empty(struct nvgpu_channel *c);
+bool nvgpu_channel_is_prealloc_enabled(struct nvgpu_channel *c);
+void nvgpu_channel_joblist_lock(struct nvgpu_channel *c);
+void nvgpu_channel_joblist_unlock(struct nvgpu_channel *c);
+bool nvgpu_channel_joblist_is_empty(struct nvgpu_channel *c);

-int channel_gk20a_update_runlist(struct nvgpu_channel *c, bool add);
+int nvgpu_channel_update_runlist(struct nvgpu_channel *c, bool add);

 void gk20a_wait_until_counter_is_N(
 	struct nvgpu_channel *ch, nvgpu_atomic_t *counter, int wait_value,
 	struct nvgpu_cond *c, const char *caller, const char *counter_name);
-int channel_gk20a_alloc_job(struct nvgpu_channel *c,
+int nvgpu_gk20a_alloc_job(struct nvgpu_channel *c,
 		struct nvgpu_channel_job **job_out);
-void channel_gk20a_free_job(struct nvgpu_channel *c,
+void nvgpu_channel_free_job(struct nvgpu_channel *c,
 		struct nvgpu_channel_job *job);
 u32 nvgpu_channel_update_gpfifo_get_and_get_free_count(
 		struct nvgpu_channel *ch);
 u32 nvgpu_channel_get_gpfifo_free_count(struct nvgpu_channel *ch);
-int gk20a_channel_add_job(struct nvgpu_channel *c,
+int nvgpu_channel_add_job(struct nvgpu_channel *c,
 		struct nvgpu_channel_job *job,
 		bool skip_buffer_refcounting);
 void nvgpu_channel_free_priv_cmd_entry(struct nvgpu_channel *c,
 		struct priv_cmd_entry *e);
-void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,
+void nvgpu_channel_clean_up_jobs(struct nvgpu_channel *c,
 		bool clean_all);

-void gk20a_channel_free_usermode_buffers(struct nvgpu_channel *c);
+void nvgpu_channel_free_usermode_buffers(struct nvgpu_channel *c);
 u32 nvgpu_get_gpfifo_entry_size(void);

 int nvgpu_submit_channel_gpfifo_user(struct nvgpu_channel *c,
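The joblist accessors keep their lock/query/unlock split after the rename, so an idle check still brackets the emptiness query with the joblist lock. A minimal sketch with a hypothetical wrapper name; the CDE call site further down in this change uses exactly this sequence:

/* Hypothetical wrapper around the renamed joblist API. */
static bool example_channel_is_idle(struct nvgpu_channel *ch)
{
	bool idle;

	nvgpu_channel_joblist_lock(ch);		/* was channel_gk20a_joblist_lock() */
	idle = nvgpu_channel_joblist_is_empty(ch);
	nvgpu_channel_joblist_unlock(ch);

	return idle;
}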
@@ -530,15 +527,15 @@ static inline void trace_write_pushbuffers(struct nvgpu_channel *c, u32 count)
 }
 #endif

-void gk20a_channel_set_unserviceable(struct nvgpu_channel *ch);
-bool gk20a_channel_check_unserviceable(struct nvgpu_channel *ch);
+void nvgpu_channel_set_unserviceable(struct nvgpu_channel *ch);
+bool nvgpu_channel_check_unserviceable(struct nvgpu_channel *ch);

-static inline u64 gk20a_channel_userd_addr(struct nvgpu_channel *c)
+static inline u64 nvgpu_channel_userd_addr(struct nvgpu_channel *c)
 {
 	return nvgpu_mem_get_addr(c->g, c->userd_mem) + c->userd_offset;
 }

-static inline u64 gk20a_channel_userd_gpu_va(struct nvgpu_channel *c)
+static inline u64 nvgpu_channel_userd_gpu_va(struct nvgpu_channel *c)
 {
 	struct nvgpu_mem *mem = c->userd_mem;
 	return (mem->gpu_va != 0ULL) ? mem->gpu_va + c->userd_offset : 0ULL;
@@ -111,9 +111,9 @@ __must_hold(&cde_app->mutex)

 	/*
 	 * free the channel
-	 * gk20a_channel_close() will also unbind the channel from TSG
+	 * nvgpu_channel_close() will also unbind the channel from TSG
 	 */
-	gk20a_channel_close(ch);
+	nvgpu_channel_close(ch);
 	nvgpu_ref_put(&cde_ctx->tsg->refcount, nvgpu_tsg_release);

 	/* housekeeping on app */
@@ -1258,9 +1258,9 @@ __releases(&cde_app->mutex)
 	struct gk20a_cde_app *cde_app = &l->cde_app;
 	bool channel_idle;

-	channel_gk20a_joblist_lock(ch);
-	channel_idle = channel_gk20a_joblist_is_empty(ch);
-	channel_gk20a_joblist_unlock(ch);
+	nvgpu_channel_joblist_lock(ch);
+	channel_idle = nvgpu_channel_joblist_is_empty(ch);
+	nvgpu_channel_joblist_unlock(ch);

 	if (!channel_idle)
 		return;
@@ -1271,7 +1271,7 @@ __releases(&cde_app->mutex)
 		nvgpu_log_info(g, "double finish cde context %p on channel %p",
 				cde_ctx, ch);

-	if (gk20a_channel_check_unserviceable(ch)) {
+	if (nvgpu_channel_check_unserviceable(ch)) {
 		if (cde_ctx->is_temporary) {
 			nvgpu_warn(g,
 					"cde: channel had timed out"
@@ -1298,7 +1298,7 @@ __releases(&cde_app->mutex)
 				msecs_to_jiffies(CTX_DELETE_TIME));
 	}

-	if (!gk20a_channel_check_unserviceable(ch)) {
+	if (!nvgpu_channel_check_unserviceable(ch)) {
 		gk20a_cde_ctx_release(cde_ctx);
 	}
 }
@@ -55,16 +55,16 @@ static int gk20a_as_ioctl_bind_channel(

 	nvgpu_log_fn(g, " ");

-	ch = gk20a_get_channel_from_file(args->channel_fd);
+	ch = nvgpu_channel_get_from_file(args->channel_fd);
 	if (!ch)
 		return -EINVAL;

-	if (gk20a_channel_as_bound(ch)) {
+	if (nvgpu_channel_as_bound(ch)) {
 		err = -EINVAL;
 		goto out;
 	}

-	/* this will set channel_gk20a->vm */
+	/* this will set nvgpu_channel->vm */
 	err = ch->g->ops.mm.vm_bind_channel(as_share->vm, ch);

 out:
@@ -380,7 +380,7 @@ static int gk20a_init_error_notifier(struct nvgpu_channel *ch,
  *
  * NULL is returned if the channel was not found.
  */
-struct nvgpu_channel *gk20a_get_channel_from_file(int fd)
+struct nvgpu_channel *nvgpu_channel_get_from_file(int fd)
 {
 	struct nvgpu_channel *ch;
 	struct channel_priv *priv;
@@ -425,7 +425,7 @@ int gk20a_channel_release(struct inode *inode, struct file *filp)

 	trace_gk20a_channel_release(dev_name(dev_from_gk20a(g)));

-	gk20a_channel_close(ch);
+	nvgpu_channel_close(ch);
 	gk20a_channel_free_error_notifiers(ch);

 	gk20a_idle(g);
@@ -636,7 +636,7 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 	int ret = 0;

 	/* do not wait if channel has timed out */
-	if (gk20a_channel_check_unserviceable(ch)) {
+	if (nvgpu_channel_check_unserviceable(ch)) {
 		return -ETIMEDOUT;
 	}

@@ -658,7 +658,7 @@ static int gk20a_channel_wait_semaphore(struct nvgpu_channel *ch,
 		ret = NVGPU_COND_WAIT_INTERRUPTIBLE(
 				&ch->semaphore_wq,
 				*semaphore == payload ||
-				gk20a_channel_check_unserviceable(ch),
+				nvgpu_channel_check_unserviceable(ch),
 				timeout);

 	dma_buf_kunmap(dmabuf, offset >> PAGE_SHIFT, data);
@@ -682,7 +682,7 @@ static int gk20a_channel_wait(struct nvgpu_channel *ch,

 	nvgpu_log_fn(g, " ");

-	if (gk20a_channel_check_unserviceable(ch)) {
+	if (nvgpu_channel_check_unserviceable(ch)) {
 		return -ETIMEDOUT;
 	}

@@ -720,7 +720,7 @@ static int gk20a_channel_wait(struct nvgpu_channel *ch,
 		remain = NVGPU_COND_WAIT_INTERRUPTIBLE(
 				&ch->notifier_wq,
 				notif->status == 0 ||
-				gk20a_channel_check_unserviceable(ch),
+				nvgpu_channel_check_unserviceable(ch),
 				args->timeout);

 		if (remain == 0 && notif->status != 0) {
@@ -789,7 +789,7 @@ static int gk20a_ioctl_channel_submit_gpfifo(
 	profile = nvgpu_profile_acquire(ch->g);
 	nvgpu_profile_snapshot(profile, PROFILE_IOCTL_ENTRY);

-	if (gk20a_channel_check_unserviceable(ch)) {
+	if (nvgpu_channel_check_unserviceable(ch)) {
 		return -ETIMEDOUT;
 	}

@@ -1278,7 +1278,7 @@ long gk20a_channel_ioctl(struct file *filp,
 	}
 	case NVGPU_IOCTL_CHANNEL_GET_TIMEDOUT:
 		((struct nvgpu_get_param_args *)buf)->value =
-			gk20a_channel_check_unserviceable(ch);
+			nvgpu_channel_check_unserviceable(ch);
 		break;
 	case NVGPU_IOCTL_CHANNEL_ENABLE:
 		err = gk20a_busy(ch->g);
@@ -670,7 +670,7 @@ static int nvgpu_gpu_ioctl_set_debug_mode(
 	struct nvgpu_channel *ch;
 	int err;

-	ch = gk20a_get_channel_from_file(args->channel_fd);
+	ch = nvgpu_channel_get_from_file(args->channel_fd);
 	if (!ch)
 		return -EINVAL;

@@ -1633,7 +1633,7 @@ static int nvgpu_gpu_set_deterministic_opts(struct gk20a *g,
 			break;
 		}

-		ch = gk20a_get_channel_from_file(ch_fd);
+		ch = nvgpu_channel_get_from_file(ch_fd);
 		if (!ch) {
 			err = -EINVAL;
 			break;
@@ -518,7 +518,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 		   g->name, args->channel_fd);

 	/*
-	 * Although gk20a_get_channel_from_file gives us a channel ref, need to
+	 * Although nvgpu_channel_get_from_file gives us a channel ref, need to
 	 * hold a ref to the file during the session lifetime. See comment in
 	 * struct dbg_session_channel_data.
 	 */
@@ -526,7 +526,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 	if (!f)
 		return -ENODEV;

-	ch = gk20a_get_channel_from_file(args->channel_fd);
+	ch = nvgpu_channel_get_from_file(args->channel_fd);
 	if (!ch) {
 		nvgpu_log_fn(g, "no channel found for fd");
 		err = -EINVAL;
@@ -1835,7 +1835,7 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 	nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
 		   g->name, args->channel_fd);

-	ch = gk20a_get_channel_from_file(args->channel_fd);
+	ch = nvgpu_channel_get_from_file(args->channel_fd);
 	if (!ch) {
 		nvgpu_log_fn(g, "no channel found for fd");
 		return -EINVAL;
@@ -48,7 +48,7 @@ static int nvgpu_tsg_bind_channel_fd(struct nvgpu_tsg *tsg, int ch_fd)
 	struct nvgpu_channel *ch;
 	int err;

-	ch = gk20a_get_channel_from_file(ch_fd);
+	ch = nvgpu_channel_get_from_file(ch_fd);
 	if (!ch)
 		return -EINVAL;

@@ -80,7 +80,7 @@ static int gk20a_tsg_ioctl_bind_channel_ex(struct gk20a *g,
 		goto mutex_release;
 	}

-	ch = gk20a_get_channel_from_file(arg->channel_fd);
+	ch = nvgpu_channel_get_from_file(arg->channel_fd);
 	if (!ch) {
 		err = -EINVAL;
 		goto idle;
@@ -130,7 +130,7 @@ static int nvgpu_tsg_unbind_channel_fd(struct nvgpu_tsg *tsg, int ch_fd)
 	struct nvgpu_channel *ch;
 	int err = 0;

-	ch = gk20a_get_channel_from_file(ch_fd);
+	ch = nvgpu_channel_get_from_file(ch_fd);
 	if (!ch) {
 		return -EINVAL;
 	}
@@ -146,7 +146,7 @@ static int nvgpu_tsg_unbind_channel_fd(struct nvgpu_tsg *tsg, int ch_fd)
 	 * Mark the channel unserviceable since channel unbound from TSG
 	 * has no context of its own so it can't serve any job
 	 */
-	gk20a_channel_set_unserviceable(ch);
+	nvgpu_channel_set_unserviceable(ch);

 out:
 	nvgpu_channel_put(ch);
@@ -440,7 +440,7 @@ put_dmabuf:
 	return err;
 }

-void nvgpu_channel_free_usermode_buffers(struct nvgpu_channel *c)
+void nvgpu_os_channel_free_usermode_buffers(struct nvgpu_channel *c)
 {
 	struct nvgpu_channel_linux *priv = c->os_priv;
 	struct gk20a *g = c->g;
@@ -564,7 +564,7 @@ int nvgpu_channel_init_support_linux(struct nvgpu_os_linux *l)
 		nvgpu_channel_alloc_usermode_buffers;

 	g->os_channel.free_usermode_buffers =
-		nvgpu_channel_free_usermode_buffers;
+		nvgpu_os_channel_free_usermode_buffers;

 	return 0;

@@ -129,7 +129,7 @@ DECLARE_EVENT_CLASS(gk20a_channel,
 	TP_fast_assign(__entry->channel = channel;),
 	TP_printk("ch id %d", __entry->channel)
 );
-DEFINE_EVENT(gk20a_channel, gk20a_channel_update,
+DEFINE_EVENT(gk20a_channel, nvgpu_channel_update,
 	TP_PROTO(int channel),
 	TP_ARGS(channel)
 );
@@ -633,7 +633,7 @@ DEFINE_EVENT(gk20a_cde, gk20a_cde_finished_ctx_cb,
 #define trace_nvgpu_channel_get(arg...) ((void)(NULL))
 #define trace_nvgpu_channel_put(arg...) ((void)(NULL))
 #define trace_gk20a_open_new_channel(arg...) ((void)(NULL))
-#define trace_gk20a_channel_update(arg...) ((void)(NULL))
+#define trace_nvgpu_channel_update(arg...) ((void)(NULL))
 #define trace_gk20a_channel_reset(arg...) ((void)(NULL))

 #define trace_gk20a_mm_fb_flush(arg...) ((void)(NULL))