gpu: nvgpu: rename hw_chid to chid

hw_chid is a relative id for vgpu; for native it is the same as the hw id.
Rename it to chid to avoid confusion.

Jira VFND-3796

Change-Id: I1c7924da1757330ace715a7c52ac61ec9dc7065c
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master/r/1509530
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Richard Zhao authored 2017-06-27 11:20:58 -07:00, committed by mobile promotions
parent d32bd6605d, commit 7d584bf868
24 changed files with 188 additions and 188 deletions
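For context, a minimal illustrative sketch of the distinction the commit message describes; the helper name and mapping table below are hypothetical and not part of this patch: on native hardware the channel id indexes the hardware channel directly, while on vGPU it is a guest-relative id that the host side maps to a real hardware channel.

#include <stdint.h>
#include <stdbool.h>

/* Resolve a channel id (chid) to a hardware channel index.
 * Hypothetical sketch only; nvgpu does not contain this helper. */
uint32_t chid_to_hw_index(bool is_vgpu, uint32_t chid,
			  const uint32_t *host_chid_map)
{
	if (is_vgpu)
		return host_chid_map[chid]; /* vGPU: guest-relative id, remapped by the host */

	return chid; /* native: chid already equals the hardware channel id */
}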

View File

@@ -77,7 +77,7 @@ static int gk20a_fifo_sched_debugfs_seq_show(
ret = 0;
}
-if (!test_bit(ch->hw_chid, runlist->active_channels))
+if (!test_bit(ch->chid, runlist->active_channels))
return ret;
if (gk20a_channel_get(ch)) {
@@ -85,7 +85,7 @@ static int gk20a_fifo_sched_debugfs_seq_show(
tsg = &f->tsg[ch->tsgid];
seq_printf(s, "%-8d %-8d %-8d %-9d %-8d %-10d %-8d %-8d\n",
-ch->hw_chid,
+ch->chid,
ch->tsgid,
ch->tgid,
tsg ? tsg->timeslice_us : ch->timeslice_us,

View File

@@ -42,7 +42,7 @@ static void gk20a_channel_trace_sched_param(
const char *compute_preempt_mode),
struct channel_gk20a *ch)
{
-(trace)(ch->hw_chid, ch->tsgid, ch->pid,
+(trace)(ch->chid, ch->tsgid, ch->pid,
gk20a_is_channel_marked_as_tsg(ch) ?
tsg_gk20a_from_ch(ch)->timeslice_us : ch->timeslice_us,
ch->timeout_ms_max,
@@ -525,7 +525,7 @@ static int gk20a_channel_wait(struct channel_gk20a *ch,
notif->timestamp.nanoseconds[0] = tv.tv_nsec;
notif->timestamp.nanoseconds[1] = tv.tv_sec;
notif->info32 = 0xDEADBEEF; /* should be object name */
-notif->info16 = ch->hw_chid; /* should be method offset */
+notif->info16 = ch->chid; /* should be method offset */
notif_clean_up:
dma_buf_vunmap(dmabuf, notif);
@@ -578,7 +578,7 @@ static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
if (event_id_data->event_posted) {
gk20a_dbg_info(
"found pending event_id=%d on chid=%d\n",
-event_id, ch->hw_chid);
+event_id, ch->chid);
mask = (POLLPRI | POLLIN);
event_id_data->event_posted = false;
}
@@ -662,7 +662,7 @@ void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
gk20a_dbg_info(
"posting event for event_id=%d on ch=%d\n",
-event_id, ch->hw_chid);
+event_id, ch->chid);
event_id_data->event_posted = true;
wake_up_interruptible_all(&event_id_data->event_id_wq);
@@ -713,7 +713,7 @@ static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
goto clean_up_file;
}
event_id_data->g = g;
-event_id_data->id = ch->hw_chid;
+event_id_data->id = ch->chid;
event_id_data->is_tsg = false;
event_id_data->event_id = event_id;
@@ -1006,7 +1006,7 @@ long gk20a_channel_ioctl(struct file *filp,
u32 timeout =
(u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
-timeout, ch->hw_chid);
+timeout, ch->chid);
ch->timeout_ms_max = timeout;
gk20a_channel_trace_sched_param(
trace_gk20a_channel_set_timeout, ch);
@@ -1020,7 +1020,7 @@ long gk20a_channel_ioctl(struct file *filp,
((struct nvgpu_set_timeout_ex_args *)buf)->flags &
(1 << NVGPU_TIMEOUT_FLAG_DISABLE_DUMP));
gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
-timeout, ch->hw_chid);
+timeout, ch->chid);
ch->timeout_ms_max = timeout;
ch->timeout_debug_dump = timeout_debug_dump;
gk20a_channel_trace_sched_param(

View File

@@ -433,7 +433,7 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch)
*/
nvgpu_semaphore_pool_get(s->hw_sema->p);
-gpu_sema_dbg(ch->g, "Allocated semaphore (c=%d)", ch->hw_chid);
+gpu_sema_dbg(ch->g, "Allocated semaphore (c=%d)", ch->chid);
return s;
}

View File

@@ -118,7 +118,7 @@ static void free_channel(struct fifo_gk20a *f,
{
struct gk20a *g = f->g;
-trace_gk20a_release_used_channel(ch->hw_chid);
+trace_gk20a_release_used_channel(ch->chid);
/* refcount is zero here and channel is in a freed/dead state */
nvgpu_mutex_acquire(&f->free_chs_mutex);
/* add to head to increase visibility of timing-related bugs */
@@ -189,7 +189,7 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
static int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add)
{
-return c->g->ops.fifo.update_runlist(c->g, c->runlist_id, c->hw_chid, add, true);
+return c->g->ops.fifo.update_runlist(c->g, c->runlist_id, c->chid, add, true);
}
int gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
@@ -295,7 +295,7 @@ void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
ch->g->ops.fifo.disable_channel(ch);
if (channel_preempt && ch->ch_ctx.gr_ctx)
-ch->g->ops.fifo.preempt_channel(ch->g, ch->hw_chid);
+ch->g->ops.fifo.preempt_channel(ch->g, ch->chid);
gk20a_channel_abort_clean_up(ch);
}
@@ -320,7 +320,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
if (!channel_idle) {
nvgpu_err(ch->g, "jobs not freed for channel %d",
-ch->hw_chid);
+ch->chid);
return -EBUSY;
}
@@ -348,7 +348,7 @@ int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
case NVGPU_RUNLIST_INTERLEAVE_LEVEL_LOW:
case NVGPU_RUNLIST_INTERLEAVE_LEVEL_MEDIUM:
case NVGPU_RUNLIST_INTERLEAVE_LEVEL_HIGH:
-ret = g->ops.fifo.set_runlist_interleave(g, ch->hw_chid,
+ret = g->ops.fifo.set_runlist_interleave(g, ch->chid,
false, 0, level);
break;
default:
@@ -356,7 +356,7 @@ int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
break;
}
-gk20a_dbg(gpu_dbg_sched, "chid=%u interleave=%u", ch->hw_chid, level);
+gk20a_dbg(gpu_dbg_sched, "chid=%u interleave=%u", ch->chid, level);
return ret ? ret : g->ops.fifo.update_runlist(g, ch->runlist_id, ~0, true, true);
}
@@ -381,7 +381,7 @@ void gk20a_set_error_notifier_locked(struct channel_gk20a *ch, __u32 error)
ch->error_notifier->status = 0xffff;
nvgpu_err(ch->g,
-"error notifier set to %d for ch %d", error, ch->hw_chid);
+"error notifier set to %d for ch %d", error, ch->chid);
}
}
@@ -405,7 +405,7 @@ static void gk20a_wait_until_counter_is_N(
nvgpu_warn(ch->g,
"%s: channel %d, still waiting, %s left: %d, waiting for: %d",
-caller, ch->hw_chid, counter_name,
+caller, ch->chid, counter_name,
atomic_read(counter), wait_value);
gk20a_channel_dump_ref_actions(ch);
@@ -462,7 +462,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
WARN_ON(ch->g == NULL);
-trace_gk20a_free_channel(ch->hw_chid);
+trace_gk20a_free_channel(ch->chid);
/* abort channel and remove from runlist */
gk20a_disable_channel(ch);
@@ -483,7 +483,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
nvgpu_spinlock_release(&ch->ref_obtain_lock);
nvgpu_err(ch->g,
"Extra %s() called to channel %u",
-__func__, ch->hw_chid);
+__func__, ch->chid);
return;
}
ch->referenceable = false;
@@ -597,7 +597,7 @@ unbind:
nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
list_for_each_entry_safe(ch_data, tmp,
&dbg_s->ch_list, ch_entry) {
-if (ch_data->chid == ch->hw_chid)
+if (ch_data->chid == ch->chid)
dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
}
nvgpu_mutex_release(&dbg_s->ch_list_lock);
@@ -634,7 +634,7 @@ static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch)
nvgpu_spinlock_acquire(&ch->ref_actions_lock);
dev_info(dev, "ch %d: refs %d. Actions, most recent last:\n",
-ch->hw_chid, atomic_read(&ch->ref_count));
+ch->chid, atomic_read(&ch->ref_count));
/* start at the oldest possible entry. put is next insertion point */
get = ch->ref_actions_put;
@@ -695,7 +695,7 @@ static void gk20a_channel_save_ref_source(struct channel_gk20a *ch,
* reference must be held to it - either by you or the caller, which should be
* documented well or otherwise clearly seen. This usually boils down to the
* file from ioctls directly, or an explicit get in exception handlers when the
-* channel is found by a hw_chid.
+* channel is found by a chid.
*
* Most global functions in this file require a reference to be held by the
* caller.
@@ -716,7 +716,7 @@ struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
nvgpu_spinlock_release(&ch->ref_obtain_lock);
if (ret)
-trace_gk20a_channel_get(ch->hw_chid, caller);
+trace_gk20a_channel_get(ch->chid, caller);
return ret;
}
@@ -724,7 +724,7 @@ struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller)
{
gk20a_channel_save_ref_source(ch, channel_gk20a_ref_action_put);
-trace_gk20a_channel_put(ch->hw_chid, caller);
+trace_gk20a_channel_put(ch->chid, caller);
atomic_dec(&ch->ref_count);
nvgpu_cond_broadcast(&ch->ref_count_dec_wq);
@@ -809,7 +809,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
return NULL;
}
-trace_gk20a_open_new_channel(ch->hw_chid);
+trace_gk20a_open_new_channel(ch->chid);
BUG_ON(ch->g);
ch->g = g;
@@ -951,7 +951,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
if (!e) {
nvgpu_err(c->g,
"ch %d: priv cmd entry is null",
-c->hw_chid);
+c->chid);
return -EINVAL;
}
@@ -961,7 +961,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
size = orig_size + (q->size - q->put);
gk20a_dbg_info("ch %d: priv cmd queue get:put %d:%d",
-c->hw_chid, q->get, q->put);
+c->chid, q->get, q->put);
free_count = (q->size - (q->put - q->get) - 1) % q->size;
@@ -1268,7 +1268,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
if (c->gpfifo.mem.size) {
nvgpu_err(g, "channel %d :"
-"gpfifo already allocated", c->hw_chid);
+"gpfifo already allocated", c->chid);
err = -EEXIST;
goto clean_up_idle;
}
@@ -1294,7 +1294,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
c->gpfifo.get = c->gpfifo.put = 0;
gk20a_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d",
-c->hw_chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num);
+c->chid, c->gpfifo.mem.gpu_va, c->gpfifo.entry_num);
g->ops.fifo.setup_userd(c);
@@ -1653,7 +1653,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
}
nvgpu_err(g, "Job on channel %d timed out",
-ch->hw_chid);
+ch->chid);
gk20a_debug_dump(g);
gk20a_gr_debug_dump(g);
@@ -1934,7 +1934,7 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
rmb();
if ((q->get != e->off) && e->off != 0)
nvgpu_err(g, "requests out-of-order, ch=%d",
-c->hw_chid);
+c->chid);
q->get = e->off + e->size;
}
@@ -2161,7 +2161,7 @@ void gk20a_channel_update(struct channel_gk20a *c)
return;
}
-trace_gk20a_channel_update(c->hw_chid);
+trace_gk20a_channel_update(c->chid);
/* A queued channel is always checked for job cleanup. */
gk20a_channel_worker_enqueue(c);
}
@@ -2492,7 +2492,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
g->ops.ltc.sync_debugfs(g);
#endif
-gk20a_dbg_info("channel %d", c->hw_chid);
+gk20a_dbg_info("channel %d", c->chid);
/*
* Job tracking is necessary for any of the following conditions:
@@ -2585,7 +2585,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
down_read(&g->deterministic_busy);
trace_gk20a_channel_submit_gpfifo(g->name,
-c->hw_chid,
+c->chid,
num_entries,
flags,
fence ? fence->id : 0,
@@ -2661,7 +2661,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
up_read(&g->deterministic_busy);
trace_gk20a_channel_submitted_gpfifo(g->name,
-c->hw_chid,
+c->chid,
num_entries,
flags,
post_fence ? post_fence->syncpt_id : 0,
@@ -2771,7 +2771,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
int err;
c->g = NULL;
-c->hw_chid = chid;
+c->chid = chid;
atomic_set(&c->bound, false);
nvgpu_spinlock_init(&c->ref_obtain_lock);
atomic_set(&c->ref_count, 0);

View File

@@ -185,7 +185,7 @@ struct channel_gk20a {
struct nvgpu_semaphore_int *hw_sema;
-int hw_chid;
+int chid;
bool wdt_enabled;
atomic_t bound;
bool first_init;

View File

@@ -331,10 +331,10 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c)
sp->nvhost_dev = c->g->nvhost_dev;
snprintf(syncpt_name, sizeof(syncpt_name),
-"%s_%d", c->g->name, c->hw_chid);
+"%s_%d", c->g->name, c->chid);
sp->id = nvgpu_nvhost_get_syncpt_host_managed(sp->nvhost_dev,
-c->hw_chid, syncpt_name);
+c->chid, syncpt_name);
if (!sp->id) {
nvgpu_kfree(c->g, sp);
nvgpu_err(c->g, "failed to get free syncpt");
@@ -497,7 +497,7 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
struct nvgpu_semaphore *s, struct priv_cmd_entry *cmd,
int cmd_size, bool acquire, bool wfi)
{
-int ch = c->hw_chid;
+int ch = c->chid;
u32 ob, off = cmd->off;
u64 va;
@@ -557,7 +557,7 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
gpu_sema_verbose_dbg(g, "(A) c=%d ACQ_GE %-4u owner=%-3d"
"va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u",
ch, nvgpu_semaphore_get_value(s),
-s->hw_sema->ch->hw_chid, va, cmd->gva,
+s->hw_sema->ch->chid, va, cmd->gva,
cmd->mem->gpu_va, ob);
else
gpu_sema_verbose_dbg(g, "(R) c=%d INCR %u (%u) va=0x%llx "
@@ -911,12 +911,12 @@ gk20a_channel_semaphore_create(struct channel_gk20a *c)
if (c->vm->as_share)
asid = c->vm->as_share->id;
-sprintf(pool_name, "semaphore_pool-%d", c->hw_chid);
+sprintf(pool_name, "semaphore_pool-%d", c->chid);
sema->pool = c->vm->sema_pool;
#ifdef CONFIG_SYNC
sema->timeline = gk20a_sync_timeline_create(
-"gk20a_ch%d_as%d", c->hw_chid, asid);
+"gk20a_ch%d_as%d", c->chid, asid);
if (!sema->timeline) {
gk20a_channel_semaphore_destroy(&sema->ops);
return NULL;

View File

@@ -693,7 +693,7 @@ void gk20a_ctxsw_trace_channel_reset(struct gk20a *g, struct channel_gk20a *ch)
gk20a_ctxsw_trace_write(g, &entry);
gk20a_ctxsw_trace_wake_up(g, 0);
#endif
-trace_gk20a_channel_reset(ch->hw_chid, ch->tsgid);
+trace_gk20a_channel_reset(ch->chid, ch->tsgid);
}
void gk20a_ctxsw_trace_tsg_reset(struct gk20a *g, struct tsg_gk20a *tsg)

View File

@@ -445,7 +445,7 @@ int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
dbg_profiler_object_data, prof_obj_entry) {
if ((prof_obj->session_id == dbg_s->id) &&
-(prof_obj->ch->hw_chid == chid)) {
+(prof_obj->ch->chid == chid)) {
if (prof_obj->has_reservation) {
g->ops.dbg_session_ops.
release_profiler_reservation(dbg_s, prof_obj);
@@ -504,7 +504,7 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list,
dbg_session_channel_data, ch_entry) {
-if (ch->hw_chid == ch_data->chid) {
+if (ch->chid == ch_data->chid) {
channel_found = true;
break;
}
@@ -601,7 +601,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
return -EINVAL;
}
-gk20a_dbg_fn("%s hwchid=%d", g->name, ch->hw_chid);
+gk20a_dbg_fn("%s hwchid=%d", g->name, ch->chid);
nvgpu_mutex_acquire(&g->dbg_sessions_lock);
nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -613,7 +613,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
}
ch_data->ch_f = f;
ch_data->channel_fd = args->channel_fd;
-ch_data->chid = ch->hw_chid;
+ch_data->chid = ch->chid;
nvgpu_init_list_node(&ch_data->ch_entry);
session_data = nvgpu_kzalloc(g, sizeof(*session_data));

View File

@@ -628,8 +628,8 @@ static int gk20a_fecs_trace_bind_channel(struct gk20a *g,
u32 aperture;
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw,
-"hw_chid=%d context_ptr=%x inst_block=%llx",
-ch->hw_chid, context_ptr,
+"chid=%d context_ptr=%x inst_block=%llx",
+ch->chid, context_ptr,
gk20a_mm_inst_block_addr(g, &ch->inst_block));
if (!trace)

View File

@@ -46,7 +46,7 @@
#define FECS_METHOD_WFI_RESTORE 0x80000
static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
-u32 hw_chid, bool add,
+u32 chid, bool add,
bool wait_for_finish);
static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg);
@@ -1395,7 +1395,7 @@ void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
struct channel_gk20a *refch)
{
nvgpu_err(g,
-"channel %d generated a mmu fault", refch->hw_chid);
+"channel %d generated a mmu fault", refch->chid);
gk20a_set_error_notifier(refch,
NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
}
@@ -1455,7 +1455,7 @@ int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
if (gk20a_is_channel_marked_as_tsg(ch))
engines = gk20a_fifo_engines_on_id(g, ch->tsgid, true);
else
-engines = gk20a_fifo_engines_on_id(g, ch->hw_chid, false);
+engines = gk20a_fifo_engines_on_id(g, ch->chid, false);
if (!engines)
goto clean_up;
@@ -1673,7 +1673,7 @@ static bool gk20a_fifo_handle_mmu_fault(
} else {
nvgpu_err(g,
"mmu error in freed channel %d",
-ch->hw_chid);
+ch->chid);
}
} else if (mmfault_info.inst_ptr ==
gk20a_mm_inst_block_addr(g, &g->mm.bar1.inst_block)) {
@@ -1794,7 +1794,7 @@ static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg)
return engines;
}
-void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose)
+void gk20a_fifo_recover_ch(struct gk20a *g, u32 chid, bool verbose)
{
u32 engines;
@@ -1803,12 +1803,12 @@ void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose)
nvgpu_mutex_acquire(&g->dbg_sessions_lock);
gr_gk20a_disable_ctxsw(g);
-engines = gk20a_fifo_engines_on_id(g, hw_chid, false);
+engines = gk20a_fifo_engines_on_id(g, chid, false);
if (engines)
-gk20a_fifo_recover(g, engines, hw_chid, false, true, verbose);
+gk20a_fifo_recover(g, engines, chid, false, true, verbose);
else {
-struct channel_gk20a *ch = &g->fifo.channel[hw_chid];
+struct channel_gk20a *ch = &g->fifo.channel[chid];
if (gk20a_channel_get(ch)) {
gk20a_channel_abort(ch, false);
@@ -1976,7 +1976,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
gk20a_fifo_recover_tsg(g, ch->tsgid, verbose);
} else {
gk20a_set_error_notifier(ch, err_code);
-gk20a_fifo_recover_ch(g, ch->hw_chid, verbose);
+gk20a_fifo_recover_ch(g, ch->chid, verbose);
}
return 0;
@@ -2102,7 +2102,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
*/
if (progress) {
gk20a_dbg_info("progress on tsg=%d ch=%d",
-tsg->tsgid, ch->hw_chid);
+tsg->tsgid, ch->chid);
gk20a_channel_put(ch);
*ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
@@ -2119,7 +2119,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
*/
if (recover) {
gk20a_dbg_info("timeout on tsg=%d ch=%d",
-tsg->tsgid, ch->hw_chid);
+tsg->tsgid, ch->chid);
*ms = ch->timeout_accumulated_ms;
gk20a_channel_put(ch);
list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
@@ -2629,7 +2629,7 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
return ret;
}
-int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
+int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
{
struct fifo_gk20a *f = &g->fifo;
u32 ret = 0;
@@ -2637,7 +2637,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
u32 mutex_ret = 0;
u32 i;
-gk20a_dbg_fn("%d", hw_chid);
+gk20a_dbg_fn("%d", chid);
/* we have no idea which runlist we are using. lock all */
for (i = 0; i < g->fifo.max_runlists; i++)
@@ -2645,7 +2645,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
-ret = __locked_fifo_preempt(g, hw_chid, false);
+ret = __locked_fifo_preempt(g, chid, false);
if (!mutex_ret)
nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -2690,7 +2690,7 @@ int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
if (gk20a_is_channel_marked_as_tsg(ch))
err = g->ops.fifo.preempt_tsg(ch->g, ch->tsgid);
else
-err = g->ops.fifo.preempt_channel(ch->g, ch->hw_chid);
+err = g->ops.fifo.preempt_channel(ch->g, ch->chid);
return err;
}
@@ -2973,7 +2973,7 @@ u32 gk20a_fifo_default_timeslice_us(struct gk20a *g)
void gk20a_get_ch_runlist_entry(struct channel_gk20a *ch, u32 *runlist)
{
-runlist[0] = ram_rl_entry_chid_f(ch->hw_chid);
+runlist[0] = ram_rl_entry_chid_f(ch->chid);
runlist[1] = 0;
}
@@ -3066,7 +3066,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
down_read(&tsg->ch_list_lock);
/* add runnable channels bound to this TSG */
list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
-if (!test_bit(ch->hw_chid,
+if (!test_bit(ch->chid,
runlist->active_channels))
continue;
@@ -3076,7 +3076,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
}
gk20a_dbg_info("add channel %d to runlist",
-ch->hw_chid);
+ch->chid);
f->g->ops.fifo.get_ch_runlist_entry(ch, runlist_entry);
gk20a_dbg_info(
"run list count %d runlist [0] %x [1] %x\n",
@@ -3148,7 +3148,7 @@ int gk20a_fifo_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
}
static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
-u32 hw_chid, bool add,
+u32 chid, bool add,
bool wait_for_finish)
{
int ret = 0;
@@ -3166,24 +3166,24 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
/* valid channel, add/remove it from active list.
Otherwise, keep active list untouched for suspend/resume. */
-if (hw_chid != FIFO_INVAL_CHANNEL_ID) {
-ch = &f->channel[hw_chid];
+if (chid != FIFO_INVAL_CHANNEL_ID) {
+ch = &f->channel[chid];
if (gk20a_is_channel_marked_as_tsg(ch))
tsg = &f->tsg[ch->tsgid];
if (add) {
-if (test_and_set_bit(hw_chid,
+if (test_and_set_bit(chid,
runlist->active_channels) == 1)
return 0;
if (tsg && ++tsg->num_active_channels)
-set_bit(f->channel[hw_chid].tsgid,
+set_bit(f->channel[chid].tsgid,
runlist->active_tsgs);
} else {
-if (test_and_clear_bit(hw_chid,
+if (test_and_clear_bit(chid,
runlist->active_channels) == 0)
return 0;
if (tsg && --tsg->num_active_channels == 0)
-clear_bit(f->channel[hw_chid].tsgid,
+clear_bit(f->channel[chid].tsgid,
runlist->active_tsgs);
}
}
@@ -3208,7 +3208,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
goto clean_up;
}
-if (hw_chid != FIFO_INVAL_CHANNEL_ID || /* add/remove a valid channel */
+if (chid != FIFO_INVAL_CHANNEL_ID || /* add/remove a valid channel */
add /* resume to add all channels back */) {
u32 max_entries = f->num_runlist_entries;
u32 *runlist_end;
@@ -3270,7 +3270,7 @@ clean_up:
return ret;
}
-int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 hw_chid,
+int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
bool add, bool wait_for_finish)
{
u32 ret = -EINVAL;
@@ -3284,7 +3284,7 @@ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 hw_chid,
ret = 0;
for_each_set_bit(runlist_id, &ulong_runlist_ids, 32) {
/* Capture the last failure error code */
-errcode = g->ops.fifo.update_runlist(g, runlist_id, hw_chid, add, wait_for_finish);
+errcode = g->ops.fifo.update_runlist(g, runlist_id, chid, add, wait_for_finish);
if (errcode) {
nvgpu_err(g,
"failed to update_runlist %d %d", runlist_id, errcode);
@@ -3297,9 +3297,9 @@ end:
/* add/remove a channel from runlist
special cases below: runlist->active_channels will NOT be changed.
-(hw_chid == ~0 && !add) means remove all active channels from runlist.
-(hw_chid == ~0 && add) means restore all active channels on runlist. */
-int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid,
+(chid == ~0 && !add) means remove all active channels from runlist.
+(chid == ~0 && add) means restore all active channels on runlist. */
+int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
bool add, bool wait_for_finish)
{
struct fifo_runlist_info_gk20a *runlist = NULL;
@@ -3316,7 +3316,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid,
mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
-ret = gk20a_fifo_update_runlist_locked(g, runlist_id, hw_chid, add,
+ret = gk20a_fifo_update_runlist_locked(g, runlist_id, chid, add,
wait_for_finish);
if (!mutex_ret)
@@ -3427,11 +3427,11 @@ u32 gk20a_fifo_get_pbdma_signature(struct gk20a *g)
return pbdma_signature_hw_valid_f() | pbdma_signature_sw_zero_f();
}
-struct channel_gk20a *gk20a_fifo_channel_from_hw_chid(struct gk20a *g,
-u32 hw_chid)
+struct channel_gk20a *gk20a_fifo_channel_from_chid(struct gk20a *g,
+u32 chid)
{
-if (hw_chid != FIFO_INVAL_CHANNEL_ID)
-return g->fifo.channel + hw_chid;
+if (chid != FIFO_INVAL_CHANNEL_ID)
+return g->fifo.channel + chid;
else
return NULL;
}
@@ -3487,14 +3487,14 @@ const char *gk20a_decode_pbdma_chan_eng_ctx_status(u32 index)
void gk20a_dump_channel_status_ramfc(struct gk20a *g,
struct gk20a_debug_output *o,
-u32 hw_chid,
+u32 chid,
struct ch_state *ch_state)
{
-u32 channel = gk20a_readl(g, ccsr_channel_r(hw_chid));
+u32 channel = gk20a_readl(g, ccsr_channel_r(chid));
u32 status = ccsr_channel_status_v(channel);
u32 syncpointa, syncpointb;
u32 *inst_mem;
-struct channel_gk20a *c = g->fifo.channel + hw_chid;
+struct channel_gk20a *c = g->fifo.channel + chid;
struct nvgpu_semaphore_int *hw_sema = NULL;
if (c->hw_sema)
@@ -3508,7 +3508,7 @@ void gk20a_dump_channel_status_ramfc(struct gk20a *g,
syncpointa = inst_mem[ram_fc_syncpointa_w()];
syncpointb = inst_mem[ram_fc_syncpointb_w()];
-gk20a_debug_output(o, "%d-%s, pid %d, refs %d%s: ", hw_chid,
+gk20a_debug_output(o, "%d-%s, pid %d, refs %d%s: ", chid,
g->name,
ch_state->pid,
ch_state->refs,
@@ -3673,16 +3673,16 @@ void gk20a_dump_eng_status(struct gk20a *g,
void gk20a_fifo_enable_channel(struct channel_gk20a *ch)
{
-gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
-gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid)) |
+gk20a_writel(ch->g, ccsr_channel_r(ch->chid),
+gk20a_readl(ch->g, ccsr_channel_r(ch->chid)) |
ccsr_channel_enable_set_true_f());
}
void gk20a_fifo_disable_channel(struct channel_gk20a *ch)
{
-gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
+gk20a_writel(ch->g, ccsr_channel_r(ch->chid),
gk20a_readl(ch->g,
-ccsr_channel_r(ch->hw_chid)) |
+ccsr_channel_r(ch->chid)) |
ccsr_channel_enable_clr_true_f());
}
@@ -3693,23 +3693,23 @@ static void gk20a_fifo_channel_bind(struct channel_gk20a *c)
ram_in_base_shift_v();
gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
-c->hw_chid, inst_ptr);
-gk20a_writel(g, ccsr_channel_r(c->hw_chid),
-(gk20a_readl(g, ccsr_channel_r(c->hw_chid)) &
+c->chid, inst_ptr);
+gk20a_writel(g, ccsr_channel_r(c->chid),
+(gk20a_readl(g, ccsr_channel_r(c->chid)) &
~ccsr_channel_runlist_f(~0)) |
ccsr_channel_runlist_f(c->runlist_id));
-gk20a_writel(g, ccsr_channel_inst_r(c->hw_chid),
+gk20a_writel(g, ccsr_channel_inst_r(c->chid),
ccsr_channel_inst_ptr_f(inst_ptr) |
nvgpu_aperture_mask(g, &c->inst_block,
ccsr_channel_inst_target_sys_mem_ncoh_f(),
ccsr_channel_inst_target_vid_mem_f()) |
ccsr_channel_inst_bind_true_f());
-gk20a_writel(g, ccsr_channel_r(c->hw_chid),
-(gk20a_readl(g, ccsr_channel_r(c->hw_chid)) &
+gk20a_writel(g, ccsr_channel_r(c->chid),
+(gk20a_readl(g, ccsr_channel_r(c->chid)) &
~ccsr_channel_enable_set_f(~0)) |
ccsr_channel_enable_set_true_f());
@@ -3725,7 +3725,7 @@ void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a)
gk20a_dbg_fn("");
if (atomic_cmpxchg(&ch_gk20a->bound, true, false)) {
-gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->hw_chid),
+gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid),
ccsr_channel_inst_ptr_f(0) |
ccsr_channel_inst_bind_false_f());
}
@@ -3743,7 +3743,7 @@ static int gk20a_fifo_commit_userd(struct channel_gk20a *c)
addr_hi = u64_hi32(c->userd_iova);
gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
-c->hw_chid, (u64)c->userd_iova);
+c->chid, (u64)c->userd_iova);
nvgpu_mem_wr32(g, &c->inst_block,
ram_in_ramfc_w() + ram_fc_userd_w(),
@@ -3815,7 +3815,7 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
fifo_pb_timeslice_timescale_0_f() |
fifo_pb_timeslice_enable_true_f());
-nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
+nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->chid));
if (c->is_privileged_channel)
gk20a_fifo_setup_ramfc_for_privileged_channel(c);
@@ -3834,7 +3834,7 @@ static int channel_gk20a_set_schedule_params(struct channel_gk20a *c)
c->g->ops.fifo.disable_channel(c);
/* preempt the channel */
-WARN_ON(c->g->ops.fifo.preempt_channel(c->g, c->hw_chid));
+WARN_ON(c->g->ops.fifo.preempt_channel(c->g, c->chid));
/* set new timeslice */
nvgpu_mem_wr32(c->g, &c->inst_block, ram_fc_runlist_timeslice_w(),
@@ -3863,7 +3863,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
ch->timeslice_us = timeslice;
gk20a_dbg(gpu_dbg_sched, "chid=%u timeslice=%u us",
-ch->hw_chid, timeslice);
+ch->chid, timeslice);
return channel_gk20a_set_schedule_params(ch);
}
@@ -3899,7 +3899,7 @@ void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c)
struct gk20a *g = c->g;
struct nvgpu_mem *mem = &c->inst_block;
-gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->hw_chid);
+gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->chid);
/* Enable HCE priv mode for phys mode transfer */
nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(),
@@ -3910,7 +3910,7 @@ int gk20a_fifo_setup_userd(struct channel_gk20a *c)
{
struct gk20a *g = c->g;
struct nvgpu_mem *mem = &g->fifo.userd;
-u32 offset = c->hw_chid * g->fifo.userd_entry_size / sizeof(u32);
+u32 offset = c->chid * g->fifo.userd_entry_size / sizeof(u32);
gk20a_dbg_fn("");
@@ -3939,7 +3939,7 @@ int gk20a_fifo_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
return err;
gk20a_dbg_info("channel %d inst block physical addr: 0x%16llx",
-ch->hw_chid, gk20a_mm_inst_block_addr(g, &ch->inst_block));
+ch->chid, gk20a_mm_inst_block_addr(g, &ch->inst_block));
gk20a_dbg_fn("done");
return 0;

View File

@@ -227,7 +227,7 @@ int gk20a_init_fifo_setup_hw(struct gk20a *g);
void gk20a_fifo_isr(struct gk20a *g);
int gk20a_fifo_nonstall_isr(struct gk20a *g);
-int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid);
+int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid);
int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch);
@@ -239,9 +239,9 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
bool wait_for_idle);
int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
bool wait_for_idle);
-u32 gk20a_fifo_engines_on_ch(struct gk20a *g, u32 hw_chid);
-int gk20a_fifo_update_runlist(struct gk20a *g, u32 engine_id, u32 hw_chid,
+u32 gk20a_fifo_engines_on_ch(struct gk20a *g, u32 chid);
+int gk20a_fifo_update_runlist(struct gk20a *g, u32 engine_id, u32 chid,
bool add, bool wait_for_finish);
int gk20a_fifo_suspend(struct gk20a *g);
@@ -253,7 +253,7 @@ void gk20a_fifo_recover(struct gk20a *g,
u32 hw_id, /* if ~0, will be queried from HW */
bool hw_id_is_tsg, /* ignored if hw_id == ~0 */
bool id_is_known, bool verbose);
-void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose);
+void gk20a_fifo_recover_ch(struct gk20a *g, u32 chid, bool verbose);
void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose);
int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
u32 err_code, bool verbose);
@@ -277,8 +277,8 @@ void gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
bool gk20a_fifo_error_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
bool gk20a_fifo_error_ch(struct gk20a *g, struct channel_gk20a *refch);
-struct channel_gk20a *gk20a_fifo_channel_from_hw_chid(struct gk20a *g,
-u32 hw_chid);
+struct channel_gk20a *gk20a_fifo_channel_from_chid(struct gk20a *g,
+u32 chid);
void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg);
int gk20a_fifo_set_runlist_interleave(struct gk20a *g,
@@ -316,7 +316,7 @@ u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g);
bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id);
-int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 hw_chid,
+int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
bool add, bool wait_for_finish);
int gk20a_fifo_init_engine_info(struct fifo_gk20a *f);
@@ -339,7 +339,7 @@ void gk20a_fifo_profile_release(struct gk20a *g,
void gk20a_dump_channel_status_ramfc(struct gk20a *g,
struct gk20a_debug_output *o,
-u32 hw_chid,
+u32 chid,
struct ch_state *ch_state);
void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
struct gk20a_debug_output *o);

View File

@@ -426,10 +426,10 @@ struct gpu_ops {
unsigned long acquire_timeout,
u32 flags);
int (*resetup_ramfc)(struct channel_gk20a *c);
-int (*preempt_channel)(struct gk20a *g, u32 hw_chid);
+int (*preempt_channel)(struct gk20a *g, u32 chid);
int (*preempt_tsg)(struct gk20a *g, u32 tsgid);
int (*update_runlist)(struct gk20a *g, u32 runlist_id,
-u32 hw_chid, bool add,
+u32 chid, bool add,
bool wait_for_finish);
void (*trigger_mmu_fault)(struct gk20a *g,
unsigned long engine_ids);
@@ -477,7 +477,7 @@ struct gpu_ops {
void (*dump_eng_status)(struct gk20a *g,
struct gk20a_debug_output *o);
void (*dump_channel_status_ramfc)(struct gk20a *g,
-struct gk20a_debug_output *o, u32 hw_chid,
+struct gk20a_debug_output *o, u32 chid,
struct ch_state *ch_state);
u32 (*intr_0_error_mask)(struct gk20a *g);
int (*is_preempt_pending)(struct gk20a *g, u32 id,

View File

@@ -725,7 +725,7 @@ static int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g,
u32 ret;
gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
-c->hw_chid, inst_base_ptr);
+c->chid, inst_base_ptr);
ret = gr_gk20a_submit_fecs_method_op(g,
(struct fecs_method_op_gk20a) {
@@ -5933,7 +5933,7 @@ static struct channel_gk20a *gk20a_gr_get_channel_from_ctx(
/* check cache first */
for (i = 0; i < GR_CHANNEL_MAP_TLB_SIZE; i++) {
if (gr->chid_tlb[i].curr_ctx == curr_ctx) {
-chid = gr->chid_tlb[i].hw_chid;
+chid = gr->chid_tlb[i].chid;
tsgid = gr->chid_tlb[i].tsgid;
ret = gk20a_channel_get(&f->channel[chid]);
goto unlock;
@@ -5964,7 +5964,7 @@ static struct channel_gk20a *gk20a_gr_get_channel_from_ctx(
for (i = 0; i < GR_CHANNEL_MAP_TLB_SIZE; i++) {
if (gr->chid_tlb[i].curr_ctx == 0) {
gr->chid_tlb[i].curr_ctx = curr_ctx;
-gr->chid_tlb[i].hw_chid = chid;
+gr->chid_tlb[i].chid = chid;
gr->chid_tlb[i].tsgid = tsgid;
goto unlock;
}
@@ -5972,7 +5972,7 @@ static struct channel_gk20a *gk20a_gr_get_channel_from_ctx(
/* no free entry, flush one */
gr->chid_tlb[gr->channel_tlb_flush_index].curr_ctx = curr_ctx;
-gr->chid_tlb[gr->channel_tlb_flush_index].hw_chid = chid;
+gr->chid_tlb[gr->channel_tlb_flush_index].chid = chid;
gr->chid_tlb[gr->channel_tlb_flush_index].tsgid = tsgid;
gr->channel_tlb_flush_index =
@@ -6514,7 +6514,7 @@ int gk20a_gr_isr(struct gk20a *g)
ch = gk20a_gr_get_channel_from_ctx(g, isr_data.curr_ctx, &tsgid);
if (ch) {
-isr_data.chid = ch->hw_chid;
+isr_data.chid = ch->chid;
} else {
isr_data.chid = FIFO_INVAL_CHANNEL_ID;
nvgpu_err(g, "ch id is INVALID 0xffffffff");
@@ -6626,7 +6626,7 @@ int gk20a_gr_isr(struct gk20a *g)
gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
"GPC exception pending");
-fault_ch = gk20a_fifo_channel_from_hw_chid(g,
+fault_ch = gk20a_fifo_channel_from_chid(g,
isr_data.chid);
/*isr_data.chid can be ~0 and fault_ch can be NULL */
@@ -6673,7 +6673,7 @@ int gk20a_gr_isr(struct gk20a *g)
tsgid, true, true, true);
else if (ch)
gk20a_fifo_recover(g, gr_engine_id,
-ch->hw_chid, false, true, true);
+ch->chid, false, true, true);
else
gk20a_fifo_recover(g, gr_engine_id,
0, false, false, true);
@@ -8337,16 +8337,16 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch)
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
"curr_gr_chid=%d curr_tsgid=%d, ch->tsgid=%d"
-" ch->hw_chid=%d",
-curr_ch ? curr_ch->hw_chid : -1,
+" ch->chid=%d",
+curr_ch ? curr_ch->chid : -1,
curr_gr_tsgid,
ch->tsgid,
-ch->hw_chid);
+ch->chid);
if (!curr_ch)
return false;
-if (ch->hw_chid == curr_ch->hw_chid)
+if (ch->chid == curr_ch->chid)
ret = true;
if (gk20a_is_channel_marked_as_tsg(ch) && (ch->tsgid == curr_gr_tsgid))

View File

@@ -112,7 +112,7 @@ enum {
struct gr_channel_map_tlb_entry {
u32 curr_ctx;
-u32 hw_chid;
+u32 chid;
u32 tsgid;
};

View File

@@ -171,7 +171,7 @@ struct mmu_fault_info {
u32 faulted_pbdma;
u32 faulted_engine;
u32 faulted_subid;
-u32 hw_chid;
+u32 chid;
struct channel_gk20a *refch;
const char *client_type_desc;
const char *fault_type_desc;

View File

@@ -360,7 +360,7 @@ static void gk20a_sync_pt_value_str_for_sema(struct gk20a_sync_pt *pt,
struct nvgpu_semaphore *s = pt->sema;
snprintf(str, size, "S: c=%d [v=%u,r_v=%u]",
-s->hw_sema->ch->hw_chid,
+s->hw_sema->ch->chid,
nvgpu_semaphore_get_value(s),
nvgpu_semaphore_read(s));
}

View File

@@ -61,7 +61,7 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
for (i = 0; i < f->max_runlists; ++i) {
runlist = &f->runlist_info[i];
-if (test_bit(ch->hw_chid, runlist->active_channels))
+if (test_bit(ch->chid, runlist->active_channels))
return true;
}
@@ -107,7 +107,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
kref_get(&tsg->refcount);
gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
-tsg->tsgid, ch->hw_chid);
+tsg->tsgid, ch->chid);
gk20a_dbg_fn("done");
return 0;

View File

@@ -36,18 +36,18 @@ static void channel_gm20b_bind(struct channel_gk20a *c)
>> ram_in_base_shift_v();
gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
-c->hw_chid, inst_ptr);
-gk20a_writel(g, ccsr_channel_inst_r(c->hw_chid),
+c->chid, inst_ptr);
+gk20a_writel(g, ccsr_channel_inst_r(c->chid),
ccsr_channel_inst_ptr_f(inst_ptr) |
nvgpu_aperture_mask(g, &c->inst_block,
ccsr_channel_inst_target_sys_mem_ncoh_f(),
ccsr_channel_inst_target_vid_mem_f()) |
ccsr_channel_inst_bind_true_f());
-gk20a_writel(g, ccsr_channel_r(c->hw_chid),
-(gk20a_readl(g, ccsr_channel_r(c->hw_chid)) &
+gk20a_writel(g, ccsr_channel_r(c->chid),
+(gk20a_readl(g, ccsr_channel_r(c->chid)) &
~ccsr_channel_enable_set_f(~0)) |
ccsr_channel_enable_set_true_f());
wmb();

View File

@@ -64,7 +64,7 @@ int channel_gp10b_commit_userd(struct channel_gk20a *c)
addr_hi = u64_hi32(c->userd_iova);
gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
-c->hw_chid, (u64)c->userd_iova);
+c->chid, (u64)c->userd_iova);
nvgpu_mem_wr32(g, &c->inst_block,
ram_in_ramfc_w() + ram_fc_userd_w(),
@@ -134,7 +134,7 @@ static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
gp10b_set_pdb_fault_replay_flags(c->g, mem);
-nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
+nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->chid));
if (c->is_privileged_channel) {
/* Set privilege level for channel */
@@ -176,7 +176,7 @@ static int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
v = pbdma_allowed_syncpoints_0_valid_f(1);
gk20a_dbg_info("Channel %d, syncpt id %d\n",
-c->hw_chid, new_syncpt);
+c->chid, new_syncpt);
v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt);

View File

@@ -1653,7 +1653,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 				"CILP: preempted tsg");
 	} else {
-		gk20a_fifo_issue_preempt(g, fault_ch->hw_chid, false);
+		gk20a_fifo_issue_preempt(g, fault_ch->chid, false);
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 				"CILP: preempted channel");
 	}
@@ -1675,7 +1675,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 	if (gr_ctx->t18x.cilp_preempt_pending) {
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 				"CILP is already pending for chid %d",
-				fault_ch->hw_chid);
+				fault_ch->chid);
 		return 0;
 	}
@@ -1718,7 +1718,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP: disabling channel %d",
-			fault_ch->hw_chid);
+			fault_ch->chid);
 	ret = gr_gp10b_disable_channel_or_tsg(g, fault_ch);
 	if (ret) {
@@ -1728,7 +1728,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 	/* set cilp_preempt_pending = true and record the channel */
 	gr_ctx->t18x.cilp_preempt_pending = true;
-	g->gr.t18x.cilp_preempt_pending_chid = fault_ch->hw_chid;
+	g->gr.t18x.cilp_preempt_pending_chid = fault_ch->chid;
 	if (gk20a_is_channel_marked_as_tsg(fault_ch)) {
 		struct tsg_gk20a *tsg = &g->fifo.tsg[fault_ch->tsgid];
@@ -1758,7 +1758,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g,
 	if (!gr_ctx->t18x.cilp_preempt_pending) {
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 				"CILP is already cleared for chid %d\n",
-				fault_ch->hw_chid);
+				fault_ch->chid);
 		return 0;
 	}
@@ -1879,7 +1879,7 @@ static int gr_gp10b_get_cilp_preempt_pending_chid(struct gk20a *g, int *__chid)
 	chid = g->gr.t18x.cilp_preempt_pending_chid;
-	ch = gk20a_channel_get(gk20a_fifo_channel_from_hw_chid(g, chid));
+	ch = gk20a_channel_get(gk20a_fifo_channel_from_chid(g, chid));
 	if (!ch)
 		return ret;
@@ -1923,7 +1923,7 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
 		goto clean_up;
 	ch = gk20a_channel_get(
-		gk20a_fifo_channel_from_hw_chid(g, chid));
+		gk20a_fifo_channel_from_chid(g, chid));
 	if (!ch)
 		goto clean_up;
@@ -2171,7 +2171,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 	gk20a_dbg(gpu_dbg_sched, "chid=%d tsgid=%d pid=%d "
 			"graphics_preempt=%d compute_preempt=%d",
-			ch->hw_chid,
+			ch->chid,
 			ch->tsgid,
 			ch->tgid,
 			graphics_preempt_mode,
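
The CILP hunks above record a pending chid and later resolve it back to a channel with the renamed gk20a_fifo_channel_from_chid() plus gk20a_channel_get(). A minimal, hypothetical sketch of that lookup-and-reference pattern; gk20a_channel_put() is assumed here as the matching unreference helper and is not part of this diff.

/* Sketch only: resolve a recorded chid back to a live channel, mirroring
 * the gr_gp10b_get_cilp_preempt_pending_chid() hunk above. */
static bool cilp_pending_channel_is_tsg(struct gk20a *g, int chid)
{
        struct channel_gk20a *ch;
        bool is_tsg;

        ch = gk20a_channel_get(gk20a_fifo_channel_from_chid(g, chid));
        if (!ch)
                return false;           /* channel already released */

        is_tsg = gk20a_is_channel_marked_as_tsg(ch);
        gk20a_channel_put(ch);          /* assumed unref counterpart */
        return is_tsg;
}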


@@ -299,7 +299,7 @@ static inline void __nvgpu_semaphore_release(struct nvgpu_semaphore *s,
 	nvgpu_mem_wr(hw_sema->ch->g, &hw_sema->p->rw_mem, hw_sema->offset, val);
 	gpu_sema_verbose_dbg(hw_sema->p->sema_sea->gk20a,
-			     "(c=%d) WRITE %u", hw_sema->ch->hw_chid, val);
+			     "(c=%d) WRITE %u", hw_sema->ch->chid, val);
 }
 static inline void nvgpu_semaphore_release(struct nvgpu_semaphore *s)
@@ -325,7 +325,7 @@ static inline void nvgpu_semaphore_incr(struct nvgpu_semaphore *s)
 	gpu_sema_verbose_dbg(s->hw_sema->p->sema_sea->gk20a,
 			     "INCR sema for c=%d (%u)",
-			     s->hw_sema->ch->hw_chid,
+			     s->hw_sema->ch->chid,
 			     nvgpu_semaphore_next_value(s));
 }
 #endif


@@ -34,7 +34,7 @@ static void vgpu_channel_bind(struct channel_gk20a *ch)
 		&msg.params.channel_config;
 	int err;
-	gk20a_dbg_info("bind channel %d", ch->hw_chid);
+	gk20a_dbg_info("bind channel %d", ch->chid);
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND;
 	msg.handle = vgpu_get_handle(ch->g);
@@ -76,7 +76,7 @@ static int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
 	msg.handle = vgpu_get_handle(g);
-	p->id = ch->hw_chid;
+	p->id = ch->chid;
 	p->pid = (u64)current->tgid;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {
@@ -407,10 +407,10 @@ int vgpu_init_fifo_support(struct gk20a *g)
 	return err;
 }
-static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
+static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid)
 {
 	struct fifo_gk20a *f = &g->fifo;
-	struct channel_gk20a *ch = &f->channel[hw_chid];
+	struct channel_gk20a *ch = &f->channel[chid];
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_channel_config_params *p =
 		&msg.params.channel_config;
@@ -428,7 +428,7 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
 	if (err || msg.ret) {
 		nvgpu_err(g,
-			"preempt channel %d failed", hw_chid);
+			"preempt channel %d failed", chid);
 		err = -ENOMEM;
 	}
@@ -497,7 +497,7 @@ done:
 }
 static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
-				u32 hw_chid, bool add,
+				u32 chid, bool add,
 				bool wait_for_finish)
 {
 	struct fifo_gk20a *f = &g->fifo;
@@ -511,19 +511,19 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 	/* valid channel, add/remove it from active list.
	   Otherwise, keep active list untouched for suspend/resume. */
-	if (hw_chid != (u32)~0) {
+	if (chid != (u32)~0) {
 		if (add) {
-			if (test_and_set_bit(hw_chid,
+			if (test_and_set_bit(chid,
 				runlist->active_channels) == 1)
 				return 0;
 		} else {
-			if (test_and_clear_bit(hw_chid,
+			if (test_and_clear_bit(chid,
 				runlist->active_channels) == 0)
 				return 0;
 		}
 	}
-	if (hw_chid != (u32)~0 || /* add/remove a valid channel */
+	if (chid != (u32)~0 || /* add/remove a valid channel */
 		add /* resume to add all channels back */) {
 		u32 chid;
@@ -544,10 +544,10 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 /* add/remove a channel from runlist
    special cases below: runlist->active_channels will NOT be changed.
-   (hw_chid == ~0 && !add) means remove all active channels from runlist.
-   (hw_chid == ~0 && add) means restore all active channels on runlist. */
+   (chid == ~0 && !add) means remove all active channels from runlist.
+   (chid == ~0 && add) means restore all active channels on runlist. */
 static int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
-			u32 hw_chid, bool add, bool wait_for_finish)
+			u32 chid, bool add, bool wait_for_finish)
 {
 	struct fifo_runlist_info_gk20a *runlist = NULL;
 	struct fifo_gk20a *f = &g->fifo;
@@ -559,7 +559,7 @@ static int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
 	nvgpu_mutex_acquire(&runlist->mutex);
-	ret = vgpu_fifo_update_runlist_locked(g, runlist_id, hw_chid, add,
+	ret = vgpu_fifo_update_runlist_locked(g, runlist_id, chid, add,
 					wait_for_finish);
 	nvgpu_mutex_release(&runlist->mutex);
@@ -580,7 +580,7 @@ static int vgpu_channel_set_priority(struct channel_gk20a *ch, u32 priority)
 		&msg.params.channel_priority;
 	int err;
-	gk20a_dbg_info("channel %d set priority %u", ch->hw_chid, priority);
+	gk20a_dbg_info("channel %d set priority %u", ch->chid, priority);
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_PRIORITY;
 	msg.handle = vgpu_get_handle(ch->g);
@@ -739,7 +739,7 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
 	nvgpu_err(g, "fifo intr (%d) on ch %u",
 		info->type, info->chid);
-	trace_gk20a_channel_reset(ch->hw_chid, ch->tsgid);
+	trace_gk20a_channel_reset(ch->chid, ch->tsgid);
 	switch (info->type) {
 	case TEGRA_VGPU_FIFO_INTR_PBDMA:
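
The runlist comment above documents the ~0 sentinel: with chid == ~0, the active-channel bitmap is left untouched and the update applies to all active channels. A short sketch of the two "all channels" cases; both wrappers are hypothetical and assume they sit in the same file as the static vgpu_fifo_update_runlist(), with wait_for_finish chosen arbitrarily here.

/* Sketch only: the two special cases described in the comment above. */
static int vgpu_runlist_remove_all(struct gk20a *g, u32 runlist_id)
{
        /* chid == ~0 && !add: drop every active channel from the runlist */
        return vgpu_fifo_update_runlist(g, runlist_id, (u32)~0, false, true);
}

static int vgpu_runlist_restore_all(struct gk20a *g, u32 runlist_id)
{
        /* chid == ~0 && add: put all active channels back (resume path) */
        return vgpu_fifo_update_runlist(g, runlist_id, (u32)~0, true, true);
}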


@@ -69,7 +69,7 @@ static int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg,
 	if (err) {
 		nvgpu_err(tsg->g,
 			"vgpu_tsg_bind_channel failed, ch %d tsgid %d",
-			ch->hw_chid, tsg->tsgid);
+			ch->chid, tsg->tsgid);
 		gk20a_tsg_unbind_channel(ch);
 	}


@@ -284,14 +284,14 @@ TRACE_EVENT(gk20a_push_cmdbuf,
 );
 TRACE_EVENT(gk20a_channel_submit_gpfifo,
-	TP_PROTO(const char *name, u32 hw_chid, u32 num_entries,
+	TP_PROTO(const char *name, u32 chid, u32 num_entries,
 		 u32 flags, u32 wait_id, u32 wait_value),
-	TP_ARGS(name, hw_chid, num_entries, flags, wait_id, wait_value),
+	TP_ARGS(name, chid, num_entries, flags, wait_id, wait_value),
 	TP_STRUCT__entry(
 		__field(const char *, name)
-		__field(u32, hw_chid)
+		__field(u32, chid)
 		__field(u32, num_entries)
 		__field(u32, flags)
 		__field(u32, wait_id)
@@ -300,29 +300,29 @@ TRACE_EVENT(gk20a_channel_submit_gpfifo,
 	TP_fast_assign(
 		__entry->name = name;
-		__entry->hw_chid = hw_chid;
+		__entry->chid = chid;
 		__entry->num_entries = num_entries;
 		__entry->flags = flags;
 		__entry->wait_id = wait_id;
 		__entry->wait_value = wait_value;
 	),
-	TP_printk("name=%s, hw_chid=%d, num_entries=%u, flags=%u, wait_id=%d,"
+	TP_printk("name=%s, chid=%d, num_entries=%u, flags=%u, wait_id=%d,"
		  " wait_value=%u",
-		  __entry->name, __entry->hw_chid, __entry->num_entries,
+		  __entry->name, __entry->chid, __entry->num_entries,
		  __entry->flags, __entry->wait_id, __entry->wait_value)
 );
 TRACE_EVENT(gk20a_channel_submitted_gpfifo,
-	TP_PROTO(const char *name, u32 hw_chid, u32 num_entries,
+	TP_PROTO(const char *name, u32 chid, u32 num_entries,
 		 u32 flags, u32 incr_id, u32 incr_value),
-	TP_ARGS(name, hw_chid, num_entries, flags,
+	TP_ARGS(name, chid, num_entries, flags,
		incr_id, incr_value),
 	TP_STRUCT__entry(
 		__field(const char *, name)
-		__field(u32, hw_chid)
+		__field(u32, chid)
 		__field(u32, num_entries)
 		__field(u32, flags)
 		__field(u32, incr_id)
@@ -331,36 +331,36 @@ TRACE_EVENT(gk20a_channel_submitted_gpfifo,
 	TP_fast_assign(
 		__entry->name = name;
-		__entry->hw_chid = hw_chid;
+		__entry->chid = chid;
 		__entry->num_entries = num_entries;
 		__entry->flags = flags;
 		__entry->incr_id = incr_id;
 		__entry->incr_value = incr_value;
 	),
-	TP_printk("name=%s, hw_chid=%d, num_entries=%u, flags=%u,"
+	TP_printk("name=%s, chid=%d, num_entries=%u, flags=%u,"
		  " incr_id=%u, incr_value=%u",
-		  __entry->name, __entry->hw_chid, __entry->num_entries,
+		  __entry->name, __entry->chid, __entry->num_entries,
		  __entry->flags, __entry->incr_id, __entry->incr_value)
 );
 TRACE_EVENT(gk20a_channel_reset,
-	TP_PROTO(u32 hw_chid, u32 tsgid),
-	TP_ARGS(hw_chid, tsgid),
+	TP_PROTO(u32 chid, u32 tsgid),
+	TP_ARGS(chid, tsgid),
 	TP_STRUCT__entry(
-		__field(u32, hw_chid)
+		__field(u32, chid)
 		__field(u32, tsgid)
 	),
 	TP_fast_assign(
-		__entry->hw_chid = hw_chid;
+		__entry->chid = chid;
 		__entry->tsgid = tsgid;
 	),
-	TP_printk("hw_chid=%d, tsgid=%d",
-		  __entry->hw_chid, __entry->tsgid)
+	TP_printk("chid=%d, tsgid=%d",
+		  __entry->chid, __entry->tsgid)
 );
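
Each TRACE_EVENT definition above generates a matching trace_<name>() call, so after this rename callers pass the channel's chid field directly. A minimal, hypothetical wrapper for illustration; the helper name and the include path are assumptions, while the tracepoint call itself mirrors the vgpu_fifo_isr hunk earlier in this commit.

#include <trace/events/gk20a.h>	/* assumed location of these TRACE_EVENTs */

/* Hypothetical convenience wrapper: emit the reset tracepoint for a channel.
 * trace_gk20a_channel_reset() is generated from the definition above. */
static inline void report_channel_reset(struct channel_gk20a *ch)
{
        trace_gk20a_channel_reset(ch->chid, ch->tsgid);
}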