gpu: nvgpu: rename tsg_gk20a and channel_gk20a structs

Rename struct tsg_gk20a to struct nvgpu_tsg and struct channel_gk20a to
struct nvgpu_channel. Dependent types struct channel_gk20a_job and struct
channel_gk20a_ref_action are likewise renamed to struct nvgpu_channel_job
and struct nvgpu_channel_ref_action.

Jira NVGPU-3248

Change-Id: I2a227347d249f9eea59223d82f09eae23dfc1306
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2112424
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Authored by Debarshi Dutta on 2019-05-06 10:06:09 +05:30; committed by mobile promotions
parent 400c10164e
commit 17486ec1f6
156 changed files with 1063 additions and 1063 deletions
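The rename below is purely mechanical: every declaration and use of the two struct tags is updated, with no functional change. As a hedged illustration only (not part of this commit, and the header name is hypothetical), an out-of-tree module that still uses the old tags could be kept building during its migration with preprocessor aliases along these lines:

/* nvgpu_legacy_names.h -- hypothetical compatibility shim, not part of this change. */
/* A typedef cannot alias a struct tag, so the old tags are forwarded to the
 * renamed structs at the token level; "struct channel_gk20a *ch" then
 * resolves to "struct nvgpu_channel *ch". */
#ifndef NVGPU_LEGACY_NAMES_H
#define NVGPU_LEGACY_NAMES_H

#define tsg_gk20a         nvgpu_tsg
#define channel_gk20a     nvgpu_channel
#define channel_gk20a_job nvgpu_channel_job

#endif /* NVGPU_LEGACY_NAMES_H */

In-tree code needs no such shim, since every user is converted in the same change, as the hunks below show.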


@@ -44,10 +44,10 @@ struct nvgpu_ce_gpu_ctx {
int gpu_ctx_state;
/* tsg related data */
-struct tsg_gk20a *tsg;
+struct nvgpu_tsg *tsg;
/* channel related data */
-struct channel_gk20a *ch;
+struct nvgpu_channel *ch;
struct vm_gk20a *vm;
/* cmd buf mem_desc */


@@ -50,7 +50,7 @@ static inline bool is_valid_cyclestats_bar0_offset_gk20a(struct gk20a *g,
}
void nvgpu_cyclestats_exec(struct gk20a *g,
-struct channel_gk20a *ch, u32 offset)
+struct nvgpu_channel *ch, u32 offset)
{
void *virtual_address;
u32 buffer_size;


@@ -40,11 +40,11 @@
* API to get first channel from the list of all channels
* bound to the debug session
*/
-struct channel_gk20a *
+struct nvgpu_channel *
nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s)
{
struct dbg_session_channel_data *ch_data;
-struct channel_gk20a *ch;
+struct nvgpu_channel *ch;
struct gk20a *g = dbg_s->g;
nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
@@ -63,7 +63,7 @@ nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s)
return ch;
}
-void nvgpu_dbg_gpu_post_events(struct channel_gk20a *ch)
+void nvgpu_dbg_gpu_post_events(struct nvgpu_channel *ch)
{
struct dbg_session_data *session_data;
struct dbg_session_gk20a *dbg_s;
@@ -92,7 +92,7 @@ void nvgpu_dbg_gpu_post_events(struct channel_gk20a *ch)
nvgpu_mutex_release(&ch->dbg_s_lock);
}
-bool nvgpu_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch)
+bool nvgpu_dbg_gpu_broadcast_stop_trigger(struct nvgpu_channel *ch)
{
struct dbg_session_data *session_data;
struct dbg_session_gk20a *dbg_s;
@@ -120,7 +120,7 @@ bool nvgpu_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch)
return broadcast;
}
-void nvgpu_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch)
+void nvgpu_dbg_gpu_clear_broadcast_stop_trigger(struct nvgpu_channel *ch)
{
struct dbg_session_data *session_data;
struct dbg_session_gk20a *dbg_s;


@@ -115,7 +115,7 @@ bool nvgpu_fence_is_expired(struct nvgpu_fence_type *f)
}
}
-int nvgpu_fence_pool_alloc(struct channel_gk20a *ch, unsigned int count)
+int nvgpu_fence_pool_alloc(struct nvgpu_channel *ch, unsigned int count)
{
int err;
size_t size;
@@ -145,7 +145,7 @@ fail:
return err;
}
-void nvgpu_fence_pool_free(struct channel_gk20a *ch)
+void nvgpu_fence_pool_free(struct nvgpu_channel *ch)
{
if (nvgpu_alloc_initialized(&ch->fence_allocator)) {
struct nvgpu_fence_type *fence_pool;
@@ -156,7 +156,7 @@ void nvgpu_fence_pool_free(struct channel_gk20a *ch)
}
}
-struct nvgpu_fence_type *nvgpu_fence_alloc(struct channel_gk20a *ch)
+struct nvgpu_fence_type *nvgpu_fence_alloc(struct nvgpu_channel *ch)
{
struct nvgpu_fence_type *fence = NULL;


@@ -55,24 +55,24 @@
#include <nvgpu/fence.h>
#include <nvgpu/preempt.h>
-static void free_channel(struct nvgpu_fifo *f, struct channel_gk20a *ch);
+static void free_channel(struct nvgpu_fifo *f, struct nvgpu_channel *ch);
-static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch);
+static void gk20a_channel_dump_ref_actions(struct nvgpu_channel *ch);
-static void channel_gk20a_free_priv_cmdbuf(struct channel_gk20a *ch);
+static void channel_gk20a_free_priv_cmdbuf(struct nvgpu_channel *ch);
-static void channel_gk20a_free_prealloc_resources(struct channel_gk20a *c);
+static void channel_gk20a_free_prealloc_resources(struct nvgpu_channel *c);
-static void channel_gk20a_joblist_add(struct channel_gk20a *c,
+static void channel_gk20a_joblist_add(struct nvgpu_channel *c,
-struct channel_gk20a_job *job);
+struct nvgpu_channel_job *job);
-static void channel_gk20a_joblist_delete(struct channel_gk20a *c,
+static void channel_gk20a_joblist_delete(struct nvgpu_channel *c,
-struct channel_gk20a_job *job);
+struct nvgpu_channel_job *job);
-static struct channel_gk20a_job *channel_gk20a_joblist_peek(
+static struct nvgpu_channel_job *channel_gk20a_joblist_peek(
-struct channel_gk20a *c);
+struct nvgpu_channel *c);
/* allocate GPU channel */
-static struct channel_gk20a *allocate_channel(struct nvgpu_fifo *f)
+static struct nvgpu_channel *allocate_channel(struct nvgpu_fifo *f)
{
-struct channel_gk20a *ch = NULL;
+struct nvgpu_channel *ch = NULL;
struct gk20a *g = f->g;
nvgpu_mutex_acquire(&f->free_chs_mutex);
@@ -96,7 +96,7 @@ static struct channel_gk20a *allocate_channel(struct nvgpu_fifo *f)
}
static void free_channel(struct nvgpu_fifo *f,
-struct channel_gk20a *ch)
+struct nvgpu_channel *ch)
{
struct gk20a *g = f->g;
@@ -121,7 +121,7 @@ static void free_channel(struct nvgpu_fifo *f,
}
}
-int channel_gk20a_commit_va(struct channel_gk20a *c)
+int channel_gk20a_commit_va(struct nvgpu_channel *c)
{
struct gk20a *g = c->g;
@@ -133,15 +133,15 @@ int channel_gk20a_commit_va(struct channel_gk20a *c)
return 0;
}
-int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add)
+int channel_gk20a_update_runlist(struct nvgpu_channel *c, bool add)
{
return c->g->ops.runlist.update_for_channel(c->g, c->runlist_id,
c, add, true);
}
-int gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
+int gk20a_enable_channel_tsg(struct gk20a *g, struct nvgpu_channel *ch)
{
-struct tsg_gk20a *tsg;
+struct nvgpu_tsg *tsg;
tsg = tsg_gk20a_from_ch(ch);
if (tsg != NULL) {
@@ -152,9 +152,9 @@ int gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
}
}
-int gk20a_disable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
+int gk20a_disable_channel_tsg(struct gk20a *g, struct nvgpu_channel *ch)
{
-struct tsg_gk20a *tsg;
+struct nvgpu_tsg *tsg;
tsg = tsg_gk20a_from_ch(ch);
if (tsg != NULL) {
@@ -165,7 +165,7 @@ int gk20a_disable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)
}
}
-void nvgpu_channel_abort_clean_up(struct channel_gk20a *ch)
+void nvgpu_channel_abort_clean_up(struct nvgpu_channel *ch)
{
/* synchronize with actual job cleanup */
nvgpu_mutex_acquire(&ch->joblist.cleanup_lock);
@@ -189,14 +189,14 @@ void nvgpu_channel_abort_clean_up(struct channel_gk20a *ch)
gk20a_channel_update(ch);
}
-void gk20a_channel_set_unserviceable(struct channel_gk20a *ch)
+void gk20a_channel_set_unserviceable(struct nvgpu_channel *ch)
{
nvgpu_spinlock_acquire(&ch->unserviceable_lock);
ch->unserviceable = true;
nvgpu_spinlock_release(&ch->unserviceable_lock);
}
-bool gk20a_channel_check_unserviceable(struct channel_gk20a *ch)
+bool gk20a_channel_check_unserviceable(struct nvgpu_channel *ch)
{
bool unserviceable_status;
@@ -207,9 +207,9 @@ bool gk20a_channel_check_unserviceable(struct channel_gk20a *ch)
return unserviceable_status;
}
-void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
+void gk20a_channel_abort(struct nvgpu_channel *ch, bool channel_preempt)
{
-struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);
+struct nvgpu_tsg *tsg = tsg_gk20a_from_ch(ch);
nvgpu_log_fn(ch->g, " ");
@@ -220,7 +220,7 @@ void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
}
}
-int gk20a_wait_channel_idle(struct channel_gk20a *ch)
+int gk20a_wait_channel_idle(struct nvgpu_channel *ch)
{
bool channel_idle = false;
struct nvgpu_timeout timeout;
@@ -249,7 +249,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
}
void gk20a_wait_until_counter_is_N(
-struct channel_gk20a *ch, nvgpu_atomic_t *counter, int wait_value,
+struct nvgpu_channel *ch, nvgpu_atomic_t *counter, int wait_value,
struct nvgpu_cond *c, const char *caller, const char *counter_name)
{
while (true) {
@@ -270,10 +270,10 @@ void gk20a_wait_until_counter_is_N(
}
/* call ONLY when no references to the channel exist: after the last put */
-static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
+static void gk20a_free_channel(struct nvgpu_channel *ch, bool force)
{
struct gk20a *g = ch->g;
-struct tsg_gk20a *tsg;
+struct nvgpu_tsg *tsg;
struct nvgpu_fifo *f = &g->fifo;
struct vm_gk20a *ch_vm = ch->vm;
unsigned long timeout = nvgpu_get_poll_timeout(g);
@@ -502,7 +502,7 @@ unbind:
free_channel(f, ch);
}
-static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch)
+static void gk20a_channel_dump_ref_actions(struct nvgpu_channel *ch)
{
#if GK20A_CHANNEL_REFCOUNT_TRACKING
size_t i, get;
@@ -523,7 +523,7 @@ static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch)
* skipping not-yet-initialized entries. There is no ref_actions_get.
*/
for (i = 0; i < GK20A_CHANNEL_REFCOUNT_TRACKING; i++) {
-struct channel_gk20a_ref_action *act = &ch->ref_actions[get];
+struct nvgpu_channel_ref_action *act = &ch->ref_actions[get];
if (act->trace.nr_entries) {
nvgpu_info(g,
@@ -545,11 +545,11 @@ static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch)
#endif
}
-static void gk20a_channel_save_ref_source(struct channel_gk20a *ch,
+static void gk20a_channel_save_ref_source(struct nvgpu_channel *ch,
enum channel_gk20a_ref_action_type type)
{
#if GK20A_CHANNEL_REFCOUNT_TRACKING
-struct channel_gk20a_ref_action *act;
+struct nvgpu_channel_ref_action *act;
nvgpu_spinlock_acquire(&ch->ref_actions_lock);
@@ -580,9 +580,9 @@ static void gk20a_channel_save_ref_source(struct channel_gk20a *ch,
* Most global functions in this file require a reference to be held by the
* caller.
*/
-struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
+struct nvgpu_channel *_gk20a_channel_get(struct nvgpu_channel *ch,
const char *caller) {
-struct channel_gk20a *ret;
+struct nvgpu_channel *ret;
nvgpu_spinlock_acquire(&ch->ref_obtain_lock);
@@ -603,7 +603,7 @@ struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
return ret;
}
-void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller)
+void _gk20a_channel_put(struct nvgpu_channel *ch, const char *caller)
{
gk20a_channel_save_ref_source(ch, channel_gk20a_ref_action_put);
trace_gk20a_channel_put(ch->chid, caller);
@@ -620,7 +620,7 @@ void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller)
WARN_ON(nvgpu_atomic_read(&ch->ref_count) == 0 && ch->referenceable);
}
-struct channel_gk20a *_gk20a_channel_from_id(struct gk20a *g, u32 chid,
+struct nvgpu_channel *_gk20a_channel_from_id(struct gk20a *g, u32 chid,
const char *caller)
{
if (chid == NVGPU_INVALID_CHANNEL_ID) {
@@ -630,7 +630,7 @@ struct channel_gk20a *_gk20a_channel_from_id(struct gk20a *g, u32 chid,
return _gk20a_channel_get(&g->fifo.channel[chid], caller);
}
-void gk20a_channel_close(struct channel_gk20a *ch)
+void gk20a_channel_close(struct nvgpu_channel *ch)
{
gk20a_free_channel(ch, false);
}
@@ -640,18 +640,18 @@ void gk20a_channel_close(struct channel_gk20a *ch)
* driver is otherwise dying. Ref counts and the like are ignored by this
* version of the cleanup.
*/
-void __gk20a_channel_kill(struct channel_gk20a *ch)
+void __gk20a_channel_kill(struct nvgpu_channel *ch)
{
gk20a_free_channel(ch, true);
}
-struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
+struct nvgpu_channel *gk20a_open_new_channel(struct gk20a *g,
u32 runlist_id,
bool is_privileged_channel,
pid_t pid, pid_t tid)
{
struct nvgpu_fifo *f = &g->fifo;
-struct channel_gk20a *ch;
+struct nvgpu_channel *ch;
/* compatibility with existing code */
if (!nvgpu_engine_is_valid_runlist_id(g, runlist_id)) {
@@ -749,7 +749,7 @@ clean_up:
/* allocate private cmd buffer.
used for inserting commands before/after user submitted buffers. */
-static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *ch,
+static int channel_gk20a_alloc_priv_cmdbuf(struct nvgpu_channel *ch,
u32 num_in_flight)
{
struct gk20a *g = ch->g;
@@ -814,7 +814,7 @@ clean_up:
return err;
}
-static void channel_gk20a_free_priv_cmdbuf(struct channel_gk20a *ch)
+static void channel_gk20a_free_priv_cmdbuf(struct nvgpu_channel *ch)
{
struct vm_gk20a *ch_vm = ch->vm;
struct priv_cmd_queue *q = &ch->priv_cmd_q;
@@ -829,7 +829,7 @@ static void channel_gk20a_free_priv_cmdbuf(struct channel_gk20a *ch)
}
/* allocate a cmd buffer with given size. size is number of u32 entries */
-int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
+int gk20a_channel_alloc_priv_cmdbuf(struct nvgpu_channel *c, u32 orig_size,
struct priv_cmd_entry *e)
{
struct priv_cmd_queue *q = &c->priv_cmd_q;
@@ -892,7 +892,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
/* Don't call this to free an explict cmd entry.
* It doesn't update priv_cmd_queue get/put */
-void free_priv_cmdbuf(struct channel_gk20a *c,
+void free_priv_cmdbuf(struct nvgpu_channel *c,
struct priv_cmd_entry *e)
{
if (channel_gk20a_is_prealloc_enabled(c)) {
@@ -902,8 +902,8 @@ void free_priv_cmdbuf(struct channel_gk20a *c,
}
}
-int channel_gk20a_alloc_job(struct channel_gk20a *c,
+int channel_gk20a_alloc_job(struct nvgpu_channel *c,
-struct channel_gk20a_job **job_out)
+struct nvgpu_channel_job **job_out)
{
int err = 0;
@@ -927,7 +927,7 @@ int channel_gk20a_alloc_job(struct channel_gk20a *c,
}
} else {
*job_out = nvgpu_kzalloc(c->g,
-sizeof(struct channel_gk20a_job));
+sizeof(struct nvgpu_channel_job));
if (*job_out == NULL) {
err = -ENOMEM;
}
@@ -936,8 +936,8 @@ int channel_gk20a_alloc_job(struct channel_gk20a *c,
return err;
}
-void channel_gk20a_free_job(struct channel_gk20a *c,
+void channel_gk20a_free_job(struct nvgpu_channel *c,
-struct channel_gk20a_job *job)
+struct nvgpu_channel_job *job)
{
/*
* In case of pre_allocated jobs, we need to clean out
@@ -955,7 +955,7 @@ void channel_gk20a_free_job(struct channel_gk20a *c,
}
}
-void channel_gk20a_joblist_lock(struct channel_gk20a *c)
+void channel_gk20a_joblist_lock(struct nvgpu_channel *c)
{
if (channel_gk20a_is_prealloc_enabled(c)) {
nvgpu_mutex_acquire(&c->joblist.pre_alloc.read_lock);
@@ -964,7 +964,7 @@ void channel_gk20a_joblist_lock(struct channel_gk20a *c)
}
}
-void channel_gk20a_joblist_unlock(struct channel_gk20a *c)
+void channel_gk20a_joblist_unlock(struct nvgpu_channel *c)
{
if (channel_gk20a_is_prealloc_enabled(c)) {
nvgpu_mutex_release(&c->joblist.pre_alloc.read_lock);
@@ -973,11 +973,11 @@ void channel_gk20a_joblist_unlock(struct channel_gk20a *c)
}
}
-static struct channel_gk20a_job *channel_gk20a_joblist_peek(
+static struct nvgpu_channel_job *channel_gk20a_joblist_peek(
-struct channel_gk20a *c)
+struct nvgpu_channel *c)
{
u32 get;
-struct channel_gk20a_job *job = NULL;
+struct nvgpu_channel_job *job = NULL;
if (channel_gk20a_is_prealloc_enabled(c)) {
if (!channel_gk20a_joblist_is_empty(c)) {
@@ -994,8 +994,8 @@ static struct channel_gk20a_job *channel_gk20a_joblist_peek(
return job;
}
-static void channel_gk20a_joblist_add(struct channel_gk20a *c,
+static void channel_gk20a_joblist_add(struct nvgpu_channel *c,
-struct channel_gk20a_job *job)
+struct nvgpu_channel_job *job)
{
if (channel_gk20a_is_prealloc_enabled(c)) {
c->joblist.pre_alloc.put = (c->joblist.pre_alloc.put + 1U) %
@@ -1005,8 +1005,8 @@ static void channel_gk20a_joblist_add(struct channel_gk20a *c,
}
}
-static void channel_gk20a_joblist_delete(struct channel_gk20a *c,
+static void channel_gk20a_joblist_delete(struct nvgpu_channel *c,
-struct channel_gk20a_job *job)
+struct nvgpu_channel_job *job)
{
if (channel_gk20a_is_prealloc_enabled(c)) {
c->joblist.pre_alloc.get = (c->joblist.pre_alloc.get + 1U) %
@@ -1016,7 +1016,7 @@ static void channel_gk20a_joblist_delete(struct channel_gk20a *c,
}
}
-bool channel_gk20a_joblist_is_empty(struct channel_gk20a *c)
+bool channel_gk20a_joblist_is_empty(struct nvgpu_channel *c)
{
if (channel_gk20a_is_prealloc_enabled(c)) {
u32 get = c->joblist.pre_alloc.get;
@@ -1027,7 +1027,7 @@ bool channel_gk20a_joblist_is_empty(struct channel_gk20a *c)
return nvgpu_list_empty(&c->joblist.dynamic.jobs);
}
-bool channel_gk20a_is_prealloc_enabled(struct channel_gk20a *c)
+bool channel_gk20a_is_prealloc_enabled(struct nvgpu_channel *c)
{
bool pre_alloc_enabled = c->joblist.pre_alloc.enabled;
@@ -1035,7 +1035,7 @@ bool channel_gk20a_is_prealloc_enabled(struct channel_gk20a *c)
return pre_alloc_enabled;
}
-static int channel_gk20a_prealloc_resources(struct channel_gk20a *ch,
+static int channel_gk20a_prealloc_resources(struct nvgpu_channel *ch,
unsigned int num_jobs)
{
unsigned int i;
@@ -1052,7 +1052,7 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *ch,
* since vmalloc take in an unsigned long, we need
* to make sure we don't hit an overflow condition
*/
-size = sizeof(struct channel_gk20a_job);
+size = sizeof(struct nvgpu_channel_job);
if (num_jobs <= ULONG_MAX / size) {
ch->joblist.pre_alloc.jobs = nvgpu_vzalloc(ch->g,
num_jobs * size);
@@ -1113,7 +1113,7 @@ clean_up:
return err;
}
-static void channel_gk20a_free_prealloc_resources(struct channel_gk20a *c)
+static void channel_gk20a_free_prealloc_resources(struct nvgpu_channel *c)
{
nvgpu_vfree(c->g, c->joblist.pre_alloc.jobs[0].wait_cmd);
nvgpu_vfree(c->g, c->joblist.pre_alloc.jobs);
@@ -1128,7 +1128,7 @@ static void channel_gk20a_free_prealloc_resources(struct channel_gk20a *c)
c->joblist.pre_alloc.enabled = false;
}
-int nvgpu_channel_set_syncpt(struct channel_gk20a *ch)
+int nvgpu_channel_set_syncpt(struct nvgpu_channel *ch)
{
struct gk20a *g = ch->g;
struct nvgpu_channel_sync_syncpt *sync_syncpt;
@@ -1163,7 +1163,7 @@ int nvgpu_channel_set_syncpt(struct channel_gk20a *ch)
return 0;
}
-int nvgpu_channel_setup_bind(struct channel_gk20a *c,
+int nvgpu_channel_setup_bind(struct nvgpu_channel *c,
struct nvgpu_setup_bind_args *args)
{
struct gk20a *g = c->g;
@@ -1353,7 +1353,7 @@ clean_up_idle:
return err;
}
-void gk20a_channel_free_usermode_buffers(struct channel_gk20a *c)
+void gk20a_channel_free_usermode_buffers(struct nvgpu_channel *c)
{
if (nvgpu_mem_is_valid(&c->usermode_userd)) {
nvgpu_dma_free(c->g, &c->usermode_userd);
@@ -1368,7 +1368,7 @@ void gk20a_channel_free_usermode_buffers(struct channel_gk20a *c)
/* Update with this periodically to determine how the gpfifo is draining. */
static inline u32 update_gp_get(struct gk20a *g,
-struct channel_gk20a *c)
+struct nvgpu_channel *c)
{
u32 new_get = g->ops.userd.gp_get(g, c);
@@ -1379,14 +1379,14 @@ static inline u32 update_gp_get(struct gk20a *g,
return new_get;
}
-u32 nvgpu_gp_free_count(struct channel_gk20a *c)
+u32 nvgpu_gp_free_count(struct nvgpu_channel *c)
{
return (c->gpfifo.entry_num - (c->gpfifo.put - c->gpfifo.get) - 1U) %
c->gpfifo.entry_num;
}
static bool nvgpu_channel_ctxsw_timeout_debug_dump_state(struct gk20a *g,
-struct channel_gk20a *ch)
+struct nvgpu_channel *ch)
{
bool verbose = false;
if (nvgpu_is_error_notifier_set(ch,
@@ -1398,7 +1398,7 @@ static bool nvgpu_channel_ctxsw_timeout_debug_dump_state(struct gk20a *g,
}
static void nvgpu_channel_set_has_timedout_and_wakeup_wqs(struct gk20a *g,
-struct channel_gk20a *ch)
+struct nvgpu_channel *ch)
{
/* mark channel as faulted */
gk20a_channel_set_unserviceable(ch);
@@ -1408,7 +1408,7 @@ static void nvgpu_channel_set_has_timedout_and_wakeup_wqs(struct gk20a *g,
nvgpu_cond_broadcast_interruptible(&ch->notifier_wq);
}
-bool nvgpu_channel_mark_error(struct gk20a *g, struct channel_gk20a *ch)
+bool nvgpu_channel_mark_error(struct gk20a *g, struct nvgpu_channel *ch)
{
bool verbose;
@@ -1418,14 +1418,14 @@ bool nvgpu_channel_mark_error(struct gk20a *g, struct channel_gk20a *ch)
return verbose;
}
-void nvgpu_channel_set_error_notifier(struct gk20a *g, struct channel_gk20a *ch,
+void nvgpu_channel_set_error_notifier(struct gk20a *g, struct nvgpu_channel *ch,
u32 error_notifier)
{
g->ops.channel.set_error_notifier(ch, error_notifier);
}
void nvgpu_channel_set_ctx_mmu_error(struct gk20a *g,
-struct channel_gk20a *ch)
+struct nvgpu_channel *ch)
{
nvgpu_err(g,
"channel %d generated a mmu fault", ch->chid);
@@ -1433,7 +1433,7 @@ void nvgpu_channel_set_ctx_mmu_error(struct gk20a *g,
NVGPU_ERR_NOTIFIER_FIFO_ERROR_MMU_ERR_FLT);
}
-bool nvgpu_channel_update_and_check_ctxsw_timeout(struct channel_gk20a *ch,
+bool nvgpu_channel_update_and_check_ctxsw_timeout(struct nvgpu_channel *ch,
u32 timeout_delta_ms, bool *progress)
{
u32 gpfifo_get = update_gp_get(ch->g, ch);
@@ -1454,13 +1454,13 @@ bool nvgpu_channel_update_and_check_ctxsw_timeout(struct channel_gk20a *ch,
ch->ctxsw_timeout_accumulated_ms > ch->ctxsw_timeout_max_ms;
}
-u32 nvgpu_get_gp_free_count(struct channel_gk20a *c)
+u32 nvgpu_get_gp_free_count(struct nvgpu_channel *c)
{
update_gp_get(c->g, c);
return nvgpu_gp_free_count(c);
}
-static void nvgpu_channel_wdt_init(struct channel_gk20a *ch)
+static void nvgpu_channel_wdt_init(struct nvgpu_channel *ch)
{
struct gk20a *g = ch->g;
@@ -1492,7 +1492,7 @@ static void nvgpu_channel_wdt_init(struct channel_gk20a *ch)
* actually stuck at that time. After the timeout duration has expired, a
* worker thread will consider the channel stuck and recover it if stuck.
*/
-static void nvgpu_channel_wdt_start(struct channel_gk20a *ch)
+static void nvgpu_channel_wdt_start(struct nvgpu_channel *ch)
{
if (!nvgpu_is_timeouts_enabled(ch->g)) {
return;
@@ -1523,7 +1523,7 @@ static void nvgpu_channel_wdt_start(struct channel_gk20a *ch)
* (This should be called from an update handler running in the same thread
* with the watchdog.)
*/
-static bool nvgpu_channel_wdt_stop(struct channel_gk20a *ch)
+static bool nvgpu_channel_wdt_stop(struct nvgpu_channel *ch)
{
bool was_running;
@@ -1543,7 +1543,7 @@ static bool nvgpu_channel_wdt_stop(struct channel_gk20a *ch)
* (This should be called from an update handler running in the same thread
* with the watchdog.)
*/
-static void nvgpu_channel_wdt_continue(struct channel_gk20a *ch)
+static void nvgpu_channel_wdt_continue(struct nvgpu_channel *ch)
{
nvgpu_spinlock_acquire(&ch->wdt.lock);
ch->wdt.running = true;
@@ -1560,7 +1560,7 @@ static void nvgpu_channel_wdt_continue(struct channel_gk20a *ch)
* timeouts. Stopped timeouts can only be started (which is technically a
* rewind too) or continued (where the stop is actually pause).
*/
-static void nvgpu_channel_wdt_rewind(struct channel_gk20a *ch)
+static void nvgpu_channel_wdt_rewind(struct nvgpu_channel *ch)
{
nvgpu_spinlock_acquire(&ch->wdt.lock);
if (ch->wdt.running) {
@@ -1583,7 +1583,7 @@ void nvgpu_channel_wdt_restart_all_channels(struct gk20a *g)
u32 chid;
for (chid = 0; chid < f->num_channels; chid++) {
-struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
+struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
if (ch != NULL) {
if (!gk20a_channel_check_unserviceable(ch)) {
@@ -1604,7 +1604,7 @@ void nvgpu_channel_wdt_restart_all_channels(struct gk20a *g)
* The gpu is implicitly on at this point, because the watchdog can only run on
* channels that have submitted jobs pending for cleanup.
*/
-static void nvgpu_channel_wdt_handler(struct channel_gk20a *ch)
+static void nvgpu_channel_wdt_handler(struct nvgpu_channel *ch)
{
struct gk20a *g = ch->g;
u32 gp_get;
@@ -1660,7 +1660,7 @@ static void nvgpu_channel_wdt_handler(struct channel_gk20a *ch)
* The timeout is stopped (disabled) after the last job in a row finishes
* and marks the channel idle.
*/
-static void nvgpu_channel_wdt_check(struct channel_gk20a *ch)
+static void nvgpu_channel_wdt_check(struct nvgpu_channel *ch)
{
bool running;
@@ -1682,7 +1682,7 @@ static void nvgpu_channel_poll_wdt(struct gk20a *g)
for (chid = 0; chid < g->fifo.num_channels; chid++) {
-struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
+struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
if (ch != NULL) {
if (!gk20a_channel_check_unserviceable(ch)) {
@@ -1730,7 +1730,7 @@ static void nvgpu_channel_worker_poll_wakeup_post_process_item(
static void nvgpu_channel_worker_poll_wakeup_process_item(
struct nvgpu_list_node *work_item)
{
-struct channel_gk20a *ch = channel_gk20a_from_worker_item(work_item);
+struct nvgpu_channel *ch = channel_gk20a_from_worker_item(work_item);
nvgpu_assert(ch != NULL);
@@ -1791,7 +1791,7 @@ void nvgpu_channel_worker_deinit(struct gk20a *g)
* because in that case it has been scheduled already but has not yet been
* processed.
*/
-static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
+static void gk20a_channel_worker_enqueue(struct nvgpu_channel *ch)
{
struct gk20a *g = ch->g;
int ret;
@@ -1818,7 +1818,7 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
}
}
-int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
+int gk20a_free_priv_cmdbuf(struct nvgpu_channel *c, struct priv_cmd_entry *e)
{
struct priv_cmd_queue *q = &c->priv_cmd_q;
struct gk20a *g = c->g;
@@ -1842,8 +1842,8 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
return 0;
}
-int gk20a_channel_add_job(struct channel_gk20a *c,
+int gk20a_channel_add_job(struct nvgpu_channel *c,
-struct channel_gk20a_job *job,
+struct nvgpu_channel_job *job,
bool skip_buffer_refcounting)
{
struct vm_gk20a *vm = c->vm;
@@ -1909,11 +1909,11 @@ err_put_buffers:
* per-job memory for completed jobs; in case of preallocated resources, this
* opens up slots for new jobs to be submitted.
*/
-void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
+void gk20a_channel_clean_up_jobs(struct nvgpu_channel *c,
bool clean_all)
{
struct vm_gk20a *vm;
-struct channel_gk20a_job *job;
+struct nvgpu_channel_job *job;
struct gk20a *g;
bool job_finished = false;
bool watchdog_on = false;
@@ -2067,7 +2067,7 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
* safe to call even if there is nothing to clean up. Any visible actions on
* jobs just before calling this are guaranteed to be processed.
*/
-void gk20a_channel_update(struct channel_gk20a *c)
+void gk20a_channel_update(struct nvgpu_channel *c)
{
if (!c->g->power_on) { /* shutdown case */
return;
@@ -2096,7 +2096,7 @@ void gk20a_channel_deterministic_idle(struct gk20a *g)
nvgpu_rwsem_down_write(&g->deterministic_busy);
for (chid = 0; chid < f->num_channels; chid++) {
-struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
+struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
if (ch == NULL) {
continue;
@@ -2134,7 +2134,7 @@ void gk20a_channel_deterministic_unidle(struct gk20a *g)
u32 chid;
for (chid = 0; chid < f->num_channels; chid++) {
-struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
+struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
if (ch == NULL) {
continue;
@@ -2159,7 +2159,7 @@ void gk20a_channel_deterministic_unidle(struct gk20a *g)
nvgpu_rwsem_up_write(&g->deterministic_busy);
}
-static void nvgpu_channel_destroy(struct gk20a *g, struct channel_gk20a *c)
+static void nvgpu_channel_destroy(struct gk20a *g, struct nvgpu_channel *c)
{
nvgpu_mutex_destroy(&c->ioctl_lock);
nvgpu_mutex_destroy(&c->joblist.cleanup_lock);
@@ -2181,7 +2181,7 @@ void nvgpu_channel_cleanup_sw(struct gk20a *g)
* Make sure all channels are closed before deleting them.
*/
for (chid = 0; chid < f->num_channels; chid++) {
-struct channel_gk20a *ch = &f->channel[chid];
+struct nvgpu_channel *ch = &f->channel[chid];
/*
* Could race but worst that happens is we get an error message
@@ -2201,7 +2201,7 @@ void nvgpu_channel_cleanup_sw(struct gk20a *g)
int gk20a_init_channel_support(struct gk20a *g, u32 chid)
{
-struct channel_gk20a *c = g->fifo.channel+chid;
+struct nvgpu_channel *c = g->fifo.channel+chid;
int err;
c->g = NULL;
@@ -2312,7 +2312,7 @@ int nvgpu_channel_setup_sw(struct gk20a *g)
clean_up:
for (i = 0; i < chid; i++) {
-struct channel_gk20a *ch = &f->channel[i];
+struct nvgpu_channel *ch = &f->channel[i];
nvgpu_channel_destroy(g, ch);
}
@@ -2337,7 +2337,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
nvgpu_log_fn(g, " ");
for (chid = 0; chid < f->num_channels; chid++) {
-struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
+struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
if (ch == NULL) {
continue;
@@ -2368,7 +2368,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
nvgpu_runlist_reload_ids(g, active_runlist_ids, false);
for (chid = 0; chid < f->num_channels; chid++) {
-struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
+struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
if (ch != NULL) {
if (gk20a_channel_check_unserviceable(ch)) {
@@ -2397,7 +2397,7 @@ void nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g)
nvgpu_log_fn(g, " ");
for (chid = 0; chid < f->num_channels; chid++) {
-struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
+struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
if (ch == NULL) {
continue;
@@ -2435,13 +2435,13 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
g->ops.mm.cache.fb_flush(g);
for (chid = 0; chid < f->num_channels; chid++) {
-struct channel_gk20a *c = g->fifo.channel+chid;
+struct nvgpu_channel *c = g->fifo.channel+chid;
if (gk20a_channel_get(c) != NULL) {
if (nvgpu_atomic_read(&c->bound) != 0) {
nvgpu_cond_broadcast_interruptible(
&c->semaphore_wq);
if (post_events) {
-struct tsg_gk20a *tsg =
+struct nvgpu_tsg *tsg =
tsg_gk20a_from_ch(c);
if (tsg != NULL) {
g->ops.tsg.post_event_id(tsg,
@@ -2469,7 +2469,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
}
/* return with a reference to the channel, caller must put it back */
-struct channel_gk20a *nvgpu_channel_refch_from_inst_ptr(struct gk20a *g,
+struct nvgpu_channel *nvgpu_channel_refch_from_inst_ptr(struct gk20a *g,
u64 inst_ptr)
{
struct nvgpu_fifo *f = &g->fifo;
@@ -2479,7 +2479,7 @@ struct channel_gk20a *nvgpu_channel_refch_from_inst_ptr(struct gk20a *g,
return NULL;
}
for (ci = 0; ci < f->num_channels; ci++) {
-struct channel_gk20a *ch;
+struct nvgpu_channel *ch;
u64 ch_inst_ptr;
ch = gk20a_channel_from_id(g, ci);
@@ -2498,7 +2498,7 @@ struct channel_gk20a *nvgpu_channel_refch_from_inst_ptr(struct gk20a *g,
return NULL;
}
-int nvgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
+int nvgpu_channel_alloc_inst(struct gk20a *g, struct nvgpu_channel *ch)
{
int err;
@@ -2516,7 +2516,7 @@ int nvgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
return 0;
}
-void nvgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
+void nvgpu_channel_free_inst(struct gk20a *g, struct nvgpu_channel *ch)
{
nvgpu_free_inst_block(g, &ch->inst_block);
}
@@ -2535,7 +2535,7 @@ void nvgpu_channel_debug_dump_all(struct gk20a *g,
}
for (chid = 0U; chid < f->num_channels; chid++) {
-struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
+struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
if (ch != NULL) {
struct nvgpu_channel_dump_info *info;
@@ -2555,7 +2555,7 @@ void nvgpu_channel_debug_dump_all(struct gk20a *g,
}
for (chid = 0U; chid < f->num_channels; chid++) {
-struct channel_gk20a *ch = &f->channel[chid];
+struct nvgpu_channel *ch = &f->channel[chid];
struct nvgpu_channel_dump_info *info = infos[chid];
struct nvgpu_hw_semaphore *hw_sema = ch->hw_sema;
@@ -2599,10 +2599,10 @@ void nvgpu_channel_debug_dump_all(struct gk20a *g,
}
int nvgpu_channel_deferred_reset_engines(struct gk20a *g,
-struct channel_gk20a *ch)
+struct nvgpu_channel *ch)
{
unsigned long engine_id, engines = 0U;
-struct tsg_gk20a *tsg;
+struct nvgpu_tsg *tsg;
bool deferred_reset_pending;
struct nvgpu_fifo *f = &g->fifo;
int err = 0;


@@ -34,14 +34,14 @@
#include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h>
-void gk20a_channel_enable(struct channel_gk20a *ch)
+void gk20a_channel_enable(struct nvgpu_channel *ch)
{
gk20a_writel(ch->g, ccsr_channel_r(ch->chid),
gk20a_readl(ch->g, ccsr_channel_r(ch->chid)) |
ccsr_channel_enable_set_true_f());
}
-void gk20a_channel_disable(struct channel_gk20a *ch)
+void gk20a_channel_disable(struct nvgpu_channel *ch)
{
gk20a_writel(ch->g, ccsr_channel_r(ch->chid),
gk20a_readl(ch->g,
@@ -49,7 +49,7 @@ void gk20a_channel_disable(struct channel_gk20a *ch)
ccsr_channel_enable_clr_true_f());
}
-void gk20a_channel_unbind(struct channel_gk20a *ch)
+void gk20a_channel_unbind(struct nvgpu_channel *ch)
{
struct gk20a *g = ch->g;
@@ -82,7 +82,7 @@ static const char * const ccsr_chan_status_str[] = {
"N/A",
};
-void gk20a_channel_read_state(struct gk20a *g, struct channel_gk20a *ch,
+void gk20a_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
struct nvgpu_channel_hw_state *state)
{
u32 reg = gk20a_readl(g, ccsr_channel_r(ch->chid));


@@ -24,15 +24,15 @@
#define FIFO_CHANNEL_GK20A_H
struct gk20a;
-struct channel_gk20a;
+struct nvgpu_channel;
struct nvgpu_channel_hw_state;
struct gk20a_debug_output;
struct nvgpu_channel_dump_info;
-void gk20a_channel_enable(struct channel_gk20a *ch);
+void gk20a_channel_enable(struct nvgpu_channel *ch);
-void gk20a_channel_disable(struct channel_gk20a *ch);
+void gk20a_channel_disable(struct nvgpu_channel *ch);
-void gk20a_channel_unbind(struct channel_gk20a *ch);
+void gk20a_channel_unbind(struct nvgpu_channel *ch);
-void gk20a_channel_read_state(struct gk20a *g, struct channel_gk20a *ch,
+void gk20a_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
struct nvgpu_channel_hw_state *state);
void gk20a_channel_debug_dump(struct gk20a *g,


@@ -33,7 +33,7 @@
#include <nvgpu/hw/gm20b/hw_ccsr_gm20b.h>
#include <nvgpu/hw/gm20b/hw_ram_gm20b.h>
-void gm20b_channel_bind(struct channel_gk20a *c)
+void gm20b_channel_bind(struct nvgpu_channel *c)
{
struct gk20a *g = c->g;
@@ -63,7 +63,7 @@ u32 gm20b_channel_count(struct gk20a *g)
return ccsr_channel__size_1_v();
}
-void gm20b_channel_force_ctx_reload(struct channel_gk20a *ch)
+void gm20b_channel_force_ctx_reload(struct nvgpu_channel *ch)
{
struct gk20a *g = ch->g;
u32 reg = gk20a_readl(g, ccsr_channel_r(ch->chid));


@@ -25,11 +25,11 @@
#include <nvgpu/types.h>
-struct channel_gk20a;
+struct nvgpu_channel;
struct gk20a;
-void gm20b_channel_bind(struct channel_gk20a *c);
+void gm20b_channel_bind(struct nvgpu_channel *c);
u32 gm20b_channel_count(struct gk20a *g);
-void gm20b_channel_force_ctx_reload(struct channel_gk20a *ch);
+void gm20b_channel_force_ctx_reload(struct nvgpu_channel *ch);
#endif /* FIFO_CHANNEL_GM20B_H */


@@ -32,7 +32,7 @@
#include <nvgpu/hw/gv11b/hw_ccsr_gv11b.h>
-void gv11b_channel_unbind(struct channel_gk20a *ch)
+void gv11b_channel_unbind(struct nvgpu_channel *ch)
{
struct gk20a *g = ch->g;
@@ -55,7 +55,7 @@ u32 gv11b_channel_count(struct gk20a *g)
return ccsr_channel__size_1_v();
}
-void gv11b_channel_read_state(struct gk20a *g, struct channel_gk20a *ch,
+void gv11b_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
struct nvgpu_channel_hw_state *state)
{
u32 reg = gk20a_readl(g, ccsr_channel_r(ch->chid));
@@ -66,7 +66,7 @@ void gv11b_channel_read_state(struct gk20a *g, struct channel_gk20a *ch,
ccsr_channel_eng_faulted_true_v();
}
-void gv11b_channel_reset_faulted(struct gk20a *g, struct channel_gk20a *ch,
+void gv11b_channel_reset_faulted(struct gk20a *g, struct nvgpu_channel *ch,
bool eng, bool pbdma)
{
u32 reg = gk20a_readl(g, ccsr_channel_r(ch->chid));


@@ -26,18 +26,18 @@
#include <nvgpu/types.h>
struct gk20a;
-struct channel_gk20a;
+struct nvgpu_channel;
struct nvgpu_channel_hw_state;
struct gk20a_debug_output;
struct nvgpu_channel_dump_info;
-void gv11b_channel_unbind(struct channel_gk20a *ch);
+void gv11b_channel_unbind(struct nvgpu_channel *ch);
u32 gv11b_channel_count(struct gk20a *g);
-void gv11b_channel_read_state(struct gk20a *g, struct channel_gk20a *ch,
+void gv11b_channel_read_state(struct gk20a *g, struct nvgpu_channel *ch,
struct nvgpu_channel_hw_state *state);
-void gv11b_channel_reset_faulted(struct gk20a *g, struct channel_gk20a *ch,
+void gv11b_channel_reset_faulted(struct gk20a *g, struct nvgpu_channel *ch,
bool eng, bool pbdma);
-void gv11b_channel_free_subctx_header(struct channel_gk20a *ch);
+void gv11b_channel_free_subctx_header(struct nvgpu_channel *ch);
void gv11b_channel_debug_dump(struct gk20a *g,
struct gk20a_debug_output *o,


@@ -283,7 +283,7 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
u32 engine_chid = NVGPU_INVALID_CHANNEL_ID;
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
int mutex_ret = -EINVAL;
-struct channel_gk20a *ch = NULL;
+struct nvgpu_channel *ch = NULL;
int err = 0;
struct nvgpu_engine_status_info engine_status;
struct nvgpu_pbdma_status_info pbdma_status;


@@ -33,10 +33,10 @@ u32 nvgpu_preempt_get_timeout(struct gk20a *g)
return g->ctxsw_timeout_period_ms;
}
-int nvgpu_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
+int nvgpu_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
{
int err;
-struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);
+struct nvgpu_tsg *tsg = tsg_gk20a_from_ch(ch);
if (tsg != NULL) {
err = g->ops.fifo.preempt_tsg(ch->g, tsg);
@@ -49,7 +49,7 @@ int nvgpu_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
/* called from rc */
void nvgpu_preempt_poll_tsg_on_pbdma(struct gk20a *g,
-struct tsg_gk20a *tsg)
+struct nvgpu_tsg *tsg)
{
struct nvgpu_fifo *f = &g->fifo;
u32 runlist_id;


@@ -60,11 +60,11 @@ static u32 nvgpu_runlist_append_tsg(struct gk20a *g,
struct nvgpu_runlist_info *runlist,
u32 **runlist_entry,
u32 *entries_left,
-struct tsg_gk20a *tsg)
+struct nvgpu_tsg *tsg)
{
struct nvgpu_fifo *f = &g->fifo;
u32 runlist_entry_words = f->runlist_entry_size / (u32)sizeof(u32);
-struct channel_gk20a *ch;
+struct nvgpu_channel *ch;
u32 count = 0;
u32 timeslice;
@@ -135,7 +135,7 @@ static u32 nvgpu_runlist_append_prio(struct nvgpu_fifo *f,
nvgpu_log_fn(f->g, " ");
for_each_set_bit(tsgid, runlist->active_tsgs, f->num_channels) {
-struct tsg_gk20a *tsg = nvgpu_tsg_get_from_id(f->g, (u32)tsgid);
+struct nvgpu_tsg *tsg = nvgpu_tsg_get_from_id(f->g, (u32)tsgid);
u32 entries;
if (tsg->interleave_level == interleave_level) {
@@ -178,7 +178,7 @@ static u32 nvgpu_runlist_append_med(struct nvgpu_fifo *f,
nvgpu_log_fn(f->g, " ");
for_each_set_bit(tsgid, runlist->active_tsgs, f->num_channels) {
-struct tsg_gk20a *tsg = nvgpu_tsg_get_from_id(f->g, (u32)tsgid);
+struct nvgpu_tsg *tsg = nvgpu_tsg_get_from_id(f->g, (u32)tsgid);
u32 entries;
if (tsg->interleave_level !=
@@ -217,7 +217,7 @@ static u32 nvgpu_runlist_append_low(struct nvgpu_fifo *f,
nvgpu_log_fn(f->g, " ");
for_each_set_bit(tsgid, runlist->active_tsgs, f->num_channels) {
-struct tsg_gk20a *tsg = nvgpu_tsg_get_from_id(f->g, (u32)tsgid);
+struct nvgpu_tsg *tsg = nvgpu_tsg_get_from_id(f->g, (u32)tsgid);
u32 entries;
if (tsg->interleave_level !=
@@ -315,11 +315,11 @@ u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
}
static bool gk20a_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
-struct channel_gk20a *ch, bool add)
+struct nvgpu_channel *ch, bool add)
{
struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist = NULL;
-struct tsg_gk20a *tsg = NULL;
+struct nvgpu_tsg *tsg = NULL;
runlist = f->runlist_info[runlist_id];
tsg = tsg_gk20a_from_ch(ch);
@@ -389,7 +389,7 @@ static int gk20a_runlist_reconstruct_locked(struct gk20a *g, u32 runlist_id,
}
int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
-struct channel_gk20a *ch, bool add,
+struct nvgpu_channel *ch, bool add,
bool wait_for_finish)
{
int ret = 0;
@@ -445,7 +445,7 @@ int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
}
/* trigger host to expire current timeslice and reschedule runlist from front */
-int nvgpu_runlist_reschedule(struct channel_gk20a *ch, bool preempt_next,
+int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
bool wait_preempt)
{
struct gk20a *g = ch->g;
@@ -494,7 +494,7 @@ int nvgpu_runlist_reschedule(struct channel_gk20a *ch, bool preempt_next,
(ch == NULL && !add) means remove all active channels from runlist.
(ch == NULL && add) means restore all active channels on runlist. */
static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
-struct channel_gk20a *ch,
+struct nvgpu_channel *ch,
bool add, bool wait_for_finish)
{
struct nvgpu_runlist_info *runlist = NULL;
@@ -532,7 +532,7 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
}
int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
-struct channel_gk20a *ch,
+struct nvgpu_channel *ch,
bool add, bool wait_for_finish)
{
nvgpu_assert(ch != NULL);


@@ -36,9 +36,9 @@
/* /*
* Handle the submit synchronization - pre-fences and post-fences. * Handle the submit synchronization - pre-fences and post-fences.
*/ */
static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c, static int nvgpu_submit_prepare_syncs(struct nvgpu_channel *c,
struct nvgpu_channel_fence *fence, struct nvgpu_channel_fence *fence,
struct channel_gk20a_job *job, struct nvgpu_channel_job *job,
struct priv_cmd_entry **wait_cmd, struct priv_cmd_entry **wait_cmd,
struct priv_cmd_entry **incr_cmd, struct priv_cmd_entry **incr_cmd,
struct nvgpu_fence_type **post_fence, struct nvgpu_fence_type **post_fence,
@@ -183,7 +183,7 @@ fail:
return err; return err;
} }
static void nvgpu_submit_append_priv_cmdbuf(struct channel_gk20a *c, static void nvgpu_submit_append_priv_cmdbuf(struct nvgpu_channel *c,
struct priv_cmd_entry *cmd) struct priv_cmd_entry *cmd)
{ {
struct gk20a *g = c->g; struct gk20a *g = c->g;
@@ -205,7 +205,7 @@ static void nvgpu_submit_append_priv_cmdbuf(struct channel_gk20a *c,
c->gpfifo.put = (c->gpfifo.put + 1U) & (c->gpfifo.entry_num - 1U); c->gpfifo.put = (c->gpfifo.put + 1U) & (c->gpfifo.entry_num - 1U);
} }
static int nvgpu_submit_append_gpfifo_user_direct(struct channel_gk20a *c, static int nvgpu_submit_append_gpfifo_user_direct(struct nvgpu_channel *c,
struct nvgpu_gpfifo_userdata userdata, struct nvgpu_gpfifo_userdata userdata,
u32 num_entries) u32 num_entries)
{ {
@@ -248,7 +248,7 @@ static int nvgpu_submit_append_gpfifo_user_direct(struct channel_gk20a *c,
return 0; return 0;
} }
static void nvgpu_submit_append_gpfifo_common(struct channel_gk20a *c, static void nvgpu_submit_append_gpfifo_common(struct nvgpu_channel *c,
struct nvgpu_gpfifo_entry *src, u32 num_entries) struct nvgpu_gpfifo_entry *src, u32 num_entries)
{ {
struct gk20a *g = c->g; struct gk20a *g = c->g;
@@ -277,7 +277,7 @@ static void nvgpu_submit_append_gpfifo_common(struct channel_gk20a *c,
* Copy source gpfifo entries into the gpfifo ring buffer, potentially * Copy source gpfifo entries into the gpfifo ring buffer, potentially
* splitting into two memcpys to handle wrap-around. * splitting into two memcpys to handle wrap-around.
*/ */
static int nvgpu_submit_append_gpfifo(struct channel_gk20a *c, static int nvgpu_submit_append_gpfifo(struct nvgpu_channel *c,
struct nvgpu_gpfifo_entry *kern_gpfifo, struct nvgpu_gpfifo_entry *kern_gpfifo,
struct nvgpu_gpfifo_userdata userdata, struct nvgpu_gpfifo_userdata userdata,
u32 num_entries) u32 num_entries)
@@ -320,7 +320,7 @@ static int nvgpu_submit_append_gpfifo(struct channel_gk20a *c,
return 0; return 0;
} }
static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c, static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
struct nvgpu_gpfifo_entry *gpfifo, struct nvgpu_gpfifo_entry *gpfifo,
struct nvgpu_gpfifo_userdata userdata, struct nvgpu_gpfifo_userdata userdata,
u32 num_entries, u32 num_entries,
@@ -333,7 +333,7 @@ static int nvgpu_submit_channel_gpfifo(struct channel_gk20a *c,
struct priv_cmd_entry *wait_cmd = NULL; struct priv_cmd_entry *wait_cmd = NULL;
struct priv_cmd_entry *incr_cmd = NULL; struct priv_cmd_entry *incr_cmd = NULL;
struct nvgpu_fence_type *post_fence = NULL; struct nvgpu_fence_type *post_fence = NULL;
struct channel_gk20a_job *job = NULL; struct nvgpu_channel_job *job = NULL;
/* we might need two extra gpfifo entries - one for pre fence /* we might need two extra gpfifo entries - one for pre fence
* and one for post fence. */ * and one for post fence. */
const u32 extra_entries = 2U; const u32 extra_entries = 2U;
@@ -601,7 +601,7 @@ clean_up:
return err; return err;
} }
int nvgpu_submit_channel_gpfifo_user(struct channel_gk20a *c, int nvgpu_submit_channel_gpfifo_user(struct nvgpu_channel *c,
struct nvgpu_gpfifo_userdata userdata, struct nvgpu_gpfifo_userdata userdata,
u32 num_entries, u32 num_entries,
u32 flags, u32 flags,
@@ -613,7 +613,7 @@ int nvgpu_submit_channel_gpfifo_user(struct channel_gk20a *c,
flags, fence, fence_out, profile); flags, fence, fence_out, profile);
} }
int nvgpu_submit_channel_gpfifo_kernel(struct channel_gk20a *c, int nvgpu_submit_channel_gpfifo_kernel(struct nvgpu_channel *c,
struct nvgpu_gpfifo_entry *gpfifo, struct nvgpu_gpfifo_entry *gpfifo,
u32 num_entries, u32 num_entries,
u32 flags, u32 flags,
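The wrap-around comment in the gpfifo append hunk above describes the usual power-of-two ring copy: one logical append, at most two memcpys when the span crosses the end of the buffer. Below is a standalone sketch of that idea only; the names and the stand-in entry type are invented, and it assumes entry_num is a power of two and n <= entry_num, mirroring the masked put update visible in the earlier hunk.

#include <stdint.h>
#include <string.h>

/* Stand-in for the driver's gpfifo entry (two 32-bit words). */
struct gpfifo_entry_example {
	uint32_t entry0;
	uint32_t entry1;
};

/* Append n entries at *put, splitting the copy at the end of the ring. */
static void example_gpfifo_append(struct gpfifo_entry_example *ring,
				  uint32_t entry_num, uint32_t *put,
				  const struct gpfifo_entry_example *src,
				  uint32_t n)
{
	uint32_t start = *put;

	if (start + n > entry_num) {
		uint32_t length0 = entry_num - start;

		memcpy(ring + start, src, length0 * sizeof(*src));
		memcpy(ring, src + length0, (n - length0) * sizeof(*src));
	} else {
		memcpy(ring + start, src, n * sizeof(*src));
	}

	*put = (start + n) & (entry_num - 1U);
}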

View File

@@ -34,10 +34,10 @@
#include <nvgpu/gr/ctx.h> #include <nvgpu/gr/ctx.h>
#include <nvgpu/runlist.h> #include <nvgpu/runlist.h>
void nvgpu_tsg_disable(struct tsg_gk20a *tsg) void nvgpu_tsg_disable(struct nvgpu_tsg *tsg)
{ {
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
struct channel_gk20a *ch; struct nvgpu_channel *ch;
nvgpu_rwsem_down_read(&tsg->ch_list_lock); nvgpu_rwsem_down_read(&tsg->ch_list_lock);
nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
@@ -46,7 +46,7 @@ void nvgpu_tsg_disable(struct tsg_gk20a *tsg)
nvgpu_rwsem_up_read(&tsg->ch_list_lock); nvgpu_rwsem_up_read(&tsg->ch_list_lock);
} }
struct tsg_gk20a *nvgpu_tsg_check_and_get_from_id(struct gk20a *g, u32 tsgid) struct nvgpu_tsg *nvgpu_tsg_check_and_get_from_id(struct gk20a *g, u32 tsgid)
{ {
if (tsgid == NVGPU_INVALID_TSG_ID) { if (tsgid == NVGPU_INVALID_TSG_ID) {
return NULL; return NULL;
@@ -56,7 +56,7 @@ struct tsg_gk20a *nvgpu_tsg_check_and_get_from_id(struct gk20a *g, u32 tsgid)
} }
struct tsg_gk20a *nvgpu_tsg_get_from_id(struct gk20a *g, u32 tsgid) struct nvgpu_tsg *nvgpu_tsg_get_from_id(struct gk20a *g, u32 tsgid)
{ {
struct nvgpu_fifo *f = &g->fifo; struct nvgpu_fifo *f = &g->fifo;
@@ -64,7 +64,7 @@ struct tsg_gk20a *nvgpu_tsg_get_from_id(struct gk20a *g, u32 tsgid)
} }
static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch) static bool gk20a_is_channel_active(struct gk20a *g, struct nvgpu_channel *ch)
{ {
struct nvgpu_fifo *f = &g->fifo; struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist; struct nvgpu_runlist_info *runlist;
@@ -85,7 +85,7 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
* *
* Note that channel is not runnable when we bind it to TSG * Note that channel is not runnable when we bind it to TSG
*/ */
int nvgpu_tsg_bind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch) int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
int err = 0; int err = 0;
@@ -133,7 +133,7 @@ int nvgpu_tsg_bind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch)
} }
/* The caller must ensure that channel belongs to a tsg */ /* The caller must ensure that channel belongs to a tsg */
int nvgpu_tsg_unbind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch) int nvgpu_tsg_unbind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
int err; int err;
@@ -167,8 +167,8 @@ int nvgpu_tsg_unbind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch)
return 0; return 0;
} }
int nvgpu_tsg_unbind_channel_common(struct tsg_gk20a *tsg, int nvgpu_tsg_unbind_channel_common(struct nvgpu_tsg *tsg,
struct channel_gk20a *ch) struct nvgpu_channel *ch)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
int err; int err;
@@ -238,8 +238,8 @@ fail_enable_tsg:
return err; return err;
} }
int nvgpu_tsg_unbind_channel_check_hw_state(struct tsg_gk20a *tsg, int nvgpu_tsg_unbind_channel_check_hw_state(struct nvgpu_tsg *tsg,
struct channel_gk20a *ch) struct nvgpu_channel *ch)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
struct nvgpu_channel_hw_state hw_state; struct nvgpu_channel_hw_state hw_state;
@@ -264,12 +264,12 @@ int nvgpu_tsg_unbind_channel_check_hw_state(struct tsg_gk20a *tsg,
return 0; return 0;
} }
void nvgpu_tsg_unbind_channel_check_ctx_reload(struct tsg_gk20a *tsg, void nvgpu_tsg_unbind_channel_check_ctx_reload(struct nvgpu_tsg *tsg,
struct channel_gk20a *ch, struct nvgpu_channel *ch,
struct nvgpu_channel_hw_state *hw_state) struct nvgpu_channel_hw_state *hw_state)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
struct channel_gk20a *temp_ch; struct nvgpu_channel *temp_ch;
/* If CTX_RELOAD is set on a channel, move it to some other channel */ /* If CTX_RELOAD is set on a channel, move it to some other channel */
if (hw_state->ctx_reload) { if (hw_state->ctx_reload) {
@@ -285,18 +285,18 @@ void nvgpu_tsg_unbind_channel_check_ctx_reload(struct tsg_gk20a *tsg,
} }
} }
static void nvgpu_tsg_destroy(struct gk20a *g, struct tsg_gk20a *tsg) static void nvgpu_tsg_destroy(struct gk20a *g, struct nvgpu_tsg *tsg)
{ {
nvgpu_mutex_destroy(&tsg->event_id_list_lock); nvgpu_mutex_destroy(&tsg->event_id_list_lock);
} }
/* force reset tsg that the channel is bound to */ /* force reset tsg that the channel is bound to */
int nvgpu_tsg_force_reset_ch(struct channel_gk20a *ch, int nvgpu_tsg_force_reset_ch(struct nvgpu_channel *ch,
u32 err_code, bool verbose) u32 err_code, bool verbose)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch); struct nvgpu_tsg *tsg = tsg_gk20a_from_ch(ch);
if (tsg != NULL) { if (tsg != NULL) {
nvgpu_tsg_set_error_notifier(g, tsg, err_code); nvgpu_tsg_set_error_notifier(g, tsg, err_code);
@@ -315,7 +315,7 @@ void nvgpu_tsg_cleanup_sw(struct gk20a *g)
u32 tsgid; u32 tsgid;
for (tsgid = 0; tsgid < f->num_channels; tsgid++) { for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
struct tsg_gk20a *tsg = &f->tsg[tsgid]; struct nvgpu_tsg *tsg = &f->tsg[tsgid];
nvgpu_tsg_destroy(g, tsg); nvgpu_tsg_destroy(g, tsg);
} }
@@ -327,7 +327,7 @@ void nvgpu_tsg_cleanup_sw(struct gk20a *g)
int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid) int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
{ {
struct tsg_gk20a *tsg = NULL; struct nvgpu_tsg *tsg = NULL;
if (tsgid >= g->fifo.num_channels) { if (tsgid >= g->fifo.num_channels) {
return -EINVAL; return -EINVAL;
@@ -378,7 +378,7 @@ int nvgpu_tsg_setup_sw(struct gk20a *g)
clean_up: clean_up:
for (i = 0; i < tsgid; i++) { for (i = 0; i < tsgid; i++) {
struct tsg_gk20a *tsg = &g->fifo.tsg[i]; struct nvgpu_tsg *tsg = &g->fifo.tsg[i];
nvgpu_tsg_destroy(g, tsg); nvgpu_tsg_destroy(g, tsg);
} }
@@ -391,9 +391,9 @@ clean_up_mutex:
} }
bool nvgpu_tsg_mark_error(struct gk20a *g, bool nvgpu_tsg_mark_error(struct gk20a *g,
struct tsg_gk20a *tsg) struct nvgpu_tsg *tsg)
{ {
struct channel_gk20a *ch = NULL; struct nvgpu_channel *ch = NULL;
bool verbose = false; bool verbose = false;
nvgpu_rwsem_down_read(&tsg->ch_list_lock); nvgpu_rwsem_down_read(&tsg->ch_list_lock);
@@ -411,9 +411,9 @@ bool nvgpu_tsg_mark_error(struct gk20a *g,
} }
void nvgpu_tsg_set_ctxsw_timeout_accumulated_ms(struct tsg_gk20a *tsg, u32 ms) void nvgpu_tsg_set_ctxsw_timeout_accumulated_ms(struct nvgpu_tsg *tsg, u32 ms)
{ {
struct channel_gk20a *ch = NULL; struct nvgpu_channel *ch = NULL;
nvgpu_rwsem_down_read(&tsg->ch_list_lock); nvgpu_rwsem_down_read(&tsg->ch_list_lock);
nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
@@ -425,9 +425,9 @@ void nvgpu_tsg_set_ctxsw_timeout_accumulated_ms(struct tsg_gk20a *tsg, u32 ms)
nvgpu_rwsem_up_read(&tsg->ch_list_lock); nvgpu_rwsem_up_read(&tsg->ch_list_lock);
} }
bool nvgpu_tsg_ctxsw_timeout_debug_dump_state(struct tsg_gk20a *tsg) bool nvgpu_tsg_ctxsw_timeout_debug_dump_state(struct nvgpu_tsg *tsg)
{ {
struct channel_gk20a *ch = NULL; struct nvgpu_channel *ch = NULL;
bool verbose = false; bool verbose = false;
nvgpu_rwsem_down_read(&tsg->ch_list_lock); nvgpu_rwsem_down_read(&tsg->ch_list_lock);
@@ -444,10 +444,10 @@ bool nvgpu_tsg_ctxsw_timeout_debug_dump_state(struct tsg_gk20a *tsg)
return verbose; return verbose;
} }
void nvgpu_tsg_set_error_notifier(struct gk20a *g, struct tsg_gk20a *tsg, void nvgpu_tsg_set_error_notifier(struct gk20a *g, struct nvgpu_tsg *tsg,
u32 error_notifier) u32 error_notifier)
{ {
struct channel_gk20a *ch = NULL; struct nvgpu_channel *ch = NULL;
nvgpu_rwsem_down_read(&tsg->ch_list_lock); nvgpu_rwsem_down_read(&tsg->ch_list_lock);
nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
@@ -459,7 +459,7 @@ void nvgpu_tsg_set_error_notifier(struct gk20a *g, struct tsg_gk20a *tsg,
nvgpu_rwsem_up_read(&tsg->ch_list_lock); nvgpu_rwsem_up_read(&tsg->ch_list_lock);
} }
void nvgpu_tsg_set_ctx_mmu_error(struct gk20a *g, struct tsg_gk20a *tsg) void nvgpu_tsg_set_ctx_mmu_error(struct gk20a *g, struct nvgpu_tsg *tsg)
{ {
nvgpu_err(g, "TSG %d generated a mmu fault", tsg->tsgid); nvgpu_err(g, "TSG %d generated a mmu fault", tsg->tsgid);
@@ -467,10 +467,10 @@ void nvgpu_tsg_set_ctx_mmu_error(struct gk20a *g, struct tsg_gk20a *tsg)
NVGPU_ERR_NOTIFIER_FIFO_ERROR_MMU_ERR_FLT); NVGPU_ERR_NOTIFIER_FIFO_ERROR_MMU_ERR_FLT);
} }
bool nvgpu_tsg_check_ctxsw_timeout(struct tsg_gk20a *tsg, bool nvgpu_tsg_check_ctxsw_timeout(struct nvgpu_tsg *tsg,
bool *debug_dump, u32 *ms) bool *debug_dump, u32 *ms)
{ {
struct channel_gk20a *ch; struct nvgpu_channel *ch;
bool recover = false; bool recover = false;
bool progress = false; bool progress = false;
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
@@ -530,7 +530,7 @@ bool nvgpu_tsg_check_ctxsw_timeout(struct tsg_gk20a *tsg,
return recover; return recover;
} }
int nvgpu_tsg_set_interleave(struct tsg_gk20a *tsg, u32 level) int nvgpu_tsg_set_interleave(struct nvgpu_tsg *tsg, u32 level)
{ {
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
int ret; int ret;
@@ -560,7 +560,7 @@ int nvgpu_tsg_set_interleave(struct tsg_gk20a *tsg, u32 level)
return g->ops.runlist.reload(g, tsg->runlist_id, true, true); return g->ops.runlist.reload(g, tsg->runlist_id, true, true);
} }
int nvgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice_us) int nvgpu_tsg_set_timeslice(struct nvgpu_tsg *tsg, u32 timeslice_us)
{ {
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
@@ -577,7 +577,7 @@ int nvgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice_us)
return g->ops.runlist.reload(g, tsg->runlist_id, true, true); return g->ops.runlist.reload(g, tsg->runlist_id, true, true);
} }
u32 nvgpu_tsg_get_timeslice(struct tsg_gk20a *tsg) u32 nvgpu_tsg_get_timeslice(struct nvgpu_tsg *tsg)
{ {
return tsg->timeslice_us; return tsg->timeslice_us;
} }
@@ -587,29 +587,29 @@ u32 nvgpu_tsg_default_timeslice_us(struct gk20a *g)
return NVGPU_TSG_TIMESLICE_DEFAULT_US; return NVGPU_TSG_TIMESLICE_DEFAULT_US;
} }
void nvgpu_tsg_enable_sched(struct gk20a *g, struct tsg_gk20a *tsg) void nvgpu_tsg_enable_sched(struct gk20a *g, struct nvgpu_tsg *tsg)
{ {
nvgpu_fifo_runlist_set_state(g, BIT32(tsg->runlist_id), nvgpu_fifo_runlist_set_state(g, BIT32(tsg->runlist_id),
RUNLIST_ENABLED); RUNLIST_ENABLED);
} }
void nvgpu_tsg_disable_sched(struct gk20a *g, struct tsg_gk20a *tsg) void nvgpu_tsg_disable_sched(struct gk20a *g, struct nvgpu_tsg *tsg)
{ {
nvgpu_fifo_runlist_set_state(g, BIT32(tsg->runlist_id), nvgpu_fifo_runlist_set_state(g, BIT32(tsg->runlist_id),
RUNLIST_DISABLED); RUNLIST_DISABLED);
} }
static void release_used_tsg(struct nvgpu_fifo *f, struct tsg_gk20a *tsg) static void release_used_tsg(struct nvgpu_fifo *f, struct nvgpu_tsg *tsg)
{ {
nvgpu_mutex_acquire(&f->tsg_inuse_mutex); nvgpu_mutex_acquire(&f->tsg_inuse_mutex);
f->tsg[tsg->tsgid].in_use = false; f->tsg[tsg->tsgid].in_use = false;
nvgpu_mutex_release(&f->tsg_inuse_mutex); nvgpu_mutex_release(&f->tsg_inuse_mutex);
} }
static struct tsg_gk20a *gk20a_tsg_acquire_unused_tsg(struct nvgpu_fifo *f) static struct nvgpu_tsg *gk20a_tsg_acquire_unused_tsg(struct nvgpu_fifo *f)
{ {
struct tsg_gk20a *tsg = NULL; struct nvgpu_tsg *tsg = NULL;
unsigned int tsgid; unsigned int tsgid;
nvgpu_mutex_acquire(&f->tsg_inuse_mutex); nvgpu_mutex_acquire(&f->tsg_inuse_mutex);
@@ -625,7 +625,7 @@ static struct tsg_gk20a *gk20a_tsg_acquire_unused_tsg(struct nvgpu_fifo *f)
return tsg; return tsg;
} }
int nvgpu_tsg_open_common(struct gk20a *g, struct tsg_gk20a *tsg, pid_t pid) int nvgpu_tsg_open_common(struct gk20a *g, struct nvgpu_tsg *tsg, pid_t pid)
{ {
u32 no_of_sm = g->ops.gr.init.get_no_of_sm(g); u32 no_of_sm = g->ops.gr.init.get_no_of_sm(g);
int err; int err;
@@ -681,9 +681,9 @@ clean_up:
return err; return err;
} }
struct tsg_gk20a *nvgpu_tsg_open(struct gk20a *g, pid_t pid) struct nvgpu_tsg *nvgpu_tsg_open(struct gk20a *g, pid_t pid)
{ {
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
int err; int err;
tsg = gk20a_tsg_acquire_unused_tsg(&g->fifo); tsg = gk20a_tsg_acquire_unused_tsg(&g->fifo);
@@ -703,7 +703,7 @@ struct tsg_gk20a *nvgpu_tsg_open(struct gk20a *g, pid_t pid)
return tsg; return tsg;
} }
void nvgpu_tsg_release_common(struct gk20a *g, struct tsg_gk20a *tsg) void nvgpu_tsg_release_common(struct gk20a *g, struct nvgpu_tsg *tsg)
{ {
if (g->ops.tsg.release != NULL) { if (g->ops.tsg.release != NULL) {
g->ops.tsg.release(tsg); g->ops.tsg.release(tsg);
@@ -728,15 +728,15 @@ void nvgpu_tsg_release_common(struct gk20a *g, struct tsg_gk20a *tsg)
} }
} }
static struct tsg_gk20a *tsg_gk20a_from_ref(struct nvgpu_ref *ref) static struct nvgpu_tsg *tsg_gk20a_from_ref(struct nvgpu_ref *ref)
{ {
return (struct tsg_gk20a *) return (struct nvgpu_tsg *)
((uintptr_t)ref - offsetof(struct tsg_gk20a, refcount)); ((uintptr_t)ref - offsetof(struct nvgpu_tsg, refcount));
} }
void nvgpu_tsg_release(struct nvgpu_ref *ref) void nvgpu_tsg_release(struct nvgpu_ref *ref)
{ {
struct tsg_gk20a *tsg = tsg_gk20a_from_ref(ref); struct nvgpu_tsg *tsg = tsg_gk20a_from_ref(ref);
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
if (tsg->gr_ctx != NULL && nvgpu_mem_is_valid( if (tsg->gr_ctx != NULL && nvgpu_mem_is_valid(
@@ -758,9 +758,9 @@ void nvgpu_tsg_release(struct nvgpu_ref *ref)
nvgpu_log(g, gpu_dbg_fn, "tsg released %d", tsg->tsgid); nvgpu_log(g, gpu_dbg_fn, "tsg released %d", tsg->tsgid);
} }
struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch) struct nvgpu_tsg *tsg_gk20a_from_ch(struct nvgpu_channel *ch)
{ {
struct tsg_gk20a *tsg = NULL; struct nvgpu_tsg *tsg = NULL;
u32 tsgid = ch->tsgid; u32 tsgid = ch->tsgid;
if (tsgid != NVGPU_INVALID_TSG_ID) { if (tsgid != NVGPU_INVALID_TSG_ID) {
@@ -776,7 +776,7 @@ struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch)
} }
int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g, int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g,
struct tsg_gk20a *tsg, struct nvgpu_tsg *tsg,
u32 num_sm) u32 num_sm)
{ {
int err = 0; int err = 0;
@@ -802,7 +802,7 @@ int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g,
return err; return err;
} }
void gk20a_tsg_update_sm_error_state_locked(struct tsg_gk20a *tsg, void gk20a_tsg_update_sm_error_state_locked(struct nvgpu_tsg *tsg,
u32 sm_id, u32 sm_id,
struct nvgpu_tsg_sm_error_state *sm_error_state) struct nvgpu_tsg_sm_error_state *sm_error_state)
{ {
@@ -822,10 +822,10 @@ void gk20a_tsg_update_sm_error_state_locked(struct tsg_gk20a *tsg,
sm_error_state->hww_warp_esr_report_mask; sm_error_state->hww_warp_esr_report_mask;
} }
int gk20a_tsg_set_sm_exception_type_mask(struct channel_gk20a *ch, int gk20a_tsg_set_sm_exception_type_mask(struct nvgpu_channel *ch,
u32 exception_mask) u32 exception_mask)
{ {
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
tsg = tsg_gk20a_from_ch(ch); tsg = tsg_gk20a_from_ch(ch);
if (tsg == NULL) { if (tsg == NULL) {
@@ -839,9 +839,9 @@ int gk20a_tsg_set_sm_exception_type_mask(struct channel_gk20a *ch,
return 0; return 0;
} }
void nvgpu_tsg_abort(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt) void nvgpu_tsg_abort(struct gk20a *g, struct nvgpu_tsg *tsg, bool preempt)
{ {
struct channel_gk20a *ch = NULL; struct nvgpu_channel *ch = NULL;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
@@ -871,10 +871,10 @@ void nvgpu_tsg_abort(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
nvgpu_rwsem_up_read(&tsg->ch_list_lock); nvgpu_rwsem_up_read(&tsg->ch_list_lock);
} }
void nvgpu_tsg_reset_faulted_eng_pbdma(struct gk20a *g, struct tsg_gk20a *tsg, void nvgpu_tsg_reset_faulted_eng_pbdma(struct gk20a *g, struct nvgpu_tsg *tsg,
bool eng, bool pbdma) bool eng, bool pbdma)
{ {
struct channel_gk20a *ch; struct nvgpu_channel *ch;
if (g->ops.channel.reset_faulted == NULL) { if (g->ops.channel.reset_faulted == NULL) {
return; return;
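One pattern worth calling out from the tsg hunks above: tsg_gk20a_from_ref() recovers the containing nvgpu_tsg from a pointer to its embedded refcount member via offsetof. A self-contained sketch of the same pattern, with example_* names standing in for the real types:

#include <stddef.h>
#include <stdint.h>

struct example_ref {
	int count;
};

struct example_tsg {
	int tsgid;
	struct example_ref refcount; /* embedded member, like nvgpu_tsg's refcount */
};

/* Recover the containing struct from a pointer to its embedded member. */
static struct example_tsg *example_tsg_from_ref(struct example_ref *ref)
{
	return (struct example_tsg *)
		((uintptr_t)ref - offsetof(struct example_tsg, refcount));
}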

View File

@@ -78,7 +78,7 @@ void nvgpu_userd_free_slabs(struct gk20a *g)
} }
#endif #endif
int nvgpu_userd_init_channel(struct gk20a *g, struct channel_gk20a *c) int nvgpu_userd_init_channel(struct gk20a *g, struct nvgpu_channel *c)
{ {
#ifdef NVGPU_USERD #ifdef NVGPU_USERD
struct nvgpu_fifo *f = &g->fifo; struct nvgpu_fifo *f = &g->fifo;

View File

@@ -63,7 +63,7 @@ static void gr_intr_report_ctxsw_error(struct gk20a *g, u32 err_type, u32 chid,
} }
static int gr_intr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, static int gr_intr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event, struct channel_gk20a *fault_ch, bool *post_event, struct nvgpu_channel *fault_ch,
u32 *hww_global_esr) u32 *hww_global_esr)
{ {
int tmp_ret, ret = 0; int tmp_ret, ret = 0;
@@ -137,7 +137,7 @@ static int gr_intr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
return ret; return ret;
} }
static void gr_intr_post_bpt_events(struct gk20a *g, struct tsg_gk20a *tsg, static void gr_intr_post_bpt_events(struct gk20a *g, struct nvgpu_tsg *tsg,
u32 global_esr) u32 global_esr)
{ {
if (g->ops.gr.esr_bpt_pending_events(global_esr, if (g->ops.gr.esr_bpt_pending_events(global_esr,
@@ -188,7 +188,7 @@ static void gr_intr_report_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
{ {
int ret; int ret;
struct gr_sm_mcerr_info err_info; struct gr_sm_mcerr_info err_info;
struct channel_gk20a *ch; struct nvgpu_channel *ch;
struct gr_err_info info; struct gr_err_info info;
u32 tsgid, chid, curr_ctx, inst = 0; u32 tsgid, chid, curr_ctx, inst = 0;
@@ -232,7 +232,7 @@ static void gr_intr_report_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
* A small tlb is used here to cache translation. * A small tlb is used here to cache translation.
* *
* Returned channel must be freed with gk20a_channel_put() */ * Returned channel must be freed with gk20a_channel_put() */
struct channel_gk20a *nvgpu_gr_intr_get_channel_from_ctx(struct gk20a *g, struct nvgpu_channel *nvgpu_gr_intr_get_channel_from_ctx(struct gk20a *g,
u32 curr_ctx, u32 *curr_tsgid) u32 curr_ctx, u32 *curr_tsgid)
{ {
struct nvgpu_fifo *f = &g->fifo; struct nvgpu_fifo *f = &g->fifo;
@@ -240,7 +240,7 @@ struct channel_gk20a *nvgpu_gr_intr_get_channel_from_ctx(struct gk20a *g,
u32 chid; u32 chid;
u32 tsgid = NVGPU_INVALID_TSG_ID; u32 tsgid = NVGPU_INVALID_TSG_ID;
u32 i; u32 i;
struct channel_gk20a *ret_ch = NULL; struct nvgpu_channel *ret_ch = NULL;
/* when contexts are unloaded from GR, the valid bit is reset /* when contexts are unloaded from GR, the valid bit is reset
* but the instance pointer information remains intact. * but the instance pointer information remains intact.
@@ -262,7 +262,7 @@ struct channel_gk20a *nvgpu_gr_intr_get_channel_from_ctx(struct gk20a *g,
/* slow path */ /* slow path */
for (chid = 0; chid < f->num_channels; chid++) { for (chid = 0; chid < f->num_channels; chid++) {
struct channel_gk20a *ch = gk20a_channel_from_id(g, chid); struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
if (ch == NULL) { if (ch == NULL) {
continue; continue;
@@ -313,7 +313,7 @@ void nvgpu_gr_intr_report_exception(struct gk20a *g, u32 inst,
u32 err_type, u32 status) u32 err_type, u32 status)
{ {
int ret = 0; int ret = 0;
struct channel_gk20a *ch; struct nvgpu_channel *ch;
struct gr_exception_info err_info; struct gr_exception_info err_info;
struct gr_err_info info; struct gr_err_info info;
u32 tsgid, chid, curr_ctx; u32 tsgid, chid, curr_ctx;
@@ -350,8 +350,8 @@ void nvgpu_gr_intr_report_exception(struct gk20a *g, u32 inst,
void nvgpu_gr_intr_set_error_notifier(struct gk20a *g, void nvgpu_gr_intr_set_error_notifier(struct gk20a *g,
struct nvgpu_gr_isr_data *isr_data, u32 error_notifier) struct nvgpu_gr_isr_data *isr_data, u32 error_notifier)
{ {
struct channel_gk20a *ch; struct nvgpu_channel *ch;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
ch = isr_data->ch; ch = isr_data->ch;
@@ -368,7 +368,7 @@ void nvgpu_gr_intr_set_error_notifier(struct gk20a *g,
} }
int nvgpu_gr_intr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, int nvgpu_gr_intr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
bool *post_event, struct channel_gk20a *fault_ch, bool *post_event, struct nvgpu_channel *fault_ch,
u32 *hww_global_esr) u32 *hww_global_esr)
{ {
int ret = 0; int ret = 0;
@@ -473,7 +473,7 @@ int nvgpu_gr_intr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
return ret; return ret;
} }
int nvgpu_gr_intr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch, int nvgpu_gr_intr_handle_fecs_error(struct gk20a *g, struct nvgpu_channel *ch,
struct nvgpu_gr_isr_data *isr_data) struct nvgpu_gr_isr_data *isr_data)
{ {
u32 gr_fecs_intr, mailbox_value; u32 gr_fecs_intr, mailbox_value;
@@ -565,7 +565,7 @@ int nvgpu_gr_intr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch,
} }
int nvgpu_gr_intr_handle_gpc_exception(struct gk20a *g, bool *post_event, int nvgpu_gr_intr_handle_gpc_exception(struct gk20a *g, bool *post_event,
struct nvgpu_gr_config *gr_config, struct channel_gk20a *fault_ch, struct nvgpu_gr_config *gr_config, struct nvgpu_channel *fault_ch,
u32 *hww_global_esr) u32 *hww_global_esr)
{ {
int tmp_ret, ret = 0; int tmp_ret, ret = 0;
@@ -634,7 +634,7 @@ int nvgpu_gr_intr_handle_gpc_exception(struct gk20a *g, bool *post_event,
void nvgpu_gr_intr_handle_notify_pending(struct gk20a *g, void nvgpu_gr_intr_handle_notify_pending(struct gk20a *g,
struct nvgpu_gr_isr_data *isr_data) struct nvgpu_gr_isr_data *isr_data)
{ {
struct channel_gk20a *ch = isr_data->ch; struct nvgpu_channel *ch = isr_data->ch;
int err; int err;
if (ch == NULL) { if (ch == NULL) {
@@ -660,8 +660,8 @@ void nvgpu_gr_intr_handle_notify_pending(struct gk20a *g,
void nvgpu_gr_intr_handle_semaphore_pending(struct gk20a *g, void nvgpu_gr_intr_handle_semaphore_pending(struct gk20a *g,
struct nvgpu_gr_isr_data *isr_data) struct nvgpu_gr_isr_data *isr_data)
{ {
struct channel_gk20a *ch = isr_data->ch; struct nvgpu_channel *ch = isr_data->ch;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
if (ch == NULL) { if (ch == NULL) {
return; return;
@@ -688,10 +688,10 @@ int nvgpu_gr_intr_stall_isr(struct gk20a *g)
struct nvgpu_gr_isr_data isr_data; struct nvgpu_gr_isr_data isr_data;
struct nvgpu_gr_intr_info intr_info; struct nvgpu_gr_intr_info intr_info;
bool need_reset = false; bool need_reset = false;
struct channel_gk20a *ch = NULL; struct nvgpu_channel *ch = NULL;
struct channel_gk20a *fault_ch = NULL; struct nvgpu_channel *fault_ch = NULL;
u32 tsgid = NVGPU_INVALID_TSG_ID; u32 tsgid = NVGPU_INVALID_TSG_ID;
struct tsg_gk20a *tsg = NULL; struct nvgpu_tsg *tsg = NULL;
u32 global_esr = 0; u32 global_esr = 0;
u32 chid; u32 chid;
struct nvgpu_gr_config *gr_config = g->gr->config; struct nvgpu_gr_config *gr_config = g->gr->config;
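The "small tlb" in nvgpu_gr_intr_get_channel_from_ctx() above is a tiny software cache of curr_ctx to (chid, tsgid) translations, with the per-channel scan as the slow path when the cache misses. A toy standalone version of such a lookup, with invented sizes and names rather than the driver's actual structures:

#include <stdint.h>

#define EXAMPLE_TLB_SIZE	4U
#define EXAMPLE_INVALID_ID	0xffffffffU

struct example_tlb_entry {
	uint32_t curr_ctx;	/* cached context pointer value, 0 means unused */
	uint32_t chid;
	uint32_t tsgid;
};

static struct example_tlb_entry example_tlb[EXAMPLE_TLB_SIZE];

/* Fast path: return the cached chid for curr_ctx, or EXAMPLE_INVALID_ID. */
static uint32_t example_tlb_lookup(uint32_t curr_ctx, uint32_t *tsgid)
{
	uint32_t i;

	for (i = 0U; i < EXAMPLE_TLB_SIZE; i++) {
		if (curr_ctx != 0U && example_tlb[i].curr_ctx == curr_ctx) {
			*tsgid = example_tlb[i].tsgid;
			return example_tlb[i].chid;
		}
	}

	/* miss: the caller falls back to scanning every channel */
	return EXAMPLE_INVALID_ID;
}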

View File

@@ -26,7 +26,7 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
#include <nvgpu/lock.h> #include <nvgpu/lock.h>
struct channel_gk20a; struct nvgpu_channel;
struct nvgpu_gr_intr_info { struct nvgpu_gr_intr_info {
u32 notify; u32 notify;
@@ -51,7 +51,7 @@ struct nvgpu_gr_isr_data {
u32 data_lo; u32 data_lo;
u32 data_hi; u32 data_hi;
u32 curr_ctx; u32 curr_ctx;
struct channel_gk20a *ch; struct nvgpu_channel *ch;
u32 offset; u32 offset;
u32 sub_chan; u32 sub_chan;
u32 class_num; u32 class_num;

View File

@@ -32,7 +32,7 @@
#include "gr_priv.h" #include "gr_priv.h"
static int nvgpu_gr_setup_zcull(struct gk20a *g, struct channel_gk20a *c, static int nvgpu_gr_setup_zcull(struct gk20a *g, struct nvgpu_channel *c,
struct nvgpu_gr_ctx *gr_ctx) struct nvgpu_gr_ctx *gr_ctx)
{ {
int ret = 0; int ret = 0;
@@ -67,10 +67,10 @@ static int nvgpu_gr_setup_zcull(struct gk20a *g, struct channel_gk20a *c,
return ret; return ret;
} }
int nvgpu_gr_setup_bind_ctxsw_zcull(struct gk20a *g, struct channel_gk20a *c, int nvgpu_gr_setup_bind_ctxsw_zcull(struct gk20a *g, struct nvgpu_channel *c,
u64 zcull_va, u32 mode) u64 zcull_va, u32 mode)
{ {
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
tsg = tsg_gk20a_from_ch(c); tsg = tsg_gk20a_from_ch(c);
@@ -84,12 +84,12 @@ int nvgpu_gr_setup_bind_ctxsw_zcull(struct gk20a *g, struct channel_gk20a *c,
return nvgpu_gr_setup_zcull(g, c, gr_ctx); return nvgpu_gr_setup_zcull(g, c, gr_ctx);
} }
int nvgpu_gr_setup_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, int nvgpu_gr_setup_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num,
u32 flags) u32 flags)
{ {
struct gk20a *g = c->g; struct gk20a *g = c->g;
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
struct tsg_gk20a *tsg = NULL; struct nvgpu_tsg *tsg = NULL;
int err = 0; int err = 0;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
@@ -194,7 +194,7 @@ void nvgpu_gr_setup_free_gr_ctx(struct gk20a *g,
} }
} }
void nvgpu_gr_setup_free_subctx(struct channel_gk20a *c) void nvgpu_gr_setup_free_subctx(struct nvgpu_channel *c)
{ {
nvgpu_log_fn(c->g, " "); nvgpu_log_fn(c->g, " ");
@@ -207,13 +207,13 @@ void nvgpu_gr_setup_free_subctx(struct channel_gk20a *c)
} }
} }
int nvgpu_gr_setup_set_preemption_mode(struct channel_gk20a *ch, int nvgpu_gr_setup_set_preemption_mode(struct nvgpu_channel *ch,
u32 graphics_preempt_mode, u32 graphics_preempt_mode,
u32 compute_preempt_mode) u32 compute_preempt_mode)
{ {
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
struct vm_gk20a *vm; struct vm_gk20a *vm;
u32 class; u32 class;
int err = 0; int err = 0;

View File

@@ -62,7 +62,7 @@ int vm_aspace_id(struct vm_gk20a *vm)
return (vm->as_share != NULL) ? vm->as_share->id : -1; return (vm->as_share != NULL) ? vm->as_share->id : -1;
} }
int nvgpu_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch) int nvgpu_vm_bind_channel(struct vm_gk20a *vm, struct nvgpu_channel *ch)
{ {
int err = 0; int err = 0;

View File

@@ -103,7 +103,7 @@ static int css_gr_create_shared_data(struct gk20a *g)
return 0; return 0;
} }
int nvgpu_css_enable_snapshot(struct channel_gk20a *ch, int nvgpu_css_enable_snapshot(struct nvgpu_channel *ch,
struct gk20a_cs_snapshot_client *cs_client) struct gk20a_cs_snapshot_client *cs_client)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
@@ -205,7 +205,7 @@ nvgpu_css_gr_search_client(struct nvgpu_list_node *clients, u32 perfmon)
return NULL; return NULL;
} }
static int css_gr_flush_snapshots(struct channel_gk20a *ch) static int css_gr_flush_snapshots(struct nvgpu_channel *ch)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
struct gk20a_cs_snapshot *css = g->cs_data; struct gk20a_cs_snapshot *css = g->cs_data;
@@ -458,7 +458,7 @@ static int css_gr_create_client_data(struct gk20a *g,
} }
int nvgpu_css_attach(struct channel_gk20a *ch, int nvgpu_css_attach(struct nvgpu_channel *ch,
u32 perfmon_count, u32 perfmon_count,
u32 *perfmon_start, u32 *perfmon_start,
struct gk20a_cs_snapshot_client *cs_client) struct gk20a_cs_snapshot_client *cs_client)
@@ -525,7 +525,7 @@ failed:
return ret; return ret;
} }
int nvgpu_css_detach(struct channel_gk20a *ch, int nvgpu_css_detach(struct nvgpu_channel *ch,
struct gk20a_cs_snapshot_client *cs_client) struct gk20a_cs_snapshot_client *cs_client)
{ {
int ret = 0; int ret = 0;
@@ -555,7 +555,7 @@ int nvgpu_css_detach(struct channel_gk20a *ch,
return ret; return ret;
} }
int nvgpu_css_flush(struct channel_gk20a *ch, int nvgpu_css_flush(struct nvgpu_channel *ch,
struct gk20a_cs_snapshot_client *cs_client) struct gk20a_cs_snapshot_client *cs_client)
{ {
int ret = 0; int ret = 0;
@@ -581,7 +581,7 @@ void nvgpu_free_cyclestats_snapshot_data(struct gk20a *g)
nvgpu_mutex_destroy(&g->cs_lock); nvgpu_mutex_destroy(&g->cs_lock);
} }
int nvgpu_css_check_data_available(struct channel_gk20a *ch, u32 *pending, int nvgpu_css_check_data_available(struct nvgpu_channel *ch, u32 *pending,
bool *hw_overflow) bool *hw_overflow)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;

View File

@@ -58,7 +58,7 @@ void nvgpu_rc_fifo_recover(struct gk20a *g, u32 eng_bitmask,
} }
void nvgpu_rc_ctxsw_timeout(struct gk20a *g, u32 eng_bitmask, void nvgpu_rc_ctxsw_timeout(struct gk20a *g, u32 eng_bitmask,
struct tsg_gk20a *tsg, bool debug_dump) struct nvgpu_tsg *tsg, bool debug_dump)
{ {
nvgpu_tsg_set_error_notifier(g, tsg, nvgpu_tsg_set_error_notifier(g, tsg,
NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
@@ -85,14 +85,14 @@ void nvgpu_rc_pbdma_fault(struct gk20a *g, struct nvgpu_fifo *f,
/* Remove channel from runlist */ /* Remove channel from runlist */
id = pbdma_status.id; id = pbdma_status.id;
if (pbdma_status.id_type == PBDMA_STATUS_ID_TYPE_TSGID) { if (pbdma_status.id_type == PBDMA_STATUS_ID_TYPE_TSGID) {
struct tsg_gk20a *tsg = nvgpu_tsg_get_from_id(g, id); struct nvgpu_tsg *tsg = nvgpu_tsg_get_from_id(g, id);
nvgpu_tsg_set_error_notifier(g, tsg, error_notifier); nvgpu_tsg_set_error_notifier(g, tsg, error_notifier);
nvgpu_rc_tsg_and_related_engines(g, tsg, true, nvgpu_rc_tsg_and_related_engines(g, tsg, true,
RC_TYPE_PBDMA_FAULT); RC_TYPE_PBDMA_FAULT);
} else if(pbdma_status.id_type == PBDMA_STATUS_ID_TYPE_CHID) { } else if(pbdma_status.id_type == PBDMA_STATUS_ID_TYPE_CHID) {
struct channel_gk20a *ch = gk20a_channel_from_id(g, id); struct nvgpu_channel *ch = gk20a_channel_from_id(g, id);
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
if (ch == NULL) { if (ch == NULL) {
nvgpu_err(g, "channel is not referenceable"); nvgpu_err(g, "channel is not referenceable");
return; return;
@@ -123,7 +123,7 @@ void nvgpu_rc_runlist_update(struct gk20a *g, u32 runlist_id)
} }
} }
void nvgpu_rc_preempt_timeout(struct gk20a *g, struct tsg_gk20a *tsg) void nvgpu_rc_preempt_timeout(struct gk20a *g, struct nvgpu_tsg *tsg)
{ {
nvgpu_tsg_set_error_notifier(g, tsg, nvgpu_tsg_set_error_notifier(g, tsg,
NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT); NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
@@ -131,8 +131,8 @@ void nvgpu_rc_preempt_timeout(struct gk20a *g, struct tsg_gk20a *tsg)
nvgpu_rc_tsg_and_related_engines(g, tsg, true, RC_TYPE_PREEMPT_TIMEOUT); nvgpu_rc_tsg_and_related_engines(g, tsg, true, RC_TYPE_PREEMPT_TIMEOUT);
} }
void nvgpu_rc_gr_fault(struct gk20a *g, struct tsg_gk20a *tsg, void nvgpu_rc_gr_fault(struct gk20a *g, struct nvgpu_tsg *tsg,
struct channel_gk20a *ch) struct nvgpu_channel *ch)
{ {
u32 gr_engine_id; u32 gr_engine_id;
u32 gr_eng_bitmask = 0U; u32 gr_eng_bitmask = 0U;
@@ -164,7 +164,7 @@ void nvgpu_rc_sched_error_bad_tsg(struct gk20a *g)
RC_TYPE_SCHED_ERR); RC_TYPE_SCHED_ERR);
} }
void nvgpu_rc_tsg_and_related_engines(struct gk20a *g, struct tsg_gk20a *tsg, void nvgpu_rc_tsg_and_related_engines(struct gk20a *g, struct nvgpu_tsg *tsg,
bool debug_dump, u32 rc_type) bool debug_dump, u32 rc_type)
{ {
u32 eng_bitmask = 0U; u32 eng_bitmask = 0U;

View File

@@ -81,7 +81,7 @@ static bool validate_reg_ops(struct gk20a *g,
bool is_profiler); bool is_profiler);
int exec_regops_gk20a(struct gk20a *g, int exec_regops_gk20a(struct gk20a *g,
struct channel_gk20a *ch, struct nvgpu_channel *ch,
struct nvgpu_dbg_reg_op *ops, struct nvgpu_dbg_reg_op *ops,
u32 num_ops, u32 num_ops,
bool is_profiler, bool is_profiler,

View File

@@ -40,7 +40,7 @@
* Since semaphores are ref-counted there's no explicit free for external code * Since semaphores are ref-counted there's no explicit free for external code
* to use. When the ref-count hits 0 the internal free will happen. * to use. When the ref-count hits 0 the internal free will happen.
*/ */
struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch) struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct nvgpu_channel *ch)
{ {
struct nvgpu_semaphore *s; struct nvgpu_semaphore *s;
int ret; int ret;
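The comment above states the semaphore lifetime rule: callers only take and drop references, and the final put performs the free internally. A minimal standalone sketch of that contract, using a plain C11 atomic counter in place of nvgpu_ref (all names here are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct example_sema {
	atomic_int refcount;
	/* payload would live here */
};

/* Allocation hands the caller the one initial reference. */
static struct example_sema *example_sema_alloc(void)
{
	struct example_sema *s = calloc(1, sizeof(*s));

	if (s != NULL) {
		atomic_init(&s->refcount, 1);
	}
	return s;
}

static void example_sema_get(struct example_sema *s)
{
	atomic_fetch_add(&s->refcount, 1);
}

/* No explicit free: dropping the last reference releases the object. */
static void example_sema_put(struct example_sema *s)
{
	if (atomic_fetch_sub(&s->refcount, 1) == 1) {
		free(s);
	}
}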

View File

@@ -30,7 +30,7 @@
#include "semaphore_priv.h" #include "semaphore_priv.h"
int nvgpu_hw_semaphore_init(struct channel_gk20a *ch) int nvgpu_hw_semaphore_init(struct nvgpu_channel *ch)
{ {
int hw_sema_idx; int hw_sema_idx;
int ret = 0; int ret = 0;
@@ -78,7 +78,7 @@ fail:
/* /*
* Free the channel used semaphore index * Free the channel used semaphore index
*/ */
void nvgpu_hw_semaphore_free(struct channel_gk20a *ch) void nvgpu_hw_semaphore_free(struct nvgpu_channel *ch)
{ {
struct nvgpu_semaphore_pool *p = ch->vm->sema_pool; struct nvgpu_semaphore_pool *p = ch->vm->sema_pool;
struct nvgpu_hw_semaphore *hw_sema = ch->hw_sema; struct nvgpu_hw_semaphore *hw_sema = ch->hw_sema;

View File

@@ -125,7 +125,7 @@ struct nvgpu_semaphore_loc {
struct nvgpu_hw_semaphore { struct nvgpu_hw_semaphore {
struct nvgpu_semaphore_loc location; struct nvgpu_semaphore_loc location;
nvgpu_atomic_t next_value; /* Next available value. */ nvgpu_atomic_t next_value; /* Next available value. */
struct channel_gk20a *ch; /* Channel that owns this sema. */ struct nvgpu_channel *ch; /* Channel that owns this sema. */
}; };
/* /*

View File

@@ -39,7 +39,7 @@
#include "channel_sync_priv.h" #include "channel_sync_priv.h"
struct nvgpu_channel_sync *nvgpu_channel_sync_create(struct channel_gk20a *c, struct nvgpu_channel_sync *nvgpu_channel_sync_create(struct nvgpu_channel *c,
bool user_managed) bool user_managed)
{ {
if (nvgpu_has_syncpoints(c->g)) { if (nvgpu_has_syncpoints(c->g)) {

View File

@@ -41,7 +41,7 @@
struct nvgpu_channel_sync_semaphore { struct nvgpu_channel_sync_semaphore {
struct nvgpu_channel_sync ops; struct nvgpu_channel_sync ops;
struct channel_gk20a *c; struct nvgpu_channel *c;
/* A semaphore pool owned by this channel. */ /* A semaphore pool owned by this channel. */
struct nvgpu_semaphore_pool *pool; struct nvgpu_semaphore_pool *pool;
@@ -55,7 +55,7 @@ nvgpu_channel_sync_semaphore_from_ops(struct nvgpu_channel_sync *ops)
offsetof(struct nvgpu_channel_sync_semaphore, ops)); offsetof(struct nvgpu_channel_sync_semaphore, ops));
} }
static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c, static void add_sema_cmd(struct gk20a *g, struct nvgpu_channel *c,
struct nvgpu_semaphore *s, struct priv_cmd_entry *cmd, struct nvgpu_semaphore *s, struct priv_cmd_entry *cmd,
u32 offset, bool acquire, bool wfi) u32 offset, bool acquire, bool wfi)
{ {
@@ -98,7 +98,7 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
} }
} }
static void channel_sync_semaphore_gen_wait_cmd(struct channel_gk20a *c, static void channel_sync_semaphore_gen_wait_cmd(struct nvgpu_channel *c,
struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd, struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd,
u32 wait_cmd_size, u32 pos) u32 wait_cmd_size, u32 pos)
{ {
@@ -124,7 +124,7 @@ static int channel_sync_semaphore_wait_fd(
{ {
struct nvgpu_channel_sync_semaphore *sema = struct nvgpu_channel_sync_semaphore *sema =
nvgpu_channel_sync_semaphore_from_ops(s); nvgpu_channel_sync_semaphore_from_ops(s);
struct channel_gk20a *c = sema->c; struct nvgpu_channel *c = sema->c;
struct nvgpu_os_fence os_fence = {0}; struct nvgpu_os_fence os_fence = {0};
struct nvgpu_os_fence_sema os_fence_sema = {0}; struct nvgpu_os_fence_sema os_fence_sema = {0};
@@ -182,7 +182,7 @@ static int channel_sync_semaphore_incr_common(
u32 incr_cmd_size; u32 incr_cmd_size;
struct nvgpu_channel_sync_semaphore *sp = struct nvgpu_channel_sync_semaphore *sp =
nvgpu_channel_sync_semaphore_from_ops(s); nvgpu_channel_sync_semaphore_from_ops(s);
struct channel_gk20a *c = sp->c; struct nvgpu_channel *c = sp->c;
struct nvgpu_semaphore *semaphore; struct nvgpu_semaphore *semaphore;
int err = 0; int err = 0;
struct nvgpu_os_fence os_fence = {0}; struct nvgpu_os_fence os_fence = {0};
@@ -279,7 +279,7 @@ static void channel_sync_semaphore_set_min_eq_max(struct nvgpu_channel_sync *s)
{ {
struct nvgpu_channel_sync_semaphore *sp = struct nvgpu_channel_sync_semaphore *sp =
nvgpu_channel_sync_semaphore_from_ops(s); nvgpu_channel_sync_semaphore_from_ops(s);
struct channel_gk20a *c = sp->c; struct nvgpu_channel *c = sp->c;
bool updated; bool updated;
if (c->hw_sema == NULL) { if (c->hw_sema == NULL) {
@@ -303,7 +303,7 @@ static void channel_sync_semaphore_destroy(struct nvgpu_channel_sync *s)
struct nvgpu_channel_sync_semaphore *sema = struct nvgpu_channel_sync_semaphore *sema =
nvgpu_channel_sync_semaphore_from_ops(s); nvgpu_channel_sync_semaphore_from_ops(s);
struct channel_gk20a *c = sema->c; struct nvgpu_channel *c = sema->c;
struct gk20a *g = c->g; struct gk20a *g = c->g;
if (c->has_os_fence_framework_support && if (c->has_os_fence_framework_support &&
@@ -333,7 +333,7 @@ struct nvgpu_channel_sync_semaphore *
struct nvgpu_channel_sync * struct nvgpu_channel_sync *
nvgpu_channel_sync_semaphore_create( nvgpu_channel_sync_semaphore_create(
struct channel_gk20a *c, bool user_managed) struct nvgpu_channel *c, bool user_managed)
{ {
struct nvgpu_channel_sync_semaphore *sema; struct nvgpu_channel_sync_semaphore *sema;
struct gk20a *g = c->g; struct gk20a *g = c->g;

View File

@@ -40,7 +40,7 @@
struct nvgpu_channel_sync_syncpt { struct nvgpu_channel_sync_syncpt {
struct nvgpu_channel_sync ops; struct nvgpu_channel_sync ops;
struct channel_gk20a *c; struct nvgpu_channel *c;
struct nvgpu_nvhost_dev *nvhost_dev; struct nvgpu_nvhost_dev *nvhost_dev;
u32 id; u32 id;
struct nvgpu_mem syncpt_buf; struct nvgpu_mem syncpt_buf;
@@ -54,7 +54,7 @@ nvgpu_channel_sync_syncpt_from_ops(struct nvgpu_channel_sync *ops)
offsetof(struct nvgpu_channel_sync_syncpt, ops)); offsetof(struct nvgpu_channel_sync_syncpt, ops));
} }
static int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c, static int channel_sync_syncpt_gen_wait_cmd(struct nvgpu_channel *c,
u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd, u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd,
u32 wait_cmd_size, u32 pos, bool preallocated) u32 wait_cmd_size, u32 pos, bool preallocated)
{ {
@@ -91,7 +91,7 @@ static int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c,
static int channel_sync_syncpt_wait_raw(struct nvgpu_channel_sync_syncpt *s, static int channel_sync_syncpt_wait_raw(struct nvgpu_channel_sync_syncpt *s,
u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd) u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd)
{ {
struct channel_gk20a *c = s->c; struct nvgpu_channel *c = s->c;
int err = 0; int err = 0;
u32 wait_cmd_size = c->g->ops.sync.syncpt.get_wait_cmd_size(); u32 wait_cmd_size = c->g->ops.sync.syncpt.get_wait_cmd_size();
@@ -112,7 +112,7 @@ static int channel_sync_syncpt_wait_fd(struct nvgpu_channel_sync *s, int fd,
struct nvgpu_os_fence_syncpt os_fence_syncpt = {0}; struct nvgpu_os_fence_syncpt os_fence_syncpt = {0};
struct nvgpu_channel_sync_syncpt *sp = struct nvgpu_channel_sync_syncpt *sp =
nvgpu_channel_sync_syncpt_from_ops(s); nvgpu_channel_sync_syncpt_from_ops(s);
struct channel_gk20a *c = sp->c; struct nvgpu_channel *c = sp->c;
int err = 0; int err = 0;
u32 i, num_fences, wait_cmd_size; u32 i, num_fences, wait_cmd_size;
u32 syncpt_id = 0U; u32 syncpt_id = 0U;
@@ -172,7 +172,7 @@ cleanup:
static void channel_sync_syncpt_update(void *priv, int nr_completed) static void channel_sync_syncpt_update(void *priv, int nr_completed)
{ {
struct channel_gk20a *ch = priv; struct nvgpu_channel *ch = priv;
gk20a_channel_update(ch); gk20a_channel_update(ch);
@@ -191,7 +191,7 @@ static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
int err; int err;
struct nvgpu_channel_sync_syncpt *sp = struct nvgpu_channel_sync_syncpt *sp =
nvgpu_channel_sync_syncpt_from_ops(s); nvgpu_channel_sync_syncpt_from_ops(s);
struct channel_gk20a *c = sp->c; struct nvgpu_channel *c = sp->c;
struct nvgpu_os_fence os_fence = {0}; struct nvgpu_os_fence os_fence = {0};
err = gk20a_channel_alloc_priv_cmdbuf(c, err = gk20a_channel_alloc_priv_cmdbuf(c,
@@ -210,7 +210,7 @@ static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
c->g->ops.sync.syncpt.get_incr_per_release()); c->g->ops.sync.syncpt.get_incr_per_release());
if (register_irq) { if (register_irq) {
struct channel_gk20a *referenced = gk20a_channel_get(c); struct nvgpu_channel *referenced = gk20a_channel_get(c);
WARN_ON(!referenced); WARN_ON(!referenced);
@@ -358,7 +358,7 @@ nvgpu_channel_sync_to_syncpt(struct nvgpu_channel_sync *sync)
} }
struct nvgpu_channel_sync * struct nvgpu_channel_sync *
nvgpu_channel_sync_syncpt_create(struct channel_gk20a *c, bool user_managed) nvgpu_channel_sync_syncpt_create(struct nvgpu_channel *c, bool user_managed)
{ {
struct nvgpu_channel_sync_syncpt *sp; struct nvgpu_channel_sync_syncpt *sp;
char syncpt_name[32]; char syncpt_name[32];

View File

@@ -34,7 +34,7 @@
#include "common/vgpu/ivc/comm_vgpu.h" #include "common/vgpu/ivc/comm_vgpu.h"
int vgpu_exec_regops(struct gk20a *g, int vgpu_exec_regops(struct gk20a *g,
struct channel_gk20a *ch, struct nvgpu_channel *ch,
struct nvgpu_dbg_reg_op *ops, struct nvgpu_dbg_reg_op *ops,
u32 num_ops, u32 num_ops,
bool is_profiler, bool is_profiler,

View File

@@ -27,10 +27,10 @@ struct dbg_session_gk20a;
struct nvgpu_dbg_reg_op; struct nvgpu_dbg_reg_op;
struct dbg_profiler_object_data; struct dbg_profiler_object_data;
struct gk20a; struct gk20a;
struct channel_gk20a; struct nvgpu_channel;
int vgpu_exec_regops(struct gk20a *g, int vgpu_exec_regops(struct gk20a *g,
struct channel_gk20a *ch, struct nvgpu_channel *ch,
struct nvgpu_dbg_reg_op *ops, struct nvgpu_dbg_reg_op *ops,
u32 num_ops, u32 num_ops,
bool is_profiler, bool is_profiler,

View File

@@ -47,7 +47,7 @@
#include "common/vgpu/gr/subctx_vgpu.h" #include "common/vgpu/gr/subctx_vgpu.h"
#include "common/vgpu/ivc/comm_vgpu.h" #include "common/vgpu/ivc/comm_vgpu.h"
void vgpu_channel_bind(struct channel_gk20a *ch) void vgpu_channel_bind(struct nvgpu_channel *ch)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_channel_config_params *p = struct tegra_vgpu_channel_config_params *p =
@@ -67,7 +67,7 @@ void vgpu_channel_bind(struct channel_gk20a *ch)
nvgpu_atomic_set(&ch->bound, true); nvgpu_atomic_set(&ch->bound, true);
} }
void vgpu_channel_unbind(struct channel_gk20a *ch) void vgpu_channel_unbind(struct nvgpu_channel *ch)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
@@ -88,7 +88,7 @@ void vgpu_channel_unbind(struct channel_gk20a *ch)
} }
int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch) int vgpu_channel_alloc_inst(struct gk20a *g, struct nvgpu_channel *ch)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx; struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
@@ -111,7 +111,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
return 0; return 0;
} }
void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch) void vgpu_channel_free_inst(struct gk20a *g, struct nvgpu_channel *ch)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx; struct tegra_vgpu_channel_hwctx_params *p = &msg.params.channel_hwctx;
@@ -126,7 +126,7 @@ void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch)
WARN_ON(err || msg.ret); WARN_ON(err || msg.ret);
} }
void vgpu_channel_enable(struct channel_gk20a *ch) void vgpu_channel_enable(struct nvgpu_channel *ch)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_channel_config_params *p = struct tegra_vgpu_channel_config_params *p =
@@ -143,7 +143,7 @@ void vgpu_channel_enable(struct channel_gk20a *ch)
WARN_ON(err || msg.ret); WARN_ON(err || msg.ret);
} }
void vgpu_channel_disable(struct channel_gk20a *ch) void vgpu_channel_disable(struct nvgpu_channel *ch)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_channel_config_params *p = struct tegra_vgpu_channel_config_params *p =
@@ -306,7 +306,7 @@ int vgpu_init_fifo_setup_hw(struct gk20a *g)
return 0; return 0;
} }
int vgpu_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch) int vgpu_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_channel_config_params *p = struct tegra_vgpu_channel_config_params *p =
@@ -333,7 +333,7 @@ int vgpu_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
return err; return err;
} }
int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg) int vgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_tsg_preempt_params *p = struct tegra_vgpu_tsg_preempt_params *p =
@@ -356,11 +356,11 @@ int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
return err; return err;
} }
int vgpu_tsg_force_reset_ch(struct channel_gk20a *ch, int vgpu_tsg_force_reset_ch(struct nvgpu_channel *ch,
u32 err_code, bool verbose) u32 err_code, bool verbose)
{ {
struct tsg_gk20a *tsg = NULL; struct nvgpu_tsg *tsg = NULL;
struct channel_gk20a *ch_tsg = NULL; struct nvgpu_channel *ch_tsg = NULL;
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
struct tegra_vgpu_cmd_msg msg = {0}; struct tegra_vgpu_cmd_msg msg = {0};
struct tegra_vgpu_channel_config_params *p = struct tegra_vgpu_channel_config_params *p =
@@ -400,7 +400,7 @@ int vgpu_tsg_force_reset_ch(struct channel_gk20a *ch,
} }
static void vgpu_fifo_set_ctx_mmu_error_ch(struct gk20a *g, static void vgpu_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
struct channel_gk20a *ch) struct nvgpu_channel *ch)
{ {
/* /*
* If error code is already set, this mmu fault * If error code is already set, this mmu fault
@@ -420,10 +420,10 @@ static void vgpu_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
} }
static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g, static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g,
struct channel_gk20a *ch) struct nvgpu_channel *ch)
{ {
struct tsg_gk20a *tsg = NULL; struct nvgpu_tsg *tsg = NULL;
struct channel_gk20a *ch_tsg = NULL; struct nvgpu_channel *ch_tsg = NULL;
tsg = tsg_gk20a_from_ch(ch); tsg = tsg_gk20a_from_ch(ch);
if (tsg != NULL) { if (tsg != NULL) {
@@ -445,7 +445,7 @@ static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g,
int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info) int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
{ {
struct channel_gk20a *ch = gk20a_channel_from_id(g, info->chid); struct nvgpu_channel *ch = gk20a_channel_from_id(g, info->chid);
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
if (!ch) { if (!ch) {
@@ -493,7 +493,7 @@ u32 vgpu_channel_count(struct gk20a *g)
return priv->constants.num_channels; return priv->constants.num_channels;
} }
void vgpu_channel_free_ctx_header(struct channel_gk20a *c) void vgpu_channel_free_ctx_header(struct nvgpu_channel *c)
{ {
vgpu_free_subctx_header(c->g, c->subctx, c->vm, c->virt_ctx); vgpu_free_subctx_header(c->g, c->subctx, c->vm, c->virt_ctx);
} }
@@ -501,7 +501,7 @@ void vgpu_channel_free_ctx_header(struct channel_gk20a *c)
void vgpu_handle_channel_event(struct gk20a *g, void vgpu_handle_channel_event(struct gk20a *g,
struct tegra_vgpu_channel_event_info *info) struct tegra_vgpu_channel_event_info *info)
{ {
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
if (!info->is_tsg) { if (!info->is_tsg) {
nvgpu_err(g, "channel event posted"); nvgpu_err(g, "channel event posted");
@@ -521,7 +521,7 @@ void vgpu_handle_channel_event(struct gk20a *g,
void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid) void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
{ {
struct channel_gk20a *ch = gk20a_channel_from_id(g, chid); struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
if (ch == NULL) { if (ch == NULL) {
nvgpu_err(g, "invalid channel id %d", chid); nvgpu_err(g, "invalid channel id %d", chid);
@@ -536,7 +536,7 @@ void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
void vgpu_set_error_notifier(struct gk20a *g, void vgpu_set_error_notifier(struct gk20a *g,
struct tegra_vgpu_channel_set_error_notifier *p) struct tegra_vgpu_channel_set_error_notifier *p)
{ {
struct channel_gk20a *ch; struct nvgpu_channel *ch;
if (p->chid >= g->fifo.num_channels) { if (p->chid >= g->fifo.num_channels) {
nvgpu_err(g, "invalid chid %d", p->chid); nvgpu_err(g, "invalid chid %d", p->chid);

View File

@@ -26,9 +26,9 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct gk20a; struct gk20a;
struct channel_gk20a; struct nvgpu_channel;
struct nvgpu_fifo; struct nvgpu_fifo;
struct tsg_gk20a; struct nvgpu_tsg;
struct tegra_vgpu_fifo_intr_info; struct tegra_vgpu_fifo_intr_info;
struct tegra_vgpu_channel_event_info; struct tegra_vgpu_channel_event_info;
struct tegra_vgpu_channel_set_error_notifier; struct tegra_vgpu_channel_set_error_notifier;
@@ -36,29 +36,29 @@ struct tegra_vgpu_channel_set_error_notifier;
int vgpu_fifo_setup_sw(struct gk20a *g); int vgpu_fifo_setup_sw(struct gk20a *g);
void vgpu_fifo_cleanup_sw(struct gk20a *g); void vgpu_fifo_cleanup_sw(struct gk20a *g);
int vgpu_init_fifo_setup_hw(struct gk20a *g); int vgpu_init_fifo_setup_hw(struct gk20a *g);
void vgpu_channel_bind(struct channel_gk20a *ch); void vgpu_channel_bind(struct nvgpu_channel *ch);
void vgpu_channel_unbind(struct channel_gk20a *ch); void vgpu_channel_unbind(struct nvgpu_channel *ch);
int vgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch); int vgpu_channel_alloc_inst(struct gk20a *g, struct nvgpu_channel *ch);
void vgpu_channel_free_inst(struct gk20a *g, struct channel_gk20a *ch); void vgpu_channel_free_inst(struct gk20a *g, struct nvgpu_channel *ch);
void vgpu_channel_enable(struct channel_gk20a *ch); void vgpu_channel_enable(struct nvgpu_channel *ch);
void vgpu_channel_disable(struct channel_gk20a *ch); void vgpu_channel_disable(struct nvgpu_channel *ch);
u32 vgpu_channel_count(struct gk20a *g); u32 vgpu_channel_count(struct gk20a *g);
int vgpu_fifo_init_engine_info(struct nvgpu_fifo *f); int vgpu_fifo_init_engine_info(struct nvgpu_fifo *f);
int vgpu_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch); int vgpu_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch);
int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg); int vgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg);
int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice); int vgpu_channel_set_timeslice(struct nvgpu_channel *ch, u32 timeslice);
int vgpu_tsg_force_reset_ch(struct channel_gk20a *ch, int vgpu_tsg_force_reset_ch(struct nvgpu_channel *ch,
u32 err_code, bool verbose); u32 err_code, bool verbose);
u32 vgpu_tsg_default_timeslice_us(struct gk20a *g); u32 vgpu_tsg_default_timeslice_us(struct gk20a *g);
int vgpu_tsg_open(struct tsg_gk20a *tsg); int vgpu_tsg_open(struct nvgpu_tsg *tsg);
void vgpu_tsg_release(struct tsg_gk20a *tsg); void vgpu_tsg_release(struct nvgpu_tsg *tsg);
int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch); int vgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch);
int vgpu_tsg_unbind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch); int vgpu_tsg_unbind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch);
int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice); int vgpu_tsg_set_timeslice(struct nvgpu_tsg *tsg, u32 timeslice);
int vgpu_tsg_set_interleave(struct tsg_gk20a *tsg, u32 level); int vgpu_tsg_set_interleave(struct nvgpu_tsg *tsg, u32 level);
void vgpu_tsg_enable(struct tsg_gk20a *tsg); void vgpu_tsg_enable(struct nvgpu_tsg *tsg);
int vgpu_set_sm_exception_type_mask(struct channel_gk20a *ch, u32 mask); int vgpu_set_sm_exception_type_mask(struct nvgpu_channel *ch, u32 mask);
void vgpu_channel_free_ctx_header(struct channel_gk20a *c); void vgpu_channel_free_ctx_header(struct nvgpu_channel *c);
int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info); int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info);
void vgpu_handle_channel_event(struct gk20a *g, void vgpu_handle_channel_event(struct gk20a *g,
struct tegra_vgpu_channel_event_info *info); struct tegra_vgpu_channel_event_info *info);

View File

@@ -29,7 +29,7 @@
#include "ramfc_vgpu.h" #include "ramfc_vgpu.h"
#include "common/vgpu/ivc/comm_vgpu.h" #include "common/vgpu/ivc/comm_vgpu.h"
int vgpu_ramfc_setup(struct channel_gk20a *ch, u64 gpfifo_base, int vgpu_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags) u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;

View File

@@ -25,9 +25,9 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct channel_gk20a; struct nvgpu_channel;
int vgpu_ramfc_setup(struct channel_gk20a *ch, u64 gpfifo_base, int vgpu_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags); u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags);
#endif /* NVGPU_RAMFC_VGPU_H */ #endif /* NVGPU_RAMFC_VGPU_H */

View File

@@ -74,7 +74,7 @@ done:
} }
static bool vgpu_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id, static bool vgpu_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch, bool add) struct nvgpu_channel *ch, bool add)
{ {
struct nvgpu_fifo *f = &g->fifo; struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_runlist_info *runlist; struct nvgpu_runlist_info *runlist;
@@ -128,7 +128,7 @@ static void vgpu_runlist_reconstruct_locked(struct gk20a *g, u32 runlist_id,
} }
static int vgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id, static int vgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch, bool add, struct nvgpu_channel *ch, bool add,
bool wait_for_finish) bool wait_for_finish)
{ {
struct nvgpu_fifo *f = &g->fifo; struct nvgpu_fifo *f = &g->fifo;
@@ -164,7 +164,7 @@ static int vgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
(ch == NULL && !add) means remove all active channels from runlist. (ch == NULL && !add) means remove all active channels from runlist.
(ch == NULL && add) means restore all active channels on runlist. */ (ch == NULL && add) means restore all active channels on runlist. */
static int vgpu_runlist_update(struct gk20a *g, u32 runlist_id, static int vgpu_runlist_update(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch, struct nvgpu_channel *ch,
bool add, bool wait_for_finish) bool add, bool wait_for_finish)
{ {
struct nvgpu_runlist_info *runlist = NULL; struct nvgpu_runlist_info *runlist = NULL;
@@ -185,7 +185,7 @@ static int vgpu_runlist_update(struct gk20a *g, u32 runlist_id,
} }
int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id, int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch, struct nvgpu_channel *ch,
bool add, bool wait_for_finish) bool add, bool wait_for_finish)
{ {
nvgpu_assert(ch != NULL); nvgpu_assert(ch != NULL);

View File

@@ -23,10 +23,10 @@
*/ */
struct gk20a; struct gk20a;
struct channel_gk20a; struct nvgpu_channel;
int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id, int vgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
struct channel_gk20a *ch, struct nvgpu_channel *ch,
bool add, bool wait_for_finish); bool add, bool wait_for_finish);
int vgpu_runlist_reload(struct gk20a *g, u32 runlist_id, int vgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
bool add, bool wait_for_finish); bool add, bool wait_for_finish);


@@ -73,7 +73,7 @@ static int set_syncpt_ro_map_gpu_va_locked(struct vm_gk20a *vm)
return 0; return 0;
} }
int vgpu_gv11b_fifo_alloc_buf(struct channel_gk20a *c, int vgpu_gv11b_fifo_alloc_buf(struct nvgpu_channel *c,
u32 syncpt_id, struct nvgpu_mem *syncpt_buf) u32 syncpt_id, struct nvgpu_mem *syncpt_buf)
{ {
int err; int err;
@@ -120,7 +120,7 @@ int vgpu_gv11b_fifo_alloc_buf(struct channel_gk20a *c,
return 0; return 0;
} }
void vgpu_gv11b_fifo_free_buf(struct channel_gk20a *c, void vgpu_gv11b_fifo_free_buf(struct nvgpu_channel *c,
struct nvgpu_mem *syncpt_buf) struct nvgpu_mem *syncpt_buf)
{ {
nvgpu_gmmu_unmap(c->vm, syncpt_buf, syncpt_buf->gpu_va); nvgpu_gmmu_unmap(c->vm, syncpt_buf, syncpt_buf->gpu_va);


@@ -26,9 +26,9 @@
struct gk20a; struct gk20a;
int vgpu_gv11b_init_fifo_setup_hw(struct gk20a *g); int vgpu_gv11b_init_fifo_setup_hw(struct gk20a *g);
int vgpu_gv11b_fifo_alloc_buf(struct channel_gk20a *c, int vgpu_gv11b_fifo_alloc_buf(struct nvgpu_channel *c,
u32 syncpt_id, struct nvgpu_mem *syncpt_buf); u32 syncpt_id, struct nvgpu_mem *syncpt_buf);
void vgpu_gv11b_fifo_free_buf(struct channel_gk20a *c, void vgpu_gv11b_fifo_free_buf(struct nvgpu_channel *c,
struct nvgpu_mem *syncpt_buf); struct nvgpu_mem *syncpt_buf);
int vgpu_gv11b_fifo_get_sync_ro_map(struct vm_gk20a *vm, int vgpu_gv11b_fifo_get_sync_ro_map(struct vm_gk20a *vm,
u64 *base_gpuva, u32 *sync_size); u64 *base_gpuva, u32 *sync_size);


@@ -87,7 +87,7 @@ void vgpu_gr_detect_sm_arch(struct gk20a *g)
priv->constants.sm_arch_warp_count; priv->constants.sm_arch_warp_count;
} }
static int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va) static int vgpu_gr_commit_inst(struct nvgpu_channel *c, u64 gpu_va)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
@@ -121,7 +121,7 @@ static int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
} }
static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g, static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
struct channel_gk20a *c, bool patch) struct nvgpu_channel *c, bool patch)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx; struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
@@ -213,12 +213,12 @@ int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
return 0; return 0;
} }
int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags) int vgpu_gr_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num, u32 flags)
{ {
struct gk20a *g = c->g; struct gk20a *g = c->g;
struct nvgpu_gr_ctx *gr_ctx = NULL; struct nvgpu_gr_ctx *gr_ctx = NULL;
struct nvgpu_gr *gr = g->gr; struct nvgpu_gr *gr = g->gr;
struct tsg_gk20a *tsg = NULL; struct nvgpu_tsg *tsg = NULL;
int err = 0; int err = 0;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
@@ -486,7 +486,7 @@ static int vgpu_gr_init_gr_zcull(struct gk20a *g, struct nvgpu_gr *gr,
return 0; return 0;
} }
int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct channel_gk20a *c, int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct nvgpu_channel *c,
u64 zcull_va, u32 mode) u64 zcull_va, u32 mode)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
@@ -759,7 +759,7 @@ int vgpu_init_gr_support(struct gk20a *g)
int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info) int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
{ {
struct channel_gk20a *ch = gk20a_channel_from_id(g, info->chid); struct nvgpu_channel *ch = gk20a_channel_from_id(g, info->chid);
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
@@ -821,7 +821,7 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info)
} }
int vgpu_gr_set_sm_debug_mode(struct gk20a *g, int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
struct channel_gk20a *ch, u64 sms, bool enable) struct nvgpu_channel *ch, u64 sms, bool enable)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_sm_debug_mode *p = &msg.params.sm_debug_mode; struct tegra_vgpu_sm_debug_mode *p = &msg.params.sm_debug_mode;
@@ -841,7 +841,7 @@ int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
} }
int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g, int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
struct channel_gk20a *ch, bool enable) struct nvgpu_channel *ch, bool enable)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode; struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
@@ -866,9 +866,9 @@ int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
} }
int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g, int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
struct channel_gk20a *ch, u64 gpu_va, u32 mode) struct nvgpu_channel *ch, u64 gpu_va, u32 mode)
{ {
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode; struct tegra_vgpu_channel_set_ctxsw_mode *p = &msg.params.set_ctxsw_mode;
@@ -952,12 +952,12 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
} }
int vgpu_gr_clear_sm_error_state(struct gk20a *g, int vgpu_gr_clear_sm_error_state(struct gk20a *g,
struct channel_gk20a *ch, u32 sm_id) struct nvgpu_channel *ch, u32 sm_id)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_clear_sm_error_state *p = struct tegra_vgpu_clear_sm_error_state *p =
&msg.params.clear_sm_error_state; &msg.params.clear_sm_error_state;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
int err; int err;
tsg = tsg_gk20a_from_ch(ch); tsg = tsg_gk20a_from_ch(ch);
@@ -1076,7 +1076,7 @@ void vgpu_gr_handle_sm_esr_event(struct gk20a *g,
struct tegra_vgpu_sm_esr_info *info) struct tegra_vgpu_sm_esr_info *info)
{ {
struct nvgpu_tsg_sm_error_state *sm_error_states; struct nvgpu_tsg_sm_error_state *sm_error_states;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
u32 no_of_sm = g->ops.gr.init.get_no_of_sm(g); u32 no_of_sm = g->ops.gr.init.get_no_of_sm(g);
if (info->sm_id >= no_of_sm) { if (info->sm_id >= no_of_sm) {
@@ -1173,7 +1173,7 @@ int vgpu_gr_init_fs_state(struct gk20a *g)
return g->ops.gr.config.init_sm_id_table(g, g->gr->config); return g->ops.gr.config.init_sm_id_table(g, g->gr->config);
} }
int vgpu_gr_update_pc_sampling(struct channel_gk20a *ch, bool enable) int vgpu_gr_update_pc_sampling(struct nvgpu_channel *ch, bool enable)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_channel_update_pc_sampling *p = struct tegra_vgpu_channel_update_pc_sampling *p =
@@ -1406,13 +1406,13 @@ fail:
return err; return err;
} }
int vgpu_gr_set_preemption_mode(struct channel_gk20a *ch, int vgpu_gr_set_preemption_mode(struct nvgpu_channel *ch,
u32 graphics_preempt_mode, u32 graphics_preempt_mode,
u32 compute_preempt_mode) u32 compute_preempt_mode)
{ {
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
struct vm_gk20a *vm; struct vm_gk20a *vm;
u32 class; u32 class;
int err; int err;


@@ -26,14 +26,14 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct gk20a; struct gk20a;
struct channel_gk20a; struct nvgpu_channel;
struct gr_gk20a; struct gr_gk20a;
struct nvgpu_gr_zcull_info; struct nvgpu_gr_zcull_info;
struct nvgpu_gr_zbc; struct nvgpu_gr_zbc;
struct nvgpu_gr_zbc_entry; struct nvgpu_gr_zbc_entry;
struct nvgpu_gr_zbc_query_params; struct nvgpu_gr_zbc_query_params;
struct dbg_session_gk20a; struct dbg_session_gk20a;
struct tsg_gk20a; struct nvgpu_tsg;
struct vm_gk20a; struct vm_gk20a;
struct nvgpu_gr_ctx; struct nvgpu_gr_ctx;
struct nvgpu_gr_zcull; struct nvgpu_gr_zcull;
@@ -45,10 +45,10 @@ void vgpu_gr_detect_sm_arch(struct gk20a *g);
int vgpu_gr_init_ctx_state(struct gk20a *g, int vgpu_gr_init_ctx_state(struct gk20a *g,
struct nvgpu_gr_falcon_query_sizes *sizes); struct nvgpu_gr_falcon_query_sizes *sizes);
int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g); int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g);
void vgpu_gr_free_channel_ctx(struct channel_gk20a *c, bool is_tsg); void vgpu_gr_free_channel_ctx(struct nvgpu_channel *c, bool is_tsg);
void vgpu_gr_free_tsg_ctx(struct tsg_gk20a *tsg); void vgpu_gr_free_tsg_ctx(struct nvgpu_tsg *tsg);
int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags); int vgpu_gr_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num, u32 flags);
int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct channel_gk20a *c, int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct nvgpu_channel *c,
u64 zcull_va, u32 mode); u64 zcull_va, u32 mode);
int vgpu_gr_get_zcull_info(struct gk20a *g, int vgpu_gr_get_zcull_info(struct gk20a *g,
struct nvgpu_gr_config *gr_config, struct nvgpu_gr_config *gr_config,
@@ -64,13 +64,13 @@ int vgpu_gr_add_zbc(struct gk20a *g, struct nvgpu_gr_zbc *zbc,
int vgpu_gr_query_zbc(struct gk20a *g, struct nvgpu_gr_zbc *zbc, int vgpu_gr_query_zbc(struct gk20a *g, struct nvgpu_gr_zbc *zbc,
struct nvgpu_gr_zbc_query_params *query_params); struct nvgpu_gr_zbc_query_params *query_params);
int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g, int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
struct channel_gk20a *ch, bool enable); struct nvgpu_channel *ch, bool enable);
int vgpu_gr_set_sm_debug_mode(struct gk20a *g, int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
struct channel_gk20a *ch, u64 sms, bool enable); struct nvgpu_channel *ch, u64 sms, bool enable);
int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g, int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
struct channel_gk20a *ch, u64 gpu_va, u32 mode); struct nvgpu_channel *ch, u64 gpu_va, u32 mode);
int vgpu_gr_clear_sm_error_state(struct gk20a *g, int vgpu_gr_clear_sm_error_state(struct gk20a *g,
struct channel_gk20a *ch, u32 sm_id); struct nvgpu_channel *ch, u32 sm_id);
int vgpu_gr_suspend_contexts(struct gk20a *g, int vgpu_gr_suspend_contexts(struct gk20a *g,
struct dbg_session_gk20a *dbg_s, struct dbg_session_gk20a *dbg_s,
int *ctx_resident_ch_fd); int *ctx_resident_ch_fd);
@@ -80,9 +80,9 @@ int vgpu_gr_resume_contexts(struct gk20a *g,
int vgpu_gr_init_sm_id_table(struct gk20a *g, int vgpu_gr_init_sm_id_table(struct gk20a *g,
struct nvgpu_gr_config *gr_config); struct nvgpu_gr_config *gr_config);
int vgpu_gr_init_fs_state(struct gk20a *g); int vgpu_gr_init_fs_state(struct gk20a *g);
int vgpu_gr_update_pc_sampling(struct channel_gk20a *ch, bool enable); int vgpu_gr_update_pc_sampling(struct nvgpu_channel *ch, bool enable);
void vgpu_gr_init_cyclestats(struct gk20a *g); void vgpu_gr_init_cyclestats(struct gk20a *g);
int vgpu_gr_set_preemption_mode(struct channel_gk20a *ch, int vgpu_gr_set_preemption_mode(struct nvgpu_channel *ch,
u32 graphics_preempt_mode, u32 graphics_preempt_mode,
u32 compute_preempt_mode); u32 compute_preempt_mode);
int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info); int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info);


@@ -28,8 +28,8 @@
#include "vgpu_tsg_gv11b.h" #include "vgpu_tsg_gv11b.h"
#include "common/vgpu/ivc/comm_vgpu.h" #include "common/vgpu/ivc/comm_vgpu.h"
int vgpu_gv11b_tsg_bind_channel(struct tsg_gk20a *tsg, int vgpu_gv11b_tsg_bind_channel(struct nvgpu_tsg *tsg,
struct channel_gk20a *ch) struct nvgpu_channel *ch)
{ {
struct tegra_vgpu_cmd_msg msg = {}; struct tegra_vgpu_cmd_msg msg = {};
struct tegra_vgpu_tsg_bind_channel_ex_params *p = struct tegra_vgpu_tsg_bind_channel_ex_params *p =


@@ -23,7 +23,7 @@
#ifndef NVGPU_VGPU_TSG_GV11B_H #ifndef NVGPU_VGPU_TSG_GV11B_H
#define NVGPU_VGPU_TSG_GV11B_H #define NVGPU_VGPU_TSG_GV11B_H
int vgpu_gv11b_tsg_bind_channel(struct tsg_gk20a *tsg, int vgpu_gv11b_tsg_bind_channel(struct nvgpu_tsg *tsg,
struct channel_gk20a *ch); struct nvgpu_channel *ch);
#endif /* NVGPU_VGPU_TSG_GV11B_H */ #endif /* NVGPU_VGPU_TSG_GV11B_H */


@@ -133,7 +133,7 @@ u64 vgpu_mm_bar1_map_userd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
} }
int vgpu_vm_bind_channel(struct vm_gk20a *vm, int vgpu_vm_bind_channel(struct vm_gk20a *vm,
struct channel_gk20a *ch) struct nvgpu_channel *ch)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share; struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share;


@@ -24,7 +24,7 @@
#define NVGPU_MM_VGPU_H #define NVGPU_MM_VGPU_H
struct nvgpu_mem; struct nvgpu_mem;
struct channel_gk20a; struct nvgpu_channel;
struct vm_gk20a_mapping_batch; struct vm_gk20a_mapping_batch;
struct gk20a_as_share; struct gk20a_as_share;
struct vm_gk20a; struct vm_gk20a;
@@ -41,7 +41,7 @@ void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
bool sparse, bool sparse,
struct vm_gk20a_mapping_batch *batch); struct vm_gk20a_mapping_batch *batch);
int vgpu_vm_bind_channel(struct vm_gk20a *vm, int vgpu_vm_bind_channel(struct vm_gk20a *vm,
struct channel_gk20a *ch); struct nvgpu_channel *ch);
int vgpu_mm_fb_flush(struct gk20a *g); int vgpu_mm_fb_flush(struct gk20a *g);
void vgpu_mm_l2_invalidate(struct gk20a *g); void vgpu_mm_l2_invalidate(struct gk20a *g);
int vgpu_mm_l2_flush(struct gk20a *g, bool invalidate); int vgpu_mm_l2_flush(struct gk20a *g, bool invalidate);


@@ -142,7 +142,7 @@ void vgpu_css_release_snapshot_buffer(struct gk20a *g)
nvgpu_log_info(g, "cyclestats(vgpu): buffer for snapshots released\n"); nvgpu_log_info(g, "cyclestats(vgpu): buffer for snapshots released\n");
} }
int vgpu_css_flush_snapshots(struct channel_gk20a *ch, int vgpu_css_flush_snapshots(struct nvgpu_channel *ch,
u32 *pending, bool *hw_overflow) u32 *pending, bool *hw_overflow)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
@@ -170,7 +170,7 @@ int vgpu_css_flush_snapshots(struct channel_gk20a *ch,
return err; return err;
} }
static int vgpu_css_attach(struct channel_gk20a *ch, static int vgpu_css_attach(struct nvgpu_channel *ch,
struct gk20a_cs_snapshot_client *cs_client) struct gk20a_cs_snapshot_client *cs_client)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
@@ -198,7 +198,7 @@ static int vgpu_css_attach(struct channel_gk20a *ch,
return err; return err;
} }
int vgpu_css_detach(struct channel_gk20a *ch, int vgpu_css_detach(struct nvgpu_channel *ch,
struct gk20a_cs_snapshot_client *cs_client) struct gk20a_cs_snapshot_client *cs_client)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
@@ -225,7 +225,7 @@ int vgpu_css_detach(struct channel_gk20a *ch,
return err; return err;
} }
int vgpu_css_enable_snapshot_buffer(struct channel_gk20a *ch, int vgpu_css_enable_snapshot_buffer(struct nvgpu_channel *ch,
struct gk20a_cs_snapshot_client *cs_client) struct gk20a_cs_snapshot_client *cs_client)
{ {
int ret; int ret;


@@ -26,15 +26,15 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct gk20a; struct gk20a;
struct channel_gk20a; struct nvgpu_channel;
struct gk20a_cs_snapshot_client; struct gk20a_cs_snapshot_client;
void vgpu_css_release_snapshot_buffer(struct gk20a *g); void vgpu_css_release_snapshot_buffer(struct gk20a *g);
int vgpu_css_flush_snapshots(struct channel_gk20a *ch, int vgpu_css_flush_snapshots(struct nvgpu_channel *ch,
u32 *pending, bool *hw_overflow); u32 *pending, bool *hw_overflow);
int vgpu_css_detach(struct channel_gk20a *ch, int vgpu_css_detach(struct nvgpu_channel *ch,
struct gk20a_cs_snapshot_client *cs_client); struct gk20a_cs_snapshot_client *cs_client);
int vgpu_css_enable_snapshot_buffer(struct channel_gk20a *ch, int vgpu_css_enable_snapshot_buffer(struct nvgpu_channel *ch,
struct gk20a_cs_snapshot_client *cs_client); struct gk20a_cs_snapshot_client *cs_client);
u32 vgpu_css_get_buffer_size(struct gk20a *g); u32 vgpu_css_get_buffer_size(struct gk20a *g);
#endif /* NVGPU_CSS_VGPU_H */ #endif /* NVGPU_CSS_VGPU_H */


@@ -31,7 +31,7 @@
#include "fifo/fifo_vgpu.h" #include "fifo/fifo_vgpu.h"
#include "common/vgpu/ivc/comm_vgpu.h" #include "common/vgpu/ivc/comm_vgpu.h"
int vgpu_tsg_open(struct tsg_gk20a *tsg) int vgpu_tsg_open(struct nvgpu_tsg *tsg)
{ {
struct tegra_vgpu_cmd_msg msg = {}; struct tegra_vgpu_cmd_msg msg = {};
struct tegra_vgpu_tsg_open_rel_params *p = struct tegra_vgpu_tsg_open_rel_params *p =
@@ -55,7 +55,7 @@ int vgpu_tsg_open(struct tsg_gk20a *tsg)
return err; return err;
} }
void vgpu_tsg_release(struct tsg_gk20a *tsg) void vgpu_tsg_release(struct nvgpu_tsg *tsg)
{ {
struct tegra_vgpu_cmd_msg msg = {}; struct tegra_vgpu_cmd_msg msg = {};
struct tegra_vgpu_tsg_open_rel_params *p = struct tegra_vgpu_tsg_open_rel_params *p =
@@ -76,10 +76,10 @@ void vgpu_tsg_release(struct tsg_gk20a *tsg)
} }
} }
void vgpu_tsg_enable(struct tsg_gk20a *tsg) void vgpu_tsg_enable(struct nvgpu_tsg *tsg)
{ {
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
struct channel_gk20a *ch; struct nvgpu_channel *ch;
nvgpu_rwsem_down_read(&tsg->ch_list_lock); nvgpu_rwsem_down_read(&tsg->ch_list_lock);
nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
@@ -88,7 +88,7 @@ void vgpu_tsg_enable(struct tsg_gk20a *tsg)
nvgpu_rwsem_up_read(&tsg->ch_list_lock); nvgpu_rwsem_up_read(&tsg->ch_list_lock);
} }
int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch) int vgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
{ {
struct tegra_vgpu_cmd_msg msg = {}; struct tegra_vgpu_cmd_msg msg = {};
struct tegra_vgpu_tsg_bind_unbind_channel_params *p = struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
@@ -112,7 +112,7 @@ int vgpu_tsg_bind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch)
return err; return err;
} }
int vgpu_tsg_unbind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch) int vgpu_tsg_unbind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
{ {
struct tegra_vgpu_cmd_msg msg = {}; struct tegra_vgpu_cmd_msg msg = {};
struct tegra_vgpu_tsg_bind_unbind_channel_params *p = struct tegra_vgpu_tsg_bind_unbind_channel_params *p =
@@ -132,7 +132,7 @@ int vgpu_tsg_unbind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch)
return err; return err;
} }
int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice) int vgpu_tsg_set_timeslice(struct nvgpu_tsg *tsg, u32 timeslice)
{ {
struct tegra_vgpu_cmd_msg msg = {0}; struct tegra_vgpu_cmd_msg msg = {0};
struct tegra_vgpu_tsg_timeslice_params *p = struct tegra_vgpu_tsg_timeslice_params *p =
@@ -156,7 +156,7 @@ int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
return err; return err;
} }
int vgpu_set_sm_exception_type_mask(struct channel_gk20a *ch, int vgpu_set_sm_exception_type_mask(struct nvgpu_channel *ch,
u32 exception_mask) u32 exception_mask)
{ {
struct tegra_vgpu_cmd_msg msg; struct tegra_vgpu_cmd_msg msg;
@@ -178,7 +178,7 @@ int vgpu_set_sm_exception_type_mask(struct channel_gk20a *ch,
return err; return err;
} }
int vgpu_tsg_set_interleave(struct tsg_gk20a *tsg, u32 new_level) int vgpu_tsg_set_interleave(struct nvgpu_tsg *tsg, u32 new_level)
{ {
struct tegra_vgpu_cmd_msg msg = {0}; struct tegra_vgpu_cmd_msg msg = {0};
struct tegra_vgpu_tsg_runlist_interleave_params *p = struct tegra_vgpu_tsg_runlist_interleave_params *p =


@@ -104,7 +104,7 @@ int vgpu_get_attribute(u64 handle, u32 attrib, u32 *value)
static void vgpu_handle_channel_event(struct gk20a *g, static void vgpu_handle_channel_event(struct gk20a *g,
struct tegra_vgpu_channel_event_info *info) struct tegra_vgpu_channel_event_info *info)
{ {
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
if (!info->is_tsg) { if (!info->is_tsg) {
nvgpu_err(g, "channel event posted"); nvgpu_err(g, "channel event posted");
@@ -125,7 +125,7 @@ static void vgpu_handle_channel_event(struct gk20a *g,
static void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid) static void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
{ {
struct channel_gk20a *ch = gk20a_channel_from_id(g, chid); struct nvgpu_channel *ch = gk20a_channel_from_id(g, chid);
if (ch == NULL) { if (ch == NULL) {
nvgpu_err(g, "invalid channel id %d", chid); nvgpu_err(g, "invalid channel id %d", chid);
@@ -140,7 +140,7 @@ static void vgpu_channel_abort_cleanup(struct gk20a *g, u32 chid)
static void vgpu_set_error_notifier(struct gk20a *g, static void vgpu_set_error_notifier(struct gk20a *g,
struct tegra_vgpu_channel_set_error_notifier *p) struct tegra_vgpu_channel_set_error_notifier *p)
{ {
struct channel_gk20a *ch; struct nvgpu_channel *ch;
if (p->chid >= g->fifo.num_channels) { if (p->chid >= g->fifo.num_channels) {
nvgpu_err(g, "invalid chid %d", p->chid); nvgpu_err(g, "invalid chid %d", p->chid);


@@ -31,8 +31,8 @@
#include <nvgpu/runlist.h> #include <nvgpu/runlist.h>
#include <nvgpu/profile.h> #include <nvgpu/profile.h>
struct channel_gk20a; struct nvgpu_channel;
struct tsg_gk20a; struct nvgpu_tsg;
struct nvgpu_fifo { struct nvgpu_fifo {
struct gk20a *g; struct gk20a *g;
@@ -76,14 +76,14 @@ struct nvgpu_fifo {
u64 userd_gpu_va; u64 userd_gpu_va;
unsigned int used_channels; unsigned int used_channels;
struct channel_gk20a *channel; struct nvgpu_channel *channel;
/* zero-kref'd channels here */ /* zero-kref'd channels here */
struct nvgpu_list_node free_chs; struct nvgpu_list_node free_chs;
struct nvgpu_mutex free_chs_mutex; struct nvgpu_mutex free_chs_mutex;
struct nvgpu_mutex engines_reset_mutex; struct nvgpu_mutex engines_reset_mutex;
struct nvgpu_spinlock runlist_submit_lock; struct nvgpu_spinlock runlist_submit_lock;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
struct nvgpu_mutex tsg_inuse_mutex; struct nvgpu_mutex tsg_inuse_mutex;
void (*remove_support)(struct nvgpu_fifo *f); void (*remove_support)(struct nvgpu_fifo *f);


@@ -368,7 +368,7 @@ void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
u32 addr_lo, addr_hi; u32 addr_lo, addr_hi;
u64 inst_ptr; u64 inst_ptr;
u32 chid = NVGPU_INVALID_CHANNEL_ID; u32 chid = NVGPU_INVALID_CHANNEL_ID;
struct channel_gk20a *refch; struct nvgpu_channel *refch;
(void) memset(mmufault, 0, sizeof(*mmufault)); (void) memset(mmufault, 0, sizeof(*mmufault));


@@ -57,8 +57,8 @@ bool gk20a_fifo_handle_ctxsw_timeout(struct gk20a *g)
u32 id = U32_MAX; u32 id = U32_MAX;
bool is_tsg = false; bool is_tsg = false;
bool recover = false; bool recover = false;
struct channel_gk20a *ch = NULL; struct nvgpu_channel *ch = NULL;
struct tsg_gk20a *tsg = NULL; struct nvgpu_tsg *tsg = NULL;
struct nvgpu_fifo *f = &g->fifo; struct nvgpu_fifo *f = &g->fifo;
u32 ms = 0; u32 ms = 0;
bool debug_dump = false; bool debug_dump = false;


@@ -179,7 +179,7 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g)
u32 timeout_val, ctxsw_timeout_engines; u32 timeout_val, ctxsw_timeout_engines;
u32 info_status; u32 info_status;
const char *info_status_str; const char *info_status_str;
struct tsg_gk20a *tsg = NULL; struct nvgpu_tsg *tsg = NULL;
/* get ctxsw timedout engines */ /* get ctxsw timedout engines */


@@ -272,9 +272,9 @@ bool gk20a_fifo_handle_mmu_fault_locked(
u32 engine_id = nvgpu_engine_mmu_fault_id_to_engine_id(g, u32 engine_id = nvgpu_engine_mmu_fault_id_to_engine_id(g,
(u32)engine_mmu_fault_id); (u32)engine_mmu_fault_id);
struct mmu_fault_info mmfault_info; struct mmu_fault_info mmfault_info;
struct channel_gk20a *ch = NULL; struct nvgpu_channel *ch = NULL;
struct tsg_gk20a *tsg = NULL; struct nvgpu_tsg *tsg = NULL;
struct channel_gk20a *refch = NULL; struct nvgpu_channel *refch = NULL;
bool ctxsw; bool ctxsw;
/* read and parse engine status */ /* read and parse engine status */


@@ -89,7 +89,7 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
return ret; return ret;
} }
int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch) int gk20a_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
{ {
int ret = 0; int ret = 0;
u32 token = PMU_INVALID_MUTEX_OWNER_ID; u32 token = PMU_INVALID_MUTEX_OWNER_ID;
@@ -117,7 +117,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
"ctxsw timeout will trigger recovery if needed", "ctxsw timeout will trigger recovery if needed",
ch->chid); ch->chid);
} else { } else {
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
nvgpu_err(g, "preempt channel %d timeout", ch->chid); nvgpu_err(g, "preempt channel %d timeout", ch->chid);
tsg = tsg_gk20a_from_ch(ch); tsg = tsg_gk20a_from_ch(ch);
@@ -134,7 +134,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
return ret; return ret;
} }
int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg) int gk20a_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
{ {
int ret = 0; int ret = 0;
u32 token = PMU_INVALID_MUTEX_OWNER_ID; u32 token = PMU_INVALID_MUTEX_OWNER_ID;


@@ -25,12 +25,12 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct gk20a; struct gk20a;
struct channel_gk20a; struct nvgpu_channel;
struct tsg_gk20a; struct nvgpu_tsg;
void gk20a_fifo_preempt_trigger(struct gk20a *g, u32 id, unsigned int id_type); void gk20a_fifo_preempt_trigger(struct gk20a *g, u32 id, unsigned int id_type);
int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch); int gk20a_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch);
int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg); int gk20a_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg);
int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id, int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
unsigned int id_type); unsigned int id_type);


@@ -397,9 +397,9 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
return ret; return ret;
} }
int gv11b_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch) int gv11b_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
{ {
struct tsg_gk20a *tsg = NULL; struct nvgpu_tsg *tsg = NULL;
tsg = tsg_gk20a_from_ch(ch); tsg = tsg_gk20a_from_ch(ch);
@@ -414,7 +414,7 @@ int gv11b_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
return g->ops.fifo.preempt_tsg(g, tsg); return g->ops.fifo.preempt_tsg(g, tsg);
} }
int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg) int gv11b_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
{ {
struct nvgpu_fifo *f = &g->fifo; struct nvgpu_fifo *f = &g->fifo;
int ret = 0; int ret = 0;


@@ -27,12 +27,12 @@
#define PREEMPT_PENDING_POLL_PRE_SI_RETRIES 200000U /* 1G/500KHz * 100 */ #define PREEMPT_PENDING_POLL_PRE_SI_RETRIES 200000U /* 1G/500KHz * 100 */
struct gk20a; struct gk20a;
struct channel_gk20a; struct nvgpu_channel;
struct tsg_gk20a; struct nvgpu_tsg;
void gv11b_fifo_preempt_trigger(struct gk20a *g, u32 id, unsigned int id_type); void gv11b_fifo_preempt_trigger(struct gk20a *g, u32 id, unsigned int id_type);
int gv11b_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch); int gv11b_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch);
int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg); int gv11b_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg);
int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id, int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
unsigned int id_type); unsigned int id_type);
void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask); void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask);


@@ -31,7 +31,7 @@
#include <nvgpu/hw/gk20a/hw_ram_gk20a.h> #include <nvgpu/hw/gk20a/hw_ram_gk20a.h>
int gk20a_ramfc_commit_userd(struct channel_gk20a *ch) int gk20a_ramfc_commit_userd(struct nvgpu_channel *ch)
{ {
u32 addr_lo; u32 addr_lo;
u32 addr_hi; u32 addr_hi;
@@ -57,7 +57,7 @@ int gk20a_ramfc_commit_userd(struct channel_gk20a *ch)
return 0; return 0;
} }
int gk20a_ramfc_setup(struct channel_gk20a *ch, u64 gpfifo_base, int gk20a_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags) u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
@@ -108,7 +108,7 @@ int gk20a_ramfc_setup(struct channel_gk20a *ch, u64 gpfifo_base,
return g->ops.ramfc.commit_userd(ch); return g->ops.ramfc.commit_userd(ch);
} }
void gk20a_ramfc_capture_ram_dump(struct gk20a *g, struct channel_gk20a *ch, void gk20a_ramfc_capture_ram_dump(struct gk20a *g, struct nvgpu_channel *ch,
struct nvgpu_channel_dump_info *info) struct nvgpu_channel_dump_info *info)
{ {
struct nvgpu_mem *mem = &ch->inst_block; struct nvgpu_mem *mem = &ch->inst_block;


@@ -24,13 +24,13 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct channel_gk20a; struct nvgpu_channel;
struct nvgpu_channel_dump_info; struct nvgpu_channel_dump_info;
int gk20a_ramfc_setup(struct channel_gk20a *ch, u64 gpfifo_base, int gk20a_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags); u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags);
int gk20a_ramfc_commit_userd(struct channel_gk20a *ch); int gk20a_ramfc_commit_userd(struct nvgpu_channel *ch);
void gk20a_ramfc_capture_ram_dump(struct gk20a *g, struct channel_gk20a *ch, void gk20a_ramfc_capture_ram_dump(struct gk20a *g, struct nvgpu_channel *ch,
struct nvgpu_channel_dump_info *info); struct nvgpu_channel_dump_info *info);
#endif /* NVGPU_RAMFC_GK20A_H */ #endif /* NVGPU_RAMFC_GK20A_H */


@@ -33,7 +33,7 @@
#include <nvgpu/hw/gp10b/hw_ram_gp10b.h> #include <nvgpu/hw/gp10b/hw_ram_gp10b.h>
int gp10b_ramfc_commit_userd(struct channel_gk20a *ch) int gp10b_ramfc_commit_userd(struct nvgpu_channel *ch)
{ {
u32 addr_lo; u32 addr_lo;
u32 addr_hi; u32 addr_hi;
@@ -59,7 +59,7 @@ int gp10b_ramfc_commit_userd(struct channel_gk20a *ch)
return 0; return 0;
} }
int gp10b_ramfc_setup(struct channel_gk20a *ch, u64 gpfifo_base, int gp10b_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags) u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
@@ -111,7 +111,7 @@ int gp10b_ramfc_setup(struct channel_gk20a *ch, u64 gpfifo_base,
return g->ops.ramfc.commit_userd(ch); return g->ops.ramfc.commit_userd(ch);
} }
u32 gp10b_ramfc_get_syncpt(struct channel_gk20a *ch) u32 gp10b_ramfc_get_syncpt(struct nvgpu_channel *ch)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
u32 v, syncpt; u32 v, syncpt;
@@ -122,7 +122,7 @@ u32 gp10b_ramfc_get_syncpt(struct channel_gk20a *ch)
return syncpt; return syncpt;
} }
void gp10b_ramfc_set_syncpt(struct channel_gk20a *ch, u32 syncpt) void gp10b_ramfc_set_syncpt(struct nvgpu_channel *ch, u32 syncpt)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
u32 v = g->ops.pbdma.allowed_syncpoints_0_valid_f() | u32 v = g->ops.pbdma.allowed_syncpoints_0_valid_f() |


@@ -25,12 +25,12 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct channel_gk20a; struct nvgpu_channel;
int gp10b_ramfc_setup(struct channel_gk20a *ch, u64 gpfifo_base, int gp10b_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags); u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags);
int gp10b_ramfc_commit_userd(struct channel_gk20a *ch); int gp10b_ramfc_commit_userd(struct nvgpu_channel *ch);
u32 gp10b_ramfc_get_syncpt(struct channel_gk20a *ch); u32 gp10b_ramfc_get_syncpt(struct nvgpu_channel *ch);
void gp10b_ramfc_set_syncpt(struct channel_gk20a *ch, u32 syncpt); void gp10b_ramfc_set_syncpt(struct nvgpu_channel *ch, u32 syncpt);
#endif /* NVGPU_RAMFC_GP10B_H */ #endif /* NVGPU_RAMFC_GP10B_H */


@@ -32,7 +32,7 @@
#include "hal/fifo/ramfc_gv11b.h" #include "hal/fifo/ramfc_gv11b.h"
int gv11b_ramfc_setup(struct channel_gk20a *ch, u64 gpfifo_base, int gv11b_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags) u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
@@ -100,7 +100,7 @@ int gv11b_ramfc_setup(struct channel_gk20a *ch, u64 gpfifo_base,
return g->ops.ramfc.commit_userd(ch); return g->ops.ramfc.commit_userd(ch);
} }
void gv11b_ramfc_capture_ram_dump(struct gk20a *g, struct channel_gk20a *ch, void gv11b_ramfc_capture_ram_dump(struct gk20a *g, struct nvgpu_channel *ch,
struct nvgpu_channel_dump_info *info) struct nvgpu_channel_dump_info *info)
{ {
struct nvgpu_mem *mem = &ch->inst_block; struct nvgpu_mem *mem = &ch->inst_block;


@@ -26,12 +26,12 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct gk20a; struct gk20a;
struct channel_gk20a; struct nvgpu_channel;
struct nvgpu_channel_dump_info; struct nvgpu_channel_dump_info;
int gv11b_ramfc_setup(struct channel_gk20a *ch, u64 gpfifo_base, int gv11b_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags); u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags);
void gv11b_ramfc_capture_ram_dump(struct gk20a *g, struct channel_gk20a *ch, void gv11b_ramfc_capture_ram_dump(struct gk20a *g, struct nvgpu_channel *ch,
struct nvgpu_channel_dump_info *info); struct nvgpu_channel_dump_info *info);
#endif /* NVGPU_RAMFC_GV11B_H */ #endif /* NVGPU_RAMFC_GV11B_H */


@@ -33,7 +33,7 @@
#include <nvgpu/hw/tu104/hw_ram_tu104.h> #include <nvgpu/hw/tu104/hw_ram_tu104.h>
int tu104_ramfc_setup(struct channel_gk20a *ch, u64 gpfifo_base, int tu104_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags) u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;


@@ -25,9 +25,9 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct channel_gk20a; struct nvgpu_channel;
int tu104_ramfc_setup(struct channel_gk20a *ch, u64 gpfifo_base, int tu104_ramfc_setup(struct nvgpu_channel *ch, u64 gpfifo_base,
u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags); u32 gpfifo_entries, u64 pbdma_acquire_timeout, u32 flags);
#endif /* NVGPU_RAMFC_TU104_H */ #endif /* NVGPU_RAMFC_TU104_H */


@@ -35,7 +35,7 @@
#define FECS_MAILBOX_0_ACK_RESTORE 0x4U #define FECS_MAILBOX_0_ACK_RESTORE 0x4U
int gk20a_runlist_reschedule(struct channel_gk20a *ch, bool preempt_next) int gk20a_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next)
{ {
return nvgpu_runlist_reschedule(ch, preempt_next, true); return nvgpu_runlist_reschedule(ch, preempt_next, true);
} }
@@ -137,7 +137,7 @@ void gk20a_runlist_write_state(struct gk20a *g, u32 runlists_mask,
} }
/* trigger host preempt of GR pending load ctx if that ctx is not for ch */ /* trigger host preempt of GR pending load ctx if that ctx is not for ch */
int gk20a_fifo_reschedule_preempt_next(struct channel_gk20a *ch, int gk20a_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
bool wait_preempt) bool wait_preempt)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;


@@ -25,12 +25,12 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct channel_gk20a; struct nvgpu_channel;
struct tsg_gk20a; struct nvgpu_tsg;
struct gk20a; struct gk20a;
int gk20a_runlist_reschedule(struct channel_gk20a *ch, bool preempt_next); int gk20a_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next);
int gk20a_fifo_reschedule_preempt_next(struct channel_gk20a *ch, int gk20a_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
bool wait_preempt); bool wait_preempt);
u32 gk20a_runlist_count_max(void); u32 gk20a_runlist_count_max(void);
u32 gk20a_runlist_length_max(struct gk20a *g); u32 gk20a_runlist_length_max(struct gk20a *g);


@@ -26,7 +26,7 @@
#include <nvgpu/hw/gv11b/hw_fifo_gv11b.h> #include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
int gv11b_runlist_reschedule(struct channel_gk20a *ch, bool preempt_next) int gv11b_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next)
{ {
/* /*
* gv11b allows multiple outstanding preempts, * gv11b allows multiple outstanding preempts,


@@ -25,9 +25,9 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct channel_gk20a; struct nvgpu_channel;
int gv11b_runlist_reschedule(struct channel_gk20a *ch, bool preempt_next); int gv11b_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next);
u32 gv11b_runlist_count_max(void); u32 gv11b_runlist_count_max(void);
#endif /* NVGPU_RUNLIST_FIFO_GV11B_H */ #endif /* NVGPU_RUNLIST_FIFO_GV11B_H */


@@ -42,7 +42,7 @@ u32 gk20a_runlist_entry_size(struct gk20a *g)
return ram_rl_entry_size_v(); return ram_rl_entry_size_v();
} }
void gk20a_runlist_get_tsg_entry(struct tsg_gk20a *tsg, void gk20a_runlist_get_tsg_entry(struct nvgpu_tsg *tsg,
u32 *runlist, u32 timeslice) u32 *runlist, u32 timeslice)
{ {
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
@@ -70,7 +70,7 @@ void gk20a_runlist_get_tsg_entry(struct tsg_gk20a *tsg,
runlist[1] = 0; runlist[1] = 0;
} }
void gk20a_runlist_get_ch_entry(struct channel_gk20a *ch, u32 *runlist) void gk20a_runlist_get_ch_entry(struct nvgpu_channel *ch, u32 *runlist)
{ {
runlist[0] = ram_rl_entry_chid_f(ch->chid); runlist[0] = ram_rl_entry_chid_f(ch->chid);
runlist[1] = 0; runlist[1] = 0;


@@ -25,13 +25,13 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct channel_gk20a; struct nvgpu_channel;
struct tsg_gk20a; struct nvgpu_tsg;
struct gk20a; struct gk20a;
u32 gk20a_runlist_entry_size(struct gk20a *g); u32 gk20a_runlist_entry_size(struct gk20a *g);
void gk20a_runlist_get_tsg_entry(struct tsg_gk20a *tsg, void gk20a_runlist_get_tsg_entry(struct nvgpu_tsg *tsg,
u32 *runlist, u32 timeslice); u32 *runlist, u32 timeslice);
void gk20a_runlist_get_ch_entry(struct channel_gk20a *ch, u32 *runlist); void gk20a_runlist_get_ch_entry(struct nvgpu_channel *ch, u32 *runlist);
#endif /* NVGPU_RUNLIST_RAM_GK20A_H */ #endif /* NVGPU_RUNLIST_RAM_GK20A_H */


@@ -36,7 +36,7 @@ u32 gv11b_runlist_entry_size(struct gk20a *g)
return ram_rl_entry_size_v(); return ram_rl_entry_size_v();
} }
void gv11b_runlist_get_tsg_entry(struct tsg_gk20a *tsg, void gv11b_runlist_get_tsg_entry(struct nvgpu_tsg *tsg,
u32 *runlist, u32 timeslice) u32 *runlist, u32 timeslice)
{ {
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
@@ -68,7 +68,7 @@ void gv11b_runlist_get_tsg_entry(struct tsg_gk20a *tsg,
} }
void gv11b_runlist_get_ch_entry(struct channel_gk20a *ch, u32 *runlist) void gv11b_runlist_get_ch_entry(struct nvgpu_channel *ch, u32 *runlist)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
u32 addr_lo, addr_hi; u32 addr_lo, addr_hi;


@@ -25,12 +25,12 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct channel_gk20a; struct nvgpu_channel;
struct tsg_gk20a; struct nvgpu_tsg;
u32 gv11b_runlist_entry_size(struct gk20a *g); u32 gv11b_runlist_entry_size(struct gk20a *g);
void gv11b_runlist_get_tsg_entry(struct tsg_gk20a *tsg, void gv11b_runlist_get_tsg_entry(struct nvgpu_tsg *tsg,
u32 *runlist, u32 timeslice); u32 *runlist, u32 timeslice);
void gv11b_runlist_get_ch_entry(struct channel_gk20a *ch, u32 *runlist); void gv11b_runlist_get_ch_entry(struct nvgpu_channel *ch, u32 *runlist);
#endif /* NVGPU_RUNLIST_RAM_GV11B_H */ #endif /* NVGPU_RUNLIST_RAM_GV11B_H */


@@ -27,10 +27,10 @@
#include "hal/fifo/tsg_gk20a.h" #include "hal/fifo/tsg_gk20a.h"
void gk20a_tsg_enable(struct tsg_gk20a *tsg) void gk20a_tsg_enable(struct nvgpu_tsg *tsg)
{ {
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
struct channel_gk20a *ch; struct nvgpu_channel *ch;
nvgpu_tsg_disable_sched(g, tsg); nvgpu_tsg_disable_sched(g, tsg);


@@ -23,8 +23,8 @@
#ifndef NVGPU_TSG_GK20A_H #ifndef NVGPU_TSG_GK20A_H
#define NVGPU_TSG_GK20A_H #define NVGPU_TSG_GK20A_H
struct tsg_gk20a; struct nvgpu_tsg;
void gk20a_tsg_enable(struct tsg_gk20a *tsg); void gk20a_tsg_enable(struct nvgpu_tsg *tsg);
#endif /* NVGPU_TSG_GK20A_H */ #endif /* NVGPU_TSG_GK20A_H */


@@ -35,11 +35,11 @@
#define ASYNC_CE_RUNQUE 2U /* pbdma 2 */ #define ASYNC_CE_RUNQUE 2U /* pbdma 2 */
/* TSG enable sequence applicable for Volta and onwards */ /* TSG enable sequence applicable for Volta and onwards */
void gv11b_tsg_enable(struct tsg_gk20a *tsg) void gv11b_tsg_enable(struct nvgpu_tsg *tsg)
{ {
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
struct channel_gk20a *ch; struct nvgpu_channel *ch;
struct channel_gk20a *last_ch = NULL; struct nvgpu_channel *last_ch = NULL;
nvgpu_rwsem_down_read(&tsg->ch_list_lock); nvgpu_rwsem_down_read(&tsg->ch_list_lock);
nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
@@ -53,8 +53,8 @@ void gv11b_tsg_enable(struct tsg_gk20a *tsg)
} }
} }
void gv11b_tsg_unbind_channel_check_eng_faulted(struct tsg_gk20a *tsg, void gv11b_tsg_unbind_channel_check_eng_faulted(struct nvgpu_tsg *tsg,
struct channel_gk20a *ch, struct nvgpu_channel *ch,
struct nvgpu_channel_hw_state *hw_state) struct nvgpu_channel_hw_state *hw_state)
{ {
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
@@ -81,8 +81,8 @@ void gv11b_tsg_unbind_channel_check_eng_faulted(struct tsg_gk20a *tsg,
} }
} }
void gv11b_tsg_bind_channel_eng_method_buffers(struct tsg_gk20a *tsg, void gv11b_tsg_bind_channel_eng_method_buffers(struct nvgpu_tsg *tsg,
struct channel_gk20a *ch) struct nvgpu_channel *ch)
{ {
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
u64 gpu_va; u64 gpu_va;
@@ -114,7 +114,7 @@ static u32 gv11b_tsg_get_eng_method_buffer_size(struct gk20a *g)
return buffer_size; return buffer_size;
} }
void gv11b_tsg_init_eng_method_buffers(struct gk20a *g, struct tsg_gk20a *tsg) void gv11b_tsg_init_eng_method_buffers(struct gk20a *g, struct nvgpu_tsg *tsg)
{ {
struct vm_gk20a *vm = g->mm.bar2.vm; struct vm_gk20a *vm = g->mm.bar2.vm;
int err = 0; int err = 0;
@@ -158,7 +158,7 @@ void gv11b_tsg_init_eng_method_buffers(struct gk20a *g, struct tsg_gk20a *tsg)
} }
void gv11b_tsg_deinit_eng_method_buffers(struct gk20a *g, void gv11b_tsg_deinit_eng_method_buffers(struct gk20a *g,
struct tsg_gk20a *tsg) struct nvgpu_tsg *tsg)
{ {
struct vm_gk20a *vm = g->mm.bar2.vm; struct vm_gk20a *vm = g->mm.bar2.vm;
unsigned int runque; unsigned int runque;


@@ -23,19 +23,19 @@
#ifndef NVGPU_TSG_GV11B_H #ifndef NVGPU_TSG_GV11B_H
#define NVGPU_TSG_GV11B_H #define NVGPU_TSG_GV11B_H
struct tsg_gk20a; struct nvgpu_tsg;
struct channel_gk20a; struct nvgpu_channel;
struct nvgpu_channel_hw_state; struct nvgpu_channel_hw_state;
void gv11b_tsg_enable(struct tsg_gk20a *tsg); void gv11b_tsg_enable(struct nvgpu_tsg *tsg);
void gv11b_tsg_unbind_channel_check_eng_faulted(struct tsg_gk20a *tsg, void gv11b_tsg_unbind_channel_check_eng_faulted(struct nvgpu_tsg *tsg,
struct channel_gk20a *ch, struct nvgpu_channel *ch,
struct nvgpu_channel_hw_state *hw_state); struct nvgpu_channel_hw_state *hw_state);
void gv11b_tsg_init_eng_method_buffers(struct gk20a *g, void gv11b_tsg_init_eng_method_buffers(struct gk20a *g,
struct tsg_gk20a *tsg); struct nvgpu_tsg *tsg);
void gv11b_tsg_deinit_eng_method_buffers(struct gk20a *g, void gv11b_tsg_deinit_eng_method_buffers(struct gk20a *g,
struct tsg_gk20a *tsg); struct nvgpu_tsg *tsg);
void gv11b_tsg_bind_channel_eng_method_buffers(struct tsg_gk20a *tsg, void gv11b_tsg_bind_channel_eng_method_buffers(struct nvgpu_tsg *tsg,
struct channel_gk20a *ch); struct nvgpu_channel *ch);
#endif /* NVGPU_TSG_GV11B_H */ #endif /* NVGPU_TSG_GV11B_H */


@@ -32,7 +32,7 @@
#include <nvgpu/hw/gk20a/hw_ram_gk20a.h> #include <nvgpu/hw/gk20a/hw_ram_gk20a.h>
void gk20a_userd_init_mem(struct gk20a *g, struct channel_gk20a *c) void gk20a_userd_init_mem(struct gk20a *g, struct nvgpu_channel *c)
{ {
struct nvgpu_mem *mem = c->userd_mem; struct nvgpu_mem *mem = c->userd_mem;
u32 offset = c->userd_offset / U32(sizeof(u32)); u32 offset = c->userd_offset / U32(sizeof(u32));
@@ -50,7 +50,7 @@ void gk20a_userd_init_mem(struct gk20a *g, struct channel_gk20a *c)
nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_put_w(), 0); nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_put_w(), 0);
} }
u32 gk20a_userd_gp_get(struct gk20a *g, struct channel_gk20a *c) u32 gk20a_userd_gp_get(struct gk20a *g, struct nvgpu_channel *c)
{ {
u64 userd_gpu_va = gk20a_channel_userd_gpu_va(c); u64 userd_gpu_va = gk20a_channel_userd_gpu_va(c);
u64 addr = userd_gpu_va + sizeof(u32) * ram_userd_gp_get_w(); u64 addr = userd_gpu_va + sizeof(u32) * ram_userd_gp_get_w();
@@ -60,7 +60,7 @@ u32 gk20a_userd_gp_get(struct gk20a *g, struct channel_gk20a *c)
return nvgpu_bar1_readl(g, (u32)addr); return nvgpu_bar1_readl(g, (u32)addr);
} }
u64 gk20a_userd_pb_get(struct gk20a *g, struct channel_gk20a *c) u64 gk20a_userd_pb_get(struct gk20a *g, struct nvgpu_channel *c)
{ {
u64 userd_gpu_va = gk20a_channel_userd_gpu_va(c); u64 userd_gpu_va = gk20a_channel_userd_gpu_va(c);
u64 lo_addr = userd_gpu_va + sizeof(u32) * ram_userd_get_w(); u64 lo_addr = userd_gpu_va + sizeof(u32) * ram_userd_get_w();
@@ -74,7 +74,7 @@ u64 gk20a_userd_pb_get(struct gk20a *g, struct channel_gk20a *c)
return ((u64)hi << 32) | lo; return ((u64)hi << 32) | lo;
} }
void gk20a_userd_gp_put(struct gk20a *g, struct channel_gk20a *c) void gk20a_userd_gp_put(struct gk20a *g, struct nvgpu_channel *c)
{ {
u64 userd_gpu_va = gk20a_channel_userd_gpu_va(c); u64 userd_gpu_va = gk20a_channel_userd_gpu_va(c);
u64 addr = userd_gpu_va + sizeof(u32) * ram_userd_gp_put_w(); u64 addr = userd_gpu_va + sizeof(u32) * ram_userd_gp_put_w();


@@ -24,12 +24,12 @@
#define USERD_GK20A_H #define USERD_GK20A_H
struct gk20a; struct gk20a;
struct channel_gk20a; struct nvgpu_channel;
void gk20a_userd_init_mem(struct gk20a *g, struct channel_gk20a *c); void gk20a_userd_init_mem(struct gk20a *g, struct nvgpu_channel *c);
u32 gk20a_userd_gp_get(struct gk20a *g, struct channel_gk20a *c); u32 gk20a_userd_gp_get(struct gk20a *g, struct nvgpu_channel *c);
u64 gk20a_userd_pb_get(struct gk20a *g, struct channel_gk20a *c); u64 gk20a_userd_pb_get(struct gk20a *g, struct nvgpu_channel *c);
void gk20a_userd_gp_put(struct gk20a *g, struct channel_gk20a *c); void gk20a_userd_gp_put(struct gk20a *g, struct nvgpu_channel *c);
u32 gk20a_userd_entry_size(struct gk20a *g); u32 gk20a_userd_entry_size(struct gk20a *g);
#endif /* USERD_GK20A_H */ #endif /* USERD_GK20A_H */


@@ -31,7 +31,7 @@
#include "userd_gv11b.h" #include "userd_gv11b.h"
u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *ch) u32 gv11b_userd_gp_get(struct gk20a *g, struct nvgpu_channel *ch)
{ {
struct nvgpu_mem *mem = ch->userd_mem; struct nvgpu_mem *mem = ch->userd_mem;
u32 offset = ch->userd_offset / U32(sizeof(u32)); u32 offset = ch->userd_offset / U32(sizeof(u32));
@@ -39,7 +39,7 @@ u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *ch)
return nvgpu_mem_rd32(g, mem, offset + ram_userd_gp_get_w()); return nvgpu_mem_rd32(g, mem, offset + ram_userd_gp_get_w());
} }
u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *ch) u64 gv11b_userd_pb_get(struct gk20a *g, struct nvgpu_channel *ch)
{ {
struct nvgpu_mem *mem = ch->userd_mem; struct nvgpu_mem *mem = ch->userd_mem;
u32 offset = ch->userd_offset / U32(sizeof(u32)); u32 offset = ch->userd_offset / U32(sizeof(u32));
@@ -51,7 +51,7 @@ u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *ch)
return ((u64)hi << 32) | lo; return ((u64)hi << 32) | lo;
} }
void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *ch) void gv11b_userd_gp_put(struct gk20a *g, struct nvgpu_channel *ch)
{ {
struct nvgpu_mem *mem = ch->userd_mem; struct nvgpu_mem *mem = ch->userd_mem;
u32 offset = ch->userd_offset / U32(sizeof(u32)); u32 offset = ch->userd_offset / U32(sizeof(u32));


@@ -24,10 +24,10 @@
#define USERD_GV11B_H #define USERD_GV11B_H
struct gk20a; struct gk20a;
struct channel_gk20a; struct nvgpu_channel;
u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *ch); u32 gv11b_userd_gp_get(struct gk20a *g, struct nvgpu_channel *ch);
u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *ch); u64 gv11b_userd_pb_get(struct gk20a *g, struct nvgpu_channel *ch);
void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *ch); void gv11b_userd_gp_put(struct gk20a *g, struct nvgpu_channel *ch);
#endif /* USERD_GV11B_H */ #endif /* USERD_GV11B_H */


@@ -40,7 +40,7 @@ u64 gv11b_usermode_bus_base(struct gk20a *g)
return usermode_cfg0_r(); return usermode_cfg0_r();
} }
u32 gv11b_usermode_doorbell_token(struct channel_gk20a *ch) u32 gv11b_usermode_doorbell_token(struct nvgpu_channel *ch)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
struct nvgpu_fifo *f = &g->fifo; struct nvgpu_fifo *f = &g->fifo;
@@ -49,7 +49,7 @@ u32 gv11b_usermode_doorbell_token(struct channel_gk20a *ch)
return usermode_notify_channel_pending_id_f(hw_chid); return usermode_notify_channel_pending_id_f(hw_chid);
} }
void gv11b_usermode_ring_doorbell(struct channel_gk20a *ch) void gv11b_usermode_ring_doorbell(struct nvgpu_channel *ch)
{ {
nvgpu_log_info(ch->g, "channel ring door bell %d", ch->chid); nvgpu_log_info(ch->g, "channel ring door bell %d", ch->chid);


@@ -25,11 +25,11 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct channel_gk20a; struct nvgpu_channel;
u64 gv11b_usermode_base(struct gk20a *g); u64 gv11b_usermode_base(struct gk20a *g);
u64 gv11b_usermode_bus_base(struct gk20a *g); u64 gv11b_usermode_bus_base(struct gk20a *g);
u32 gv11b_usermode_doorbell_token(struct channel_gk20a *ch); u32 gv11b_usermode_doorbell_token(struct nvgpu_channel *ch);
void gv11b_usermode_ring_doorbell(struct channel_gk20a *ch); void gv11b_usermode_ring_doorbell(struct nvgpu_channel *ch);
#endif /* NVGPU_USERMODE_GV11B_H */ #endif /* NVGPU_USERMODE_GV11B_H */


@@ -53,7 +53,7 @@ void tu104_usermode_setup_hw(struct gk20a *g)
nvgpu_writel(g, ctrl_virtual_channel_cfg_r(0), val); nvgpu_writel(g, ctrl_virtual_channel_cfg_r(0), val);
} }
u32 tu104_usermode_doorbell_token(struct channel_gk20a *ch) u32 tu104_usermode_doorbell_token(struct nvgpu_channel *ch)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
struct nvgpu_fifo *f = &g->fifo; struct nvgpu_fifo *f = &g->fifo;
@@ -63,7 +63,7 @@ u32 tu104_usermode_doorbell_token(struct channel_gk20a *ch)
ctrl_doorbell_runlist_id_f(ch->runlist_id); ctrl_doorbell_runlist_id_f(ch->runlist_id);
} }
void tu104_usermode_ring_doorbell(struct channel_gk20a *ch) void tu104_usermode_ring_doorbell(struct nvgpu_channel *ch)
{ {
nvgpu_log_info(ch->g, "channel ring door bell %d, runlist %d", nvgpu_log_info(ch->g, "channel ring door bell %d, runlist %d",
ch->chid, ch->runlist_id); ch->chid, ch->runlist_id);


@@ -26,12 +26,12 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct gk20a; struct gk20a;
struct channel_gk20a; struct nvgpu_channel;
u64 tu104_usermode_base(struct gk20a *g); u64 tu104_usermode_base(struct gk20a *g);
u64 tu104_usermode_bus_base(struct gk20a *g); u64 tu104_usermode_bus_base(struct gk20a *g);
void tu104_usermode_setup_hw(struct gk20a *g); void tu104_usermode_setup_hw(struct gk20a *g);
u32 tu104_usermode_doorbell_token(struct channel_gk20a *ch); u32 tu104_usermode_doorbell_token(struct nvgpu_channel *ch);
void tu104_usermode_ring_doorbell(struct channel_gk20a *ch); void tu104_usermode_ring_doorbell(struct nvgpu_channel *ch);
#endif /* NVGPU_USERMODE_TU104_H */ #endif /* NVGPU_USERMODE_TU104_H */


@@ -53,10 +53,10 @@
#include <nvgpu/hw/gk20a/hw_gr_gk20a.h> #include <nvgpu/hw/gk20a/hw_gr_gk20a.h>
int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g, int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
struct channel_gk20a *c, struct nvgpu_channel *c,
bool enable_smpc_ctxsw) bool enable_smpc_ctxsw)
{ {
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
int ret; int ret;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
@@ -86,11 +86,11 @@ out:
} }
int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g, int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
struct channel_gk20a *c, struct nvgpu_channel *c,
u64 gpu_va, u64 gpu_va,
u32 mode) u32 mode)
{ {
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
bool skip_update = false; bool skip_update = false;
int ret; int ret;
@@ -146,7 +146,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
} }
if (c->subctx != NULL) { if (c->subctx != NULL) {
struct channel_gk20a *ch; struct nvgpu_channel *ch;
nvgpu_rwsem_down_read(&tsg->ch_list_lock); nvgpu_rwsem_down_read(&tsg->ch_list_lock);
nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
@@ -647,7 +647,7 @@ void gk20a_gr_init_ovr_sm_dsm_perf(void)
* write will actually occur. so later we should put a lazy, * write will actually occur. so later we should put a lazy,
* map-and-hold system in the patch write state */ * map-and-hold system in the patch write state */
static int gr_gk20a_ctx_patch_smpc(struct gk20a *g, static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
struct channel_gk20a *ch, struct nvgpu_channel *ch,
u32 addr, u32 data, u32 addr, u32 data,
struct nvgpu_gr_ctx *gr_ctx) struct nvgpu_gr_ctx *gr_ctx)
{ {
@@ -1329,14 +1329,14 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
return -EINVAL; return -EINVAL;
} }
bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch) bool gk20a_is_channel_ctx_resident(struct nvgpu_channel *ch)
{ {
u32 curr_gr_ctx; u32 curr_gr_ctx;
u32 curr_gr_tsgid; u32 curr_gr_tsgid;
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
struct channel_gk20a *curr_ch; struct nvgpu_channel *curr_ch;
bool ret = false; bool ret = false;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
curr_gr_ctx = g->ops.gr.falcon.get_current_ctx(g); curr_gr_ctx = g->ops.gr.falcon.get_current_ctx(g);
@@ -1377,13 +1377,13 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch)
return ret; return ret;
} }
static int gr_exec_ctx_ops(struct channel_gk20a *ch, static int gr_exec_ctx_ops(struct nvgpu_channel *ch,
struct nvgpu_dbg_reg_op *ctx_ops, u32 num_ops, struct nvgpu_dbg_reg_op *ctx_ops, u32 num_ops,
u32 num_ctx_wr_ops, u32 num_ctx_rd_ops, u32 num_ctx_wr_ops, u32 num_ctx_rd_ops,
bool ch_is_curr_ctx) bool ch_is_curr_ctx)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
bool gr_ctx_ready = false; bool gr_ctx_ready = false;
bool pm_ctx_ready = false; bool pm_ctx_ready = false;
@@ -1629,7 +1629,7 @@ static int gr_exec_ctx_ops(struct channel_gk20a *ch,
return err; return err;
} }
int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, int gr_gk20a_exec_ctx_ops(struct nvgpu_channel *ch,
struct nvgpu_dbg_reg_op *ctx_ops, u32 num_ops, struct nvgpu_dbg_reg_op *ctx_ops, u32 num_ops,
u32 num_ctx_wr_ops, u32 num_ctx_rd_ops, u32 num_ctx_wr_ops, u32 num_ctx_rd_ops,
bool *is_curr_ctx) bool *is_curr_ctx)
@@ -1895,7 +1895,7 @@ void gk20a_gr_resume_all_sms(struct gk20a *g)
} }
int gr_gk20a_set_sm_debug_mode(struct gk20a *g, int gr_gk20a_set_sm_debug_mode(struct gk20a *g,
struct channel_gk20a *ch, u64 sms, bool enable) struct nvgpu_channel *ch, u64 sms, bool enable)
{ {
struct nvgpu_dbg_reg_op *ops; struct nvgpu_dbg_reg_op *ops;
unsigned int i = 0, sm_id; unsigned int i = 0, sm_id;
@@ -1963,7 +1963,7 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g,
* Returns bool value indicating if context was resident * Returns bool value indicating if context was resident
* or not * or not
*/ */
bool gr_gk20a_suspend_context(struct channel_gk20a *ch) bool gr_gk20a_suspend_context(struct nvgpu_channel *ch)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
bool ctx_resident = false; bool ctx_resident = false;
@@ -1978,7 +1978,7 @@ bool gr_gk20a_suspend_context(struct channel_gk20a *ch)
return ctx_resident; return ctx_resident;
} }
bool gr_gk20a_resume_context(struct channel_gk20a *ch) bool gr_gk20a_resume_context(struct nvgpu_channel *ch)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
bool ctx_resident = false; bool ctx_resident = false;
@@ -1999,7 +1999,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
{ {
int local_ctx_resident_ch_fd = -1; int local_ctx_resident_ch_fd = -1;
bool ctx_resident; bool ctx_resident;
struct channel_gk20a *ch; struct nvgpu_channel *ch;
struct dbg_session_channel_data *ch_data; struct dbg_session_channel_data *ch_data;
int err = 0; int err = 0;
@@ -2044,7 +2044,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
{ {
int local_ctx_resident_ch_fd = -1; int local_ctx_resident_ch_fd = -1;
bool ctx_resident; bool ctx_resident;
struct channel_gk20a *ch; struct nvgpu_channel *ch;
int err = 0; int err = 0;
struct dbg_session_channel_data *ch_data; struct dbg_session_channel_data *ch_data;


@@ -27,7 +27,7 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct gk20a; struct gk20a;
struct channel_gk20a; struct nvgpu_channel;
struct nvgpu_warpstate; struct nvgpu_warpstate;
struct dbg_session_gk20a; struct dbg_session_gk20a;
struct nvgpu_dbg_reg_op; struct nvgpu_dbg_reg_op;
@@ -37,7 +37,7 @@ enum ctxsw_addr_type;
/* sm */ /* sm */
bool gk20a_gr_sm_debugger_attached(struct gk20a *g); bool gk20a_gr_sm_debugger_attached(struct gk20a *g);
u32 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g); u32 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g);
int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, int gr_gk20a_exec_ctx_ops(struct nvgpu_channel *ch,
struct nvgpu_dbg_reg_op *ctx_ops, u32 num_ops, struct nvgpu_dbg_reg_op *ctx_ops, u32 num_ops,
u32 num_ctx_wr_ops, u32 num_ctx_rd_ops, u32 num_ctx_wr_ops, u32 num_ctx_rd_ops,
bool *is_curr_ctx); bool *is_curr_ctx);
@@ -50,10 +50,10 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
u32 *offsets, u32 *offset_addrs, u32 *offsets, u32 *offset_addrs,
u32 *num_offsets); u32 *num_offsets);
int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g, int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
struct channel_gk20a *c, struct nvgpu_channel *c,
bool enable_smpc_ctxsw); bool enable_smpc_ctxsw);
int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g, int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
struct channel_gk20a *c, struct nvgpu_channel *c,
u64 gpu_va, u32 mode); u64 gpu_va, u32 mode);
void gk20a_gr_resume_single_sm(struct gk20a *g, void gk20a_gr_resume_single_sm(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm); u32 gpc, u32 tpc, u32 sm);
@@ -64,8 +64,8 @@ void gk20a_gr_suspend_single_sm(struct gk20a *g,
void gk20a_gr_suspend_all_sms(struct gk20a *g, void gk20a_gr_suspend_all_sms(struct gk20a *g,
u32 global_esr_mask, bool check_errors); u32 global_esr_mask, bool check_errors);
int gr_gk20a_set_sm_debug_mode(struct gk20a *g, int gr_gk20a_set_sm_debug_mode(struct gk20a *g,
struct channel_gk20a *ch, u64 sms, bool enable); struct nvgpu_channel *ch, u64 sms, bool enable);
bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch); bool gk20a_is_channel_ctx_resident(struct nvgpu_channel *ch);
int gk20a_gr_lock_down_sm(struct gk20a *g, int gk20a_gr_lock_down_sm(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm, u32 global_esr_mask, u32 gpc, u32 tpc, u32 sm, u32 global_esr_mask,
bool check_errors); bool check_errors);
@@ -73,8 +73,8 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
u32 global_esr_mask, bool check_errors); u32 global_esr_mask, bool check_errors);
u32 gk20a_gr_get_sm_hww_warp_esr(struct gk20a *g, u32 gpc, u32 tpc, u32 sm); u32 gk20a_gr_get_sm_hww_warp_esr(struct gk20a *g, u32 gpc, u32 tpc, u32 sm);
u32 gk20a_gr_get_sm_hww_global_esr(struct gk20a *g, u32 gpc, u32 tpc, u32 sm); u32 gk20a_gr_get_sm_hww_global_esr(struct gk20a *g, u32 gpc, u32 tpc, u32 sm);
bool gr_gk20a_suspend_context(struct channel_gk20a *ch); bool gr_gk20a_suspend_context(struct nvgpu_channel *ch);
bool gr_gk20a_resume_context(struct channel_gk20a *ch); bool gr_gk20a_resume_context(struct nvgpu_channel *ch);
int gr_gk20a_suspend_contexts(struct gk20a *g, int gr_gk20a_suspend_contexts(struct gk20a *g,
struct dbg_session_gk20a *dbg_s, struct dbg_session_gk20a *dbg_s,
int *ctx_resident_ch_fd); int *ctx_resident_ch_fd);
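Headers such as this one reference the channel and TSG only through opaque pointers, so the rename amounts to updating the forward declarations and prototypes. A minimal sketch of that pattern (the example_ prototypes are hypothetical and not part of nvgpu):

struct nvgpu_channel;	/* was: struct channel_gk20a; */
struct nvgpu_tsg;	/* was: struct tsg_gk20a;     */

/* prototypes keep taking opaque pointers; no struct layout is needed here */
int example_suspend_channel_ctx(struct nvgpu_channel *ch);
int example_resume_channel_ctx(struct nvgpu_channel *ch);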

View File

@@ -437,10 +437,10 @@ int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
return 0; return 0;
} }
int gr_gm20b_update_pc_sampling(struct channel_gk20a *c, int gr_gm20b_update_pc_sampling(struct nvgpu_channel *c,
bool enable) bool enable)
{ {
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
struct nvgpu_mem *mem; struct nvgpu_mem *mem;
@@ -574,7 +574,7 @@ static void gm20b_gr_read_sm_error_state(struct gk20a *g,
} }
int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
struct channel_gk20a *fault_ch) struct nvgpu_channel *fault_ch)
{ {
int sm_id; int sm_id;
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
@@ -582,7 +582,7 @@ int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
GPU_LIT_TPC_IN_GPC_STRIDE); GPU_LIT_TPC_IN_GPC_STRIDE);
u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc;
struct nvgpu_tsg_sm_error_state *sm_error_states = NULL; struct nvgpu_tsg_sm_error_state *sm_error_states = NULL;
struct tsg_gk20a *tsg = NULL; struct nvgpu_tsg *tsg = NULL;
nvgpu_mutex_acquire(&g->dbg_sessions_lock); nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -608,11 +608,11 @@ record_fail:
} }
int gm20b_gr_clear_sm_error_state(struct gk20a *g, int gm20b_gr_clear_sm_error_state(struct gk20a *g,
struct channel_gk20a *ch, u32 sm_id) struct nvgpu_channel *ch, u32 sm_id)
{ {
u32 gpc, tpc, offset; u32 gpc, tpc, offset;
u32 val; u32 val;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
GPU_LIT_TPC_IN_GPC_STRIDE); GPU_LIT_TPC_IN_GPC_STRIDE);

View File

@@ -51,14 +51,14 @@ bool gr_gm20b_is_tpc_addr(struct gk20a *g, u32 addr);
u32 gr_gm20b_get_tpc_num(struct gk20a *g, u32 addr); u32 gr_gm20b_get_tpc_num(struct gk20a *g, u32 addr);
int gr_gm20b_dump_gr_status_regs(struct gk20a *g, int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
struct gk20a_debug_output *o); struct gk20a_debug_output *o);
int gr_gm20b_update_pc_sampling(struct channel_gk20a *c, int gr_gm20b_update_pc_sampling(struct nvgpu_channel *c,
bool enable); bool enable);
void gr_gm20b_init_cyclestats(struct gk20a *g); void gr_gm20b_init_cyclestats(struct gk20a *g);
void gr_gm20b_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state); void gr_gm20b_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state);
int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc,
u32 tpc, u32 sm, struct channel_gk20a *fault_ch); u32 tpc, u32 sm, struct nvgpu_channel *fault_ch);
int gm20b_gr_clear_sm_error_state(struct gk20a *g, int gm20b_gr_clear_sm_error_state(struct gk20a *g,
struct channel_gk20a *ch, u32 sm_id); struct nvgpu_channel *ch, u32 sm_id);
void gm20b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, void gm20b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
u32 global_esr); u32 global_esr);
u32 gr_gm20b_get_pmm_per_chiplet_offset(void); u32 gr_gm20b_get_pmm_per_chiplet_offset(void);

View File

@@ -380,10 +380,10 @@ void gr_gp10b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
} }
} }
static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a *fault_ch) static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct nvgpu_channel *fault_ch)
{ {
int ret = 0; int ret = 0;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
tsg = tsg_gk20a_from_ch(fault_ch); tsg = tsg_gk20a_from_ch(fault_ch);
if (tsg == NULL) { if (tsg == NULL) {
@@ -420,10 +420,10 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
} }
int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
struct channel_gk20a *fault_ch) struct nvgpu_channel *fault_ch)
{ {
int ret; int ret;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
@@ -483,12 +483,12 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
*/ */
int gr_gp10b_pre_process_sm_exception(struct gk20a *g, int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr, u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr,
bool sm_debugger_attached, struct channel_gk20a *fault_ch, bool sm_debugger_attached, struct nvgpu_channel *fault_ch,
bool *early_exit, bool *ignore_debugger) bool *early_exit, bool *ignore_debugger)
{ {
#ifdef NVGPU_DEBUGGER #ifdef NVGPU_DEBUGGER
bool cilp_enabled = false; bool cilp_enabled = false;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
*early_exit = false; *early_exit = false;
*ignore_debugger = false; *ignore_debugger = false;
@@ -624,11 +624,11 @@ u32 get_ecc_override_val(struct gk20a *g)
return 0; return 0;
} }
bool gr_gp10b_suspend_context(struct channel_gk20a *ch, bool gr_gp10b_suspend_context(struct nvgpu_channel *ch,
bool *cilp_preempt_pending) bool *cilp_preempt_pending)
{ {
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
bool ctx_resident = false; bool ctx_resident = false;
int err = 0; int err = 0;
@@ -671,8 +671,8 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
{ {
u32 delay = POLL_DELAY_MIN_US; u32 delay = POLL_DELAY_MIN_US;
bool cilp_preempt_pending = false; bool cilp_preempt_pending = false;
struct channel_gk20a *cilp_preempt_pending_ch = NULL; struct nvgpu_channel *cilp_preempt_pending_ch = NULL;
struct channel_gk20a *ch; struct nvgpu_channel *ch;
struct dbg_session_channel_data *ch_data; struct dbg_session_channel_data *ch_data;
int err = 0; int err = 0;
int local_ctx_resident_ch_fd = -1; int local_ctx_resident_ch_fd = -1;
@@ -714,7 +714,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
nvgpu_mutex_release(&g->dbg_sessions_lock); nvgpu_mutex_release(&g->dbg_sessions_lock);
if (cilp_preempt_pending_ch != NULL) { if (cilp_preempt_pending_ch != NULL) {
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
struct nvgpu_timeout timeout; struct nvgpu_timeout timeout;
@@ -753,10 +753,10 @@ clean_up:
return err; return err;
} }
int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch, int gr_gp10b_set_boosted_ctx(struct nvgpu_channel *ch,
bool boost) bool boost)
{ {
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
struct gk20a *g = ch->g; struct gk20a *g = ch->g;
struct nvgpu_mem *mem; struct nvgpu_mem *mem;
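The gp10b fault paths above follow the same shape: the faulting channel is mapped to its TSG before any TSG-level action is taken. A minimal sketch under that assumption (the example_ wrapper is hypothetical; tsg_gk20a_from_ch() and gr_gp10b_set_cilp_preempt_pending() are the functions shown in the hunks above):

static int example_mark_cilp_preempt_on_fault(struct gk20a *g,
		struct nvgpu_channel *fault_ch)
{
	struct nvgpu_tsg *tsg = tsg_gk20a_from_ch(fault_ch);

	if (tsg == NULL) {
		return -EINVAL;
	}

	/* request a CILP preempt for the faulting channel's TSG */
	return gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
}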

View File

@@ -28,7 +28,7 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct gk20a; struct gk20a;
struct channel_gk20a; struct nvgpu_channel;
struct nvgpu_gr_ctx; struct nvgpu_gr_ctx;
struct dbg_session_gk20a; struct dbg_session_gk20a;
struct gk20a_debug_output; struct gk20a_debug_output;
@@ -37,7 +37,7 @@ struct gk20a_debug_output;
#define NVC097_BES_CROP_DEBUG4_CLAMP_FP_BLEND_TO_MAXVAL 0x1U #define NVC097_BES_CROP_DEBUG4_CLAMP_FP_BLEND_TO_MAXVAL 0x1U
int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
struct channel_gk20a *fault_ch); struct nvgpu_channel *fault_ch);
void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data); void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data);
void gr_gp10b_set_bes_crop_debug4(struct gk20a *g, u32 data); void gr_gp10b_set_bes_crop_debug4(struct gk20a *g, u32 data);
void gr_gp10b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data); void gr_gp10b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data);
@@ -47,7 +47,7 @@ int gr_gp10b_dump_gr_status_regs(struct gk20a *g,
void gr_gp10b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index); void gr_gp10b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index);
int gr_gp10b_pre_process_sm_exception(struct gk20a *g, int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr, u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr,
bool sm_debugger_attached, struct channel_gk20a *fault_ch, bool sm_debugger_attached, struct nvgpu_channel *fault_ch,
bool *early_exit, bool *ignore_debugger); bool *early_exit, bool *ignore_debugger);
u32 gp10b_gr_get_sm_hww_warp_esr(struct gk20a *g, u32 gp10b_gr_get_sm_hww_warp_esr(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm); u32 gpc, u32 tpc, u32 sm);
@@ -55,9 +55,9 @@ u32 get_ecc_override_val(struct gk20a *g);
int gr_gp10b_suspend_contexts(struct gk20a *g, int gr_gp10b_suspend_contexts(struct gk20a *g,
struct dbg_session_gk20a *dbg_s, struct dbg_session_gk20a *dbg_s,
int *ctx_resident_ch_fd); int *ctx_resident_ch_fd);
int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch, int gr_gp10b_set_boosted_ctx(struct nvgpu_channel *ch,
bool boost); bool boost);
int gp10b_gr_fuse_override(struct gk20a *g); int gp10b_gr_fuse_override(struct gk20a *g);
bool gr_gp10b_suspend_context(struct channel_gk20a *ch, bool gr_gp10b_suspend_context(struct nvgpu_channel *ch,
bool *cilp_preempt_pending); bool *cilp_preempt_pending);
#endif /* NVGPU_GR_GP10B_H */ #endif /* NVGPU_GR_GP10B_H */

View File

@@ -92,7 +92,7 @@ u32 gv11b_gr_sm_offset(struct gk20a *g, u32 sm)
} }
static void gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc, static void gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event, struct channel_gk20a *fault_ch, bool *post_event, struct nvgpu_channel *fault_ch,
u32 *hww_global_esr) u32 *hww_global_esr)
{ {
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
@@ -214,7 +214,7 @@ static void gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc,
} }
static void gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc, static void gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event, struct channel_gk20a *fault_ch, bool *post_event, struct nvgpu_channel *fault_ch,
u32 *hww_global_esr) u32 *hww_global_esr)
{ {
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
@@ -312,7 +312,7 @@ static void gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc,
} }
static void gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc, static void gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event, struct channel_gk20a *fault_ch, bool *post_event, struct nvgpu_channel *fault_ch,
u32 *hww_global_esr) u32 *hww_global_esr)
{ {
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
@@ -402,7 +402,7 @@ static void gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc,
} }
static void gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc, static void gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event, struct channel_gk20a *fault_ch, bool *post_event, struct nvgpu_channel *fault_ch,
u32 *hww_global_esr) u32 *hww_global_esr)
{ {
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
@@ -487,7 +487,7 @@ static void gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc,
} }
static void gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc, static void gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event, struct channel_gk20a *fault_ch, bool *post_event, struct nvgpu_channel *fault_ch,
u32 *hww_global_esr) u32 *hww_global_esr)
{ {
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
@@ -620,7 +620,7 @@ static void gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc,
void gr_gv11b_handle_tpc_sm_ecc_exception(struct gk20a *g, void gr_gv11b_handle_tpc_sm_ecc_exception(struct gk20a *g,
u32 gpc, u32 tpc, u32 gpc, u32 tpc,
bool *post_event, struct channel_gk20a *fault_ch, bool *post_event, struct nvgpu_channel *fault_ch,
u32 *hww_global_esr) u32 *hww_global_esr)
{ {
/* Check for L1 tag ECC errors. */ /* Check for L1 tag ECC errors. */
@@ -1019,7 +1019,7 @@ void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
static int gr_gv11b_handle_warp_esr_error_mmu_nack(struct gk20a *g, static int gr_gv11b_handle_warp_esr_error_mmu_nack(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm, u32 gpc, u32 tpc, u32 sm,
u32 warp_esr_error, u32 warp_esr_error,
struct channel_gk20a *fault_ch) struct nvgpu_channel *fault_ch)
{ {
u32 offset; u32 offset;
int err = 0; int err = 0;
@@ -1125,9 +1125,9 @@ static bool gr_gv11b_check_warp_esr_error(struct gk20a *g, u32 warp_esr_error)
static int gr_gv11b_handle_all_warp_esr_errors(struct gk20a *g, static int gr_gv11b_handle_all_warp_esr_errors(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm, u32 gpc, u32 tpc, u32 sm,
u32 warp_esr_error, u32 warp_esr_error,
struct channel_gk20a *fault_ch) struct nvgpu_channel *fault_ch)
{ {
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
u32 offset = 0U; u32 offset = 0U;
bool is_esr_error = false; bool is_esr_error = false;
@@ -1186,14 +1186,14 @@ clear_intr:
*/ */
int gr_gv11b_pre_process_sm_exception(struct gk20a *g, int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr, u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr,
bool sm_debugger_attached, struct channel_gk20a *fault_ch, bool sm_debugger_attached, struct nvgpu_channel *fault_ch,
bool *early_exit, bool *ignore_debugger) bool *early_exit, bool *ignore_debugger)
{ {
#ifdef NVGPU_DEBUGGER #ifdef NVGPU_DEBUGGER
int ret; int ret;
bool cilp_enabled = false; bool cilp_enabled = false;
u32 warp_esr_error = gr_gpc0_tpc0_sm0_hww_warp_esr_error_v(warp_esr); u32 warp_esr_error = gr_gpc0_tpc0_sm0_hww_warp_esr_error_v(warp_esr);
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
*early_exit = false; *early_exit = false;
*ignore_debugger = false; *ignore_debugger = false;
@@ -1440,7 +1440,7 @@ void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state)
} }
int gv11b_gr_set_sm_debug_mode(struct gk20a *g, int gv11b_gr_set_sm_debug_mode(struct gk20a *g,
struct channel_gk20a *ch, u64 sms, bool enable) struct nvgpu_channel *ch, u64 sms, bool enable)
{ {
struct nvgpu_dbg_reg_op *ops; struct nvgpu_dbg_reg_op *ops;
unsigned int i = 0, sm_id; unsigned int i = 0, sm_id;
@@ -1545,14 +1545,14 @@ u64 gv11b_gr_get_sm_hww_warp_esr_pc(struct gk20a *g, u32 offset)
} }
int gv11b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, int gv11b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
struct channel_gk20a *fault_ch) struct nvgpu_channel *fault_ch)
{ {
int ret = 0; int ret = 0;
u32 sm_id; u32 sm_id;
u32 offset, sm_per_tpc, tpc_id; u32 offset, sm_per_tpc, tpc_id;
u32 gpc_offset, gpc_tpc_offset; u32 gpc_offset, gpc_tpc_offset;
struct nvgpu_tsg_sm_error_state *sm_error_states = NULL; struct nvgpu_tsg_sm_error_state *sm_error_states = NULL;
struct tsg_gk20a *tsg = NULL; struct nvgpu_tsg *tsg = NULL;
nvgpu_mutex_acquire(&g->dbg_sessions_lock); nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -2857,11 +2857,11 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
} }
int gv11b_gr_clear_sm_error_state(struct gk20a *g, int gv11b_gr_clear_sm_error_state(struct gk20a *g,
struct channel_gk20a *ch, u32 sm_id) struct nvgpu_channel *ch, u32 sm_id)
{ {
u32 gpc, tpc, sm, offset; u32 gpc, tpc, sm, offset;
u32 val; u32 val;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
int err = 0; int err = 0;

View File

@@ -32,7 +32,7 @@ struct gk20a_debug_output;
u32 gr_gv11b_ctxsw_checksum_mismatch_mailbox_val(void); u32 gr_gv11b_ctxsw_checksum_mismatch_mailbox_val(void);
void gr_gv11b_handle_tpc_sm_ecc_exception(struct gk20a *g, void gr_gv11b_handle_tpc_sm_ecc_exception(struct gk20a *g,
u32 gpc, u32 tpc, u32 gpc, u32 tpc,
bool *post_event, struct channel_gk20a *fault_ch, bool *post_event, struct nvgpu_channel *fault_ch,
u32 *hww_global_esr); u32 *hww_global_esr);
void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data); void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data);
void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data); void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data);
@@ -41,19 +41,19 @@ int gr_gv11b_dump_gr_status_regs(struct gk20a *g,
void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index); void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index);
int gr_gv11b_pre_process_sm_exception(struct gk20a *g, int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr, u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr,
bool sm_debugger_attached, struct channel_gk20a *fault_ch, bool sm_debugger_attached, struct nvgpu_channel *fault_ch,
bool *early_exit, bool *ignore_debugger); bool *early_exit, bool *ignore_debugger);
void gv11b_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc, void gv11b_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc,
u32 *esr_sm_sel); u32 *esr_sm_sel);
int gv11b_gr_sm_trigger_suspend(struct gk20a *g); int gv11b_gr_sm_trigger_suspend(struct gk20a *g);
void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state); void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state);
int gv11b_gr_set_sm_debug_mode(struct gk20a *g, int gv11b_gr_set_sm_debug_mode(struct gk20a *g,
struct channel_gk20a *ch, u64 sms, bool enable); struct nvgpu_channel *ch, u64 sms, bool enable);
u64 gv11b_gr_get_sm_hww_warp_esr_pc(struct gk20a *g, u32 offset); u64 gv11b_gr_get_sm_hww_warp_esr_pc(struct gk20a *g, u32 offset);
int gv11b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, int gv11b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
struct channel_gk20a *fault_ch); struct nvgpu_channel *fault_ch);
int gv11b_gr_clear_sm_error_state(struct gk20a *g, int gv11b_gr_clear_sm_error_state(struct gk20a *g,
struct channel_gk20a *ch, u32 sm_id); struct nvgpu_channel *ch, u32 sm_id);
void gv11b_gr_set_hww_esr_report_mask(struct gk20a *g); void gv11b_gr_set_hww_esr_report_mask(struct gk20a *g);
bool gv11b_gr_sm_debugger_attached(struct gk20a *g); bool gv11b_gr_sm_debugger_attached(struct gk20a *g);
void gv11b_gr_suspend_single_sm(struct gk20a *g, void gv11b_gr_suspend_single_sm(struct gk20a *g,

View File

@@ -38,9 +38,9 @@
#include <nvgpu/hw/gp10b/hw_gr_gp10b.h> #include <nvgpu/hw/gp10b/hw_gr_gp10b.h>
static int gp10b_gr_intr_clear_cilp_preempt_pending(struct gk20a *g, static int gp10b_gr_intr_clear_cilp_preempt_pending(struct gk20a *g,
struct channel_gk20a *fault_ch) struct nvgpu_channel *fault_ch)
{ {
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
@@ -71,8 +71,8 @@ static int gp10b_gr_intr_get_cilp_preempt_pending_chid(struct gk20a *g,
u32 *__chid) u32 *__chid)
{ {
struct nvgpu_gr_ctx *gr_ctx; struct nvgpu_gr_ctx *gr_ctx;
struct channel_gk20a *ch; struct nvgpu_channel *ch;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
u32 chid; u32 chid;
int ret = -EINVAL; int ret = -EINVAL;
@@ -105,13 +105,13 @@ static int gp10b_gr_intr_get_cilp_preempt_pending_chid(struct gk20a *g,
} }
int gp10b_gr_intr_handle_fecs_error(struct gk20a *g, int gp10b_gr_intr_handle_fecs_error(struct gk20a *g,
struct channel_gk20a *__ch, struct nvgpu_channel *__ch,
struct nvgpu_gr_isr_data *isr_data) struct nvgpu_gr_isr_data *isr_data)
{ {
struct channel_gk20a *ch; struct nvgpu_channel *ch;
u32 chid = NVGPU_INVALID_CHANNEL_ID; u32 chid = NVGPU_INVALID_CHANNEL_ID;
int ret = 0; int ret = 0;
struct tsg_gk20a *tsg; struct nvgpu_tsg *tsg;
struct nvgpu_fecs_host_intr_status fecs_host_intr; struct nvgpu_fecs_host_intr_status fecs_host_intr;
u32 gr_fecs_intr = g->ops.gr.falcon.fecs_host_intr_status(g, u32 gr_fecs_intr = g->ops.gr.falcon.fecs_host_intr_status(g,
&fecs_host_intr); &fecs_host_intr);
@@ -280,7 +280,7 @@ static void gr_gp10b_sm_lrf_ecc_overcount_war(bool single_err,
int gp10b_gr_intr_handle_sm_exception(struct gk20a *g, int gp10b_gr_intr_handle_sm_exception(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm, u32 gpc, u32 tpc, u32 sm,
bool *post_event, struct channel_gk20a *fault_ch, bool *post_event, struct nvgpu_channel *fault_ch,
u32 *hww_global_esr) u32 *hww_global_esr)
{ {
int ret = 0; int ret = 0;

View File

@@ -26,7 +26,7 @@
#include <nvgpu/types.h> #include <nvgpu/types.h>
struct gk20a; struct gk20a;
struct channel_gk20a; struct nvgpu_channel;
struct nvgpu_gr_isr_data; struct nvgpu_gr_isr_data;
#define NVC097_SET_GO_IDLE_TIMEOUT 0x022cU #define NVC097_SET_GO_IDLE_TIMEOUT 0x022cU
@@ -41,7 +41,7 @@ struct nvgpu_gr_isr_data;
#define NVC0C0_SET_RD_COALESCE 0x0228U #define NVC0C0_SET_RD_COALESCE 0x0228U
int gp10b_gr_intr_handle_fecs_error(struct gk20a *g, int gp10b_gr_intr_handle_fecs_error(struct gk20a *g,
struct channel_gk20a *__ch, struct nvgpu_channel *__ch,
struct nvgpu_gr_isr_data *isr_data); struct nvgpu_gr_isr_data *isr_data);
void gp10b_gr_intr_set_coalesce_buffer_size(struct gk20a *g, u32 data); void gp10b_gr_intr_set_coalesce_buffer_size(struct gk20a *g, u32 data);
void gp10b_gr_intr_set_go_idle_timeout(struct gk20a *g, u32 data); void gp10b_gr_intr_set_go_idle_timeout(struct gk20a *g, u32 data);
@@ -50,6 +50,6 @@ int gp10b_gr_intr_handle_sw_method(struct gk20a *g, u32 addr,
u32 class_num, u32 offset, u32 data); u32 class_num, u32 offset, u32 data);
int gp10b_gr_intr_handle_sm_exception(struct gk20a *g, int gp10b_gr_intr_handle_sm_exception(struct gk20a *g,
u32 gpc, u32 tpc, u32 sm, u32 gpc, u32 tpc, u32 sm,
bool *post_event, struct channel_gk20a *fault_ch, bool *post_event, struct nvgpu_channel *fault_ch,
u32 *hww_global_esr); u32 *hww_global_esr);
#endif /* NVGPU_GR_INTR_GP10B_H */ #endif /* NVGPU_GR_INTR_GP10B_H */

View File

@@ -84,7 +84,7 @@ static void gv11b_gr_intr_handle_fecs_ecc_error(struct gk20a *g)
} }
int gv11b_gr_intr_handle_fecs_error(struct gk20a *g, int gv11b_gr_intr_handle_fecs_error(struct gk20a *g,
struct channel_gk20a *__ch, struct nvgpu_channel *__ch,
struct nvgpu_gr_isr_data *isr_data) struct nvgpu_gr_isr_data *isr_data)
{ {
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");

View File

@@ -27,7 +27,7 @@
struct gk20a; struct gk20a;
struct nvgpu_gr_config; struct nvgpu_gr_config;
struct channel_gk20a; struct nvgpu_channel;
struct nvgpu_gr_isr_data; struct nvgpu_gr_isr_data;
#define NVC397_SET_SHADER_EXCEPTIONS 0x1528U #define NVC397_SET_SHADER_EXCEPTIONS 0x1528U
@@ -58,7 +58,7 @@ struct nvgpu_gr_isr_data;
#define NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE U32(0) #define NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE U32(0)
int gv11b_gr_intr_handle_fecs_error(struct gk20a *g, int gv11b_gr_intr_handle_fecs_error(struct gk20a *g,
struct channel_gk20a *__ch, struct nvgpu_channel *__ch,
struct nvgpu_gr_isr_data *isr_data); struct nvgpu_gr_isr_data *isr_data);
void gv11b_gr_intr_set_shader_cut_collector(struct gk20a *g, u32 data); void gv11b_gr_intr_set_shader_cut_collector(struct gk20a *g, u32 data);
void gv11b_gr_intr_set_skedcheck(struct gk20a *g, u32 data); void gv11b_gr_intr_set_skedcheck(struct gk20a *g, u32 data);

View File

@@ -29,7 +29,7 @@ struct gk20a;
struct nvgpu_gr_config; struct nvgpu_gr_config;
struct nvgpu_gr_zcull; struct nvgpu_gr_zcull;
struct nvgpu_gr_zcull_info; struct nvgpu_gr_zcull_info;
struct channel_gk20a; struct nvgpu_channel;
int gm20b_gr_init_zcull_hw(struct gk20a *g, int gm20b_gr_init_zcull_hw(struct gk20a *g,
struct nvgpu_gr_zcull *gr_zcull, struct nvgpu_gr_zcull *gr_zcull,

Some files were not shown because too many files have changed in this diff.