gpu: nvgpu: rename tsg_gk20a*/gk20a_tsg* functions.

Rename the functions with the prefixes tsg_gk20a*/gk20a_tsg* to
nvgpu_tsg_*. The renamed functions are:

  tsg_gk20a_from_ch()                      -> nvgpu_tsg_from_ch()
  gk20a_init_tsg_support()                 -> nvgpu_tsg_init_support()
  gk20a_tsg_alloc_sm_error_states_mem()    -> nvgpu_tsg_alloc_sm_error_states_mem()
  gk20a_tsg_update_sm_error_state_locked() -> nvgpu_tsg_update_sm_error_state_locked()
  gk20a_tsg_set_sm_exception_type_mask()   -> nvgpu_tsg_set_sm_exception_type_mask()
  gk20a_tsg_set_runlist_interleave()       -> nvgpu_tsg_set_runlist_interleave()
  gk20a_tsg_set_priority()                 -> nvgpu_tsg_set_priority()
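
The most common call-site pattern touched by this change is the
channel-to-TSG lookup. As an illustrative sketch only (modelled on
nvgpu_channel_enable_tsg() in the hunks below; the wrapper name
example_enable_tsg_of_channel() is made up for this example), the
pattern with the new name is:

	/* Illustrative sketch, not part of this change. */
	static int example_enable_tsg_of_channel(struct gk20a *g,
			struct nvgpu_channel *ch)
	{
		struct nvgpu_tsg *tsg;

		/* NULL means the channel is not bound to any TSG. */
		tsg = nvgpu_tsg_from_ch(ch);
		if (tsg == NULL) {
			return -EINVAL;
		}

		g->ops.tsg.enable(tsg);
		return 0;
	}

Callers that cannot tolerate an unbound channel bail out with -EINVAL,
as most of the call sites below do.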

Jira NVGPU-3248

Change-Id: I9f5f601040d994cd7798fe76813cc86c8df126dc
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2120165
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Debarshi Dutta
2019-05-16 14:09:51 +05:30
committed by mobile promotions
parent 1dea88c6c7
commit 4c30bd599f
29 changed files with 75 additions and 75 deletions

View File

@@ -145,7 +145,7 @@ int nvgpu_channel_enable_tsg(struct gk20a *g, struct nvgpu_channel *ch)
{
struct nvgpu_tsg *tsg;
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg != NULL) {
g->ops.tsg.enable(tsg);
return 0;
@@ -159,7 +159,7 @@ int nvgpu_channel_disable_tsg(struct gk20a *g, struct nvgpu_channel *ch)
{
struct nvgpu_tsg *tsg;
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg != NULL) {
g->ops.tsg.disable(tsg);
return 0;
@@ -213,7 +213,7 @@ bool gk20a_channel_check_unserviceable(struct nvgpu_channel *ch)
void gk20a_channel_abort(struct nvgpu_channel *ch, bool channel_preempt)
{
-struct nvgpu_tsg *tsg = tsg_gk20a_from_ch(ch);
+struct nvgpu_tsg *tsg = nvgpu_tsg_from_ch(ch);
nvgpu_log_fn(ch->g, " ");
@@ -305,7 +305,7 @@ static void gk20a_free_channel(struct nvgpu_channel *ch, bool force)
*/
if (!nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
/* abort channel and remove from runlist */
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg != NULL) {
/* Between tsg is not null and unbind_channel call,
* ioctl cannot be called anymore because user doesn't
@@ -2554,7 +2554,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g, bool post_events)
if (post_events) {
struct nvgpu_tsg *tsg =
-tsg_gk20a_from_ch(c);
+nvgpu_tsg_from_ch(c);
if (tsg != NULL) {
g->ops.tsg.post_event_id(tsg,
NVGPU_EVENT_ID_BLOCKING_SYNC);
@@ -2736,7 +2736,7 @@ int nvgpu_channel_deferred_reset_engines(struct gk20a *g,
goto fail;
}
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg != NULL) {
engines = g->ops.engine.get_mask_on_id(g,
tsg->tsgid, true);

View File

@@ -36,7 +36,7 @@ u32 nvgpu_preempt_get_timeout(struct gk20a *g)
int nvgpu_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
{
int err;
-struct nvgpu_tsg *tsg = tsg_gk20a_from_ch(ch);
+struct nvgpu_tsg *tsg = nvgpu_tsg_from_ch(ch);
if (tsg != NULL) {
err = g->ops.fifo.preempt_tsg(ch->g, tsg);

View File

@@ -322,7 +322,7 @@ static bool gk20a_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
struct nvgpu_tsg *tsg = NULL;
runlist = f->runlist_info[runlist_id];
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg == NULL) {
/*

View File

@@ -93,7 +93,7 @@ int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
nvgpu_log_fn(g, "bind tsg:%u ch:%u\n", tsg->tsgid, ch->chid);
/* check if channel is already bound to some TSG */
-if (tsg_gk20a_from_ch(ch) != NULL) {
+if (nvgpu_tsg_from_ch(ch) != NULL) {
return -EINVAL;
}
@@ -296,7 +296,7 @@ int nvgpu_tsg_force_reset_ch(struct nvgpu_channel *ch,
{
struct gk20a *g = ch->g;
-struct nvgpu_tsg *tsg = tsg_gk20a_from_ch(ch);
+struct nvgpu_tsg *tsg = nvgpu_tsg_from_ch(ch);
if (tsg != NULL) {
nvgpu_tsg_set_error_notifier(g, tsg, err_code);
@@ -325,7 +325,7 @@ void nvgpu_tsg_cleanup_sw(struct gk20a *g)
nvgpu_mutex_destroy(&f->tsg_inuse_mutex);
}
-int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
+int nvgpu_tsg_init_support(struct gk20a *g, u32 tsgid)
{
struct nvgpu_tsg *tsg = NULL;
@@ -367,7 +367,7 @@ int nvgpu_tsg_setup_sw(struct gk20a *g)
}
for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
-err = gk20a_init_tsg_support(g, tsgid);
+err = nvgpu_tsg_init_support(g, tsgid);
if (err != 0) {
nvgpu_err(g, "tsg init failed, tsgid=%u", tsgid);
goto clean_up;
@@ -638,7 +638,7 @@ int nvgpu_tsg_open_common(struct gk20a *g, struct nvgpu_tsg *tsg, pid_t pid)
return -EINVAL;
}
-err = gk20a_tsg_alloc_sm_error_states_mem(g, tsg, no_of_sm);
+err = nvgpu_tsg_alloc_sm_error_states_mem(g, tsg, no_of_sm);
if (err != 0) {
return err;
}
@@ -758,7 +758,7 @@ void nvgpu_tsg_release(struct nvgpu_ref *ref)
nvgpu_log(g, gpu_dbg_fn, "tsg released %d", tsg->tsgid);
}
-struct nvgpu_tsg *tsg_gk20a_from_ch(struct nvgpu_channel *ch)
+struct nvgpu_tsg *nvgpu_tsg_from_ch(struct nvgpu_channel *ch)
{
struct nvgpu_tsg *tsg = NULL;
u32 tsgid = ch->tsgid;
@@ -775,7 +775,7 @@ struct nvgpu_tsg *tsg_gk20a_from_ch(struct nvgpu_channel *ch)
return tsg;
}
-int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g,
+int nvgpu_tsg_alloc_sm_error_states_mem(struct gk20a *g,
struct nvgpu_tsg *tsg,
u32 num_sm)
{
@@ -802,7 +802,7 @@ int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g,
return err;
}
-void gk20a_tsg_update_sm_error_state_locked(struct nvgpu_tsg *tsg,
+void nvgpu_tsg_update_sm_error_state_locked(struct nvgpu_tsg *tsg,
u32 sm_id,
struct nvgpu_tsg_sm_error_state *sm_error_state)
{
@@ -822,12 +822,12 @@ void gk20a_tsg_update_sm_error_state_locked(struct nvgpu_tsg *tsg,
sm_error_state->hww_warp_esr_report_mask;
}
-int gk20a_tsg_set_sm_exception_type_mask(struct nvgpu_channel *ch,
+int nvgpu_tsg_set_sm_exception_type_mask(struct nvgpu_channel *ch,
u32 exception_mask)
{
struct nvgpu_tsg *tsg;
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg == NULL) {
return -EINVAL;
}

View File

@@ -359,7 +359,7 @@ void nvgpu_gr_intr_set_error_notifier(struct gk20a *g,
return;
}
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg != NULL) {
nvgpu_tsg_set_error_notifier(g, tsg, error_notifier);
} else {
@@ -641,7 +641,7 @@ void nvgpu_gr_intr_handle_notify_pending(struct gk20a *g,
return;
}
-if (tsg_gk20a_from_ch(ch) == NULL) {
+if (nvgpu_tsg_from_ch(ch) == NULL) {
return;
}
@@ -667,7 +667,7 @@ void nvgpu_gr_intr_handle_semaphore_pending(struct gk20a *g,
return;
}
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg != NULL) {
int err;
@@ -717,7 +717,7 @@ int nvgpu_gr_intr_stall_isr(struct gk20a *g)
if (ch == NULL) {
nvgpu_err(g, "pgraph intr: 0x%08x, chid: INVALID", gr_intr);
} else {
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg == NULL) {
nvgpu_err(g, "pgraph intr: 0x%08x, chid: %d "
"not bound to tsg", gr_intr, chid);

View File

@@ -84,7 +84,7 @@ int nvgpu_gr_setup_bind_ctxsw_zcull(struct gk20a *g, struct nvgpu_channel *c,
struct nvgpu_tsg *tsg;
struct nvgpu_gr_ctx *gr_ctx;
-tsg = tsg_gk20a_from_ch(c);
+tsg = nvgpu_tsg_from_ch(c);
if (tsg == NULL) {
return -EINVAL;
}
@@ -121,7 +121,7 @@ int nvgpu_gr_setup_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num,
}
c->obj_class = class_num;
-tsg = tsg_gk20a_from_ch(c);
+tsg = nvgpu_tsg_from_ch(c);
if (tsg == NULL) {
return -EINVAL;
}
@@ -234,7 +234,7 @@ int nvgpu_gr_setup_set_preemption_mode(struct nvgpu_channel *ch,
return -EINVAL;
}
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg == NULL) {
return -EINVAL;
}

View File

@@ -102,7 +102,7 @@ void nvgpu_rc_pbdma_fault(struct gk20a *g, struct nvgpu_fifo *f,
return;
}
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg != NULL) {
nvgpu_tsg_set_error_notifier(g, tsg, error_notifier);
nvgpu_rc_tsg_and_related_engines(g, tsg, true,

View File

@@ -369,7 +369,7 @@ int vgpu_tsg_force_reset_ch(struct nvgpu_channel *ch,
nvgpu_log_fn(g, " ");
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg != NULL) {
nvgpu_rwsem_down_read(&tsg->ch_list_lock);
@@ -425,7 +425,7 @@ static void vgpu_fifo_set_ctx_mmu_error_ch_tsg(struct gk20a *g,
struct nvgpu_tsg *tsg = NULL;
struct nvgpu_channel *ch_tsg = NULL;
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg != NULL) {
nvgpu_rwsem_down_read(&tsg->ch_list_lock);

View File

@@ -237,7 +237,7 @@ int vgpu_gr_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num, u32 flags)
}
c->obj_class = class_num;
-tsg = tsg_gk20a_from_ch(c);
+tsg = nvgpu_tsg_from_ch(c);
if (tsg == NULL) {
return -EINVAL;
}
@@ -876,7 +876,7 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
nvgpu_log_fn(g, " ");
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (!tsg) {
return -EINVAL;
}
@@ -960,7 +960,7 @@ int vgpu_gr_clear_sm_error_state(struct gk20a *g,
struct nvgpu_tsg *tsg;
int err;
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (!tsg) {
return -EINVAL;
}
@@ -1422,7 +1422,7 @@ int vgpu_gr_set_preemption_mode(struct nvgpu_channel *ch,
return -EINVAL;
}
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (!tsg) {
return -EINVAL;
}

View File

@@ -111,7 +111,7 @@ bool gk20a_fifo_handle_ctxsw_timeout(struct gk20a *g)
} else {
ch = nvgpu_channel_from_id(g, id);
if (ch != NULL) {
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
nvgpu_channel_put(ch);
}
}

View File

@@ -320,7 +320,7 @@ bool gk20a_fifo_handle_mmu_fault_locked(
ch = &g->fifo.channel[id];
refch = nvgpu_channel_get(ch);
if (refch != NULL) {
-tsg = tsg_gk20a_from_ch(refch);
+tsg = nvgpu_tsg_from_ch(refch);
}
} else {
nvgpu_err(g, "ctx_id_type is not chid/tsgid");
@@ -331,7 +331,7 @@ bool gk20a_fifo_handle_mmu_fault_locked(
mmfault_info.inst_ptr);
refch = ch;
if (refch != NULL) {
-tsg = tsg_gk20a_from_ch(refch);
+tsg = nvgpu_tsg_from_ch(refch);
}
}

View File

@@ -129,7 +129,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
struct nvgpu_tsg *tsg;
nvgpu_err(g, "preempt channel %d timeout", ch->chid);
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg != NULL) {
nvgpu_rc_preempt_timeout(g, tsg);
} else {

View File

@@ -407,7 +407,7 @@ int gv11b_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
{
struct nvgpu_tsg *tsg = NULL;
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg == NULL) {
nvgpu_log_info(g, "chid: %d is not bound to tsg", ch->chid);

View File

@@ -61,7 +61,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
nvgpu_log_fn(g, " ");
-tsg = tsg_gk20a_from_ch(c);
+tsg = nvgpu_tsg_from_ch(c);
if (tsg == NULL) {
return -EINVAL;
}
@@ -114,7 +114,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
nvgpu_log_fn(g, " ");
-tsg = tsg_gk20a_from_ch(c);
+tsg = nvgpu_tsg_from_ch(c);
if (tsg == NULL) {
return -EINVAL;
}
@@ -1413,7 +1413,7 @@ bool gk20a_is_channel_ctx_resident(struct nvgpu_channel *ch)
ret = true;
}
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if ((tsg != NULL) && (tsg->tsgid == curr_gr_tsgid)) {
ret = true;
}
@@ -1447,7 +1447,7 @@ static int gr_exec_ctx_ops(struct nvgpu_channel *ch,
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d",
num_ctx_wr_ops, num_ctx_rd_ops);
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg == NULL) {
return -EINVAL;
}

View File

@@ -452,7 +452,7 @@ int gr_gm20b_update_pc_sampling(struct nvgpu_channel *c,
nvgpu_log_fn(c->g, " ");
-tsg = tsg_gk20a_from_ch(c);
+tsg = nvgpu_tsg_from_ch(c);
if (tsg == NULL) {
return -EINVAL;
}
@@ -596,7 +596,7 @@ int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
gr_gpc0_tpc0_sm_cfg_r() + offset));
if (fault_ch != NULL) {
-tsg = tsg_gk20a_from_ch(fault_ch);
+tsg = nvgpu_tsg_from_ch(fault_ch);
}
if (tsg == NULL) {
@@ -624,7 +624,7 @@ int gm20b_gr_clear_sm_error_state(struct gk20a *g,
GPU_LIT_TPC_IN_GPC_STRIDE);
int err = 0;
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg == NULL) {
return -EINVAL;
}

View File

@@ -385,7 +385,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct nvgpu_channel
int ret = 0;
struct nvgpu_tsg *tsg;
-tsg = tsg_gk20a_from_ch(fault_ch);
+tsg = nvgpu_tsg_from_ch(fault_ch);
if (tsg == NULL) {
nvgpu_err(g, "CILP: chid: %d is not bound to tsg",
fault_ch->chid);
@@ -427,7 +427,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
-tsg = tsg_gk20a_from_ch(fault_ch);
+tsg = nvgpu_tsg_from_ch(fault_ch);
if (tsg == NULL) {
return -EINVAL;
}
@@ -493,7 +493,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
*ignore_debugger = false;
if (fault_ch != NULL) {
-tsg = tsg_gk20a_from_ch(fault_ch);
+tsg = nvgpu_tsg_from_ch(fault_ch);
if (tsg == NULL) {
return -EINVAL;
}
@@ -632,7 +632,7 @@ bool gr_gp10b_suspend_context(struct nvgpu_channel *ch,
bool ctx_resident = false;
int err = 0;
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg == NULL) {
return -EINVAL;
}
@@ -724,7 +724,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
"CILP preempt pending, waiting %u msecs for preemption",
nvgpu_get_poll_timeout(g));
-tsg = tsg_gk20a_from_ch(cilp_preempt_pending_ch);
+tsg = nvgpu_tsg_from_ch(cilp_preempt_pending_ch);
if (tsg == NULL) {
err = -EINVAL;
goto clean_up;
@@ -764,7 +764,7 @@ int gr_gp10b_set_boosted_ctx(struct nvgpu_channel *ch,
struct nvgpu_mem *mem;
int err = 0;
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg == NULL) {
return -EINVAL;
}

View File

@@ -1218,7 +1218,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
}
if (fault_ch != NULL) {
-tsg = tsg_gk20a_from_ch(fault_ch);
+tsg = nvgpu_tsg_from_ch(fault_ch);
if (tsg == NULL) {
return -EINVAL;
}
@@ -1566,7 +1566,7 @@ int gv11b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
offset = gpc_tpc_offset + gv11b_gr_sm_offset(g, sm);
if (fault_ch != NULL) {
-tsg = tsg_gk20a_from_ch(fault_ch);
+tsg = nvgpu_tsg_from_ch(fault_ch);
}
if (tsg == NULL) {
@@ -2865,7 +2865,7 @@ int gv11b_gr_clear_sm_error_state(struct gk20a *g,
int err = 0;
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg == NULL) {
return -EINVAL;
}

View File

@@ -44,7 +44,7 @@ static int gp10b_gr_intr_clear_cilp_preempt_pending(struct gk20a *g,
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
-tsg = tsg_gk20a_from_ch(fault_ch);
+tsg = nvgpu_tsg_from_ch(fault_ch);
if (tsg == NULL) {
return -EINVAL;
}
@@ -85,7 +85,7 @@ static int gp10b_gr_intr_get_cilp_preempt_pending_chid(struct gk20a *g,
return ret;
}
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg == NULL) {
nvgpu_channel_put(ch);
return -EINVAL;

View File

@@ -571,7 +571,7 @@ static const struct gpu_ops gm20b_ops = {
.intr_unset_recover_mask = gk20a_fifo_intr_unset_recover_mask,
.setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw,
-.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
+.set_sm_exception_type_mask = nvgpu_tsg_set_sm_exception_type_mask,
.intr_0_enable = gk20a_fifo_intr_0_enable,
.intr_1_enable = gk20a_fifo_intr_1_enable,
.intr_0_isr = gk20a_fifo_intr_0_isr,

View File

@@ -635,7 +635,7 @@ static const struct gpu_ops gp10b_ops = {
.intr_unset_recover_mask = gk20a_fifo_intr_unset_recover_mask,
.setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw,
-.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
+.set_sm_exception_type_mask = nvgpu_tsg_set_sm_exception_type_mask,
.intr_0_enable = gk20a_fifo_intr_0_enable,
.intr_1_enable = gk20a_fifo_intr_1_enable,
.intr_0_isr = gk20a_fifo_intr_0_isr,

View File

@@ -785,7 +785,7 @@ static const struct gpu_ops gv100_ops = {
.intr_unset_recover_mask = gv100_fifo_intr_unset_recover_mask,
.setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw,
-.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
+.set_sm_exception_type_mask = nvgpu_tsg_set_sm_exception_type_mask,
.intr_0_enable = gk20a_fifo_intr_0_enable,
.intr_1_enable = gk20a_fifo_intr_1_enable,
.intr_0_isr = gv11b_fifo_intr_0_isr,

View File

@@ -755,7 +755,7 @@ static const struct gpu_ops gv11b_ops = {
.intr_unset_recover_mask = gv11b_fifo_intr_unset_recover_mask,
.setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw,
-.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
+.set_sm_exception_type_mask = nvgpu_tsg_set_sm_exception_type_mask,
.intr_0_enable = gv11b_fifo_intr_0_enable,
.intr_1_enable = gk20a_fifo_intr_1_enable,
.intr_0_isr = gv11b_fifo_intr_0_isr,

View File

@@ -823,7 +823,7 @@ static const struct gpu_ops tu104_ops = {
.intr_unset_recover_mask = gv11b_fifo_intr_unset_recover_mask,
.setup_sw = nvgpu_fifo_setup_sw,
.cleanup_sw = nvgpu_fifo_cleanup_sw,
-.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
+.set_sm_exception_type_mask = nvgpu_tsg_set_sm_exception_type_mask,
.intr_0_enable = gv11b_fifo_intr_0_enable,
.intr_1_enable = gk20a_fifo_intr_1_enable,
.intr_0_isr = gv11b_fifo_intr_0_isr,

View File

@@ -308,7 +308,7 @@ void gv11b_mm_mmu_fault_handle_mmu_fault_common(struct gk20a *g,
err = gv11b_fb_fix_page_fault(g, mmufault);
if (mmufault->refch != NULL) {
-tsg = tsg_gk20a_from_ch(mmufault->refch);
+tsg = nvgpu_tsg_from_ch(mmufault->refch);
nvgpu_tsg_reset_faulted_eng_pbdma(g, tsg, true, true);
}
if (err == 0) {
@@ -365,7 +365,7 @@ void gv11b_mm_mmu_fault_handle_mmu_fault_common(struct gk20a *g,
mmufault->refch->mmu_nack_handled = true;
}
-tsg = tsg_gk20a_from_ch(mmufault->refch);
+tsg = nvgpu_tsg_from_ch(mmufault->refch);
if (tsg != NULL) {
id = mmufault->refch->tsgid;
id_type = ID_TYPE_TSG;

View File

@@ -96,11 +96,11 @@ struct nvgpu_tsg *nvgpu_tsg_open(struct gk20a *g, pid_t pid);
void nvgpu_tsg_release_common(struct gk20a *g, struct nvgpu_tsg *tsg);
void nvgpu_tsg_release(struct nvgpu_ref *ref);
-int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid);
+int nvgpu_tsg_init_support(struct gk20a *g, u32 tsgid);
int nvgpu_tsg_setup_sw(struct gk20a *g);
void nvgpu_tsg_cleanup_sw(struct gk20a *g);
-struct nvgpu_tsg *tsg_gk20a_from_ch(struct nvgpu_channel *ch);
+struct nvgpu_tsg *nvgpu_tsg_from_ch(struct nvgpu_channel *ch);
void nvgpu_tsg_disable(struct nvgpu_tsg *tsg);
int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg,
@@ -125,22 +125,22 @@ void nvgpu_tsg_post_event_id(struct nvgpu_tsg *tsg,
enum nvgpu_event_id_type event_id);
bool nvgpu_tsg_check_ctxsw_timeout(struct nvgpu_tsg *tsg,
bool *debug_dump, u32 *ms);
-int gk20a_tsg_set_runlist_interleave(struct nvgpu_tsg *tsg, u32 level);
+int nvgpu_tsg_set_runlist_interleave(struct nvgpu_tsg *tsg, u32 level);
int nvgpu_tsg_set_timeslice(struct nvgpu_tsg *tsg, u32 timeslice_us);
u32 nvgpu_tsg_get_timeslice(struct nvgpu_tsg *tsg);
u32 nvgpu_tsg_default_timeslice_us(struct gk20a *g);
void nvgpu_tsg_enable_sched(struct gk20a *g, struct nvgpu_tsg *tsg);
void nvgpu_tsg_disable_sched(struct gk20a *g, struct nvgpu_tsg *tsg);
int nvgpu_tsg_set_interleave(struct nvgpu_tsg *tsg, u32 level);
-int gk20a_tsg_set_priority(struct gk20a *g, struct nvgpu_tsg *tsg,
+int nvgpu_tsg_set_priority(struct gk20a *g, struct nvgpu_tsg *tsg,
u32 priority);
-int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g,
+int nvgpu_tsg_alloc_sm_error_states_mem(struct gk20a *g,
struct nvgpu_tsg *tsg,
u32 num_sm);
-void gk20a_tsg_update_sm_error_state_locked(struct nvgpu_tsg *tsg,
+void nvgpu_tsg_update_sm_error_state_locked(struct nvgpu_tsg *tsg,
u32 sm_id,
struct nvgpu_tsg_sm_error_state *sm_error_state);
-int gk20a_tsg_set_sm_exception_type_mask(struct nvgpu_channel *ch,
+int nvgpu_tsg_set_sm_exception_type_mask(struct nvgpu_channel *ch,
u32 exception_mask);
struct gk20a_event_id_data {

View File

@@ -87,7 +87,7 @@ static int gk20a_fifo_sched_debugfs_seq_show(
return ret;
if (nvgpu_channel_get(ch)) {
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg)
seq_printf(s, "%-8d %-8d %-8d %-9d %-8d %-10d %-8d %-8d\n",

View File

@@ -85,13 +85,13 @@ static void gk20a_channel_trace_sched_param(
const char *compute_preempt_mode),
struct nvgpu_channel *ch)
{
-struct nvgpu_tsg *tsg = tsg_gk20a_from_ch(ch);
+struct nvgpu_tsg *tsg = nvgpu_tsg_from_ch(ch);
if (!tsg)
return;
(trace)(ch->chid, ch->tsgid, ch->pid,
-tsg_gk20a_from_ch(ch)->timeslice_us,
+nvgpu_tsg_from_ch(ch)->timeslice_us,
ch->ctxsw_timeout_max_ms,
nvgpu_runlist_interleave_level_name(tsg->interleave_level),
gr_gk20a_graphics_preempt_mode_name(

View File

@@ -284,7 +284,7 @@ static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(
return -EINVAL;
}
-tsg = tsg_gk20a_from_ch(ch);
+tsg = nvgpu_tsg_from_ch(ch);
if (tsg == NULL) {
nvgpu_err(g, "no valid tsg from ch");
return -EINVAL;
@@ -1769,7 +1769,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
nvgpu_err(g,
"per-ctxt reserve: global reservation in effect");
err = -EBUSY;
-} else if ((tsg = tsg_gk20a_from_ch(my_prof_obj->ch)) != NULL) {
+} else if ((tsg = nvgpu_tsg_from_ch(my_prof_obj->ch)) != NULL) {
/* TSG: check that another channel in the TSG
* doesn't already have the reservation
*/

View File

@@ -135,7 +135,7 @@ static int nvgpu_tsg_unbind_channel_fd(struct nvgpu_tsg *tsg, int ch_fd)
return -EINVAL;
}
-if (tsg != tsg_gk20a_from_ch(ch)) {
+if (tsg != nvgpu_tsg_from_ch(ch)) {
err = -EINVAL;
goto out;
}