gpu: nvgpu: move runlist HAL ops to separate section

Split out ops that belong to the runlist unit into a new section called
runlist. This is effectively just renaming; the implementation still
stays put.

Jira NVGPU-1309

Change-Id: Ib928164f8008f680d9cb13c969e3304ef727abba
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1997823
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Konsta Holtta
2019-01-10 16:12:33 +02:00
committed by mobile promotions
parent f6656dc00f
commit 6fda25e958
15 changed files with 134 additions and 109 deletions

View File

@@ -159,7 +159,8 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add)
{
return c->g->ops.fifo.update_runlist(c->g, c->runlist_id, c->chid, add, true);
return c->g->ops.runlist.update_runlist(c->g, c->runlist_id,
c->chid, add, true);
}
int gk20a_enable_channel_tsg(struct gk20a *g, struct channel_gk20a *ch)

View File

@@ -45,7 +45,7 @@ static u32 nvgpu_runlist_append_tsg(struct gk20a *g,
/* add TSG entry */
nvgpu_log_info(g, "add TSG %d to runlist", tsg->tsgid);
g->ops.fifo.get_tsg_runlist_entry(tsg, *runlist_entry);
g->ops.runlist.get_tsg_runlist_entry(tsg, *runlist_entry);
nvgpu_log_info(g, "tsg rl entries left %d runlist [0] %x [1] %x",
*entries_left,
(*runlist_entry)[0], (*runlist_entry)[1]);
@@ -69,7 +69,7 @@ static u32 nvgpu_runlist_append_tsg(struct gk20a *g,
nvgpu_log_info(g, "add channel %d to runlist",
ch->chid);
g->ops.fifo.get_ch_runlist_entry(ch, *runlist_entry);
g->ops.runlist.get_ch_runlist_entry(ch, *runlist_entry);
nvgpu_log_info(g, "rl entries left %d runlist [0] %x [1] %x",
*entries_left,
(*runlist_entry)[0], (*runlist_entry)[1]);
@@ -350,10 +350,11 @@ int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
runlist->count = 0;
}
g->ops.fifo.runlist_hw_submit(g, runlist_id, runlist->count, new_buf);
g->ops.runlist.runlist_hw_submit(g, runlist_id, runlist->count,
new_buf);
if (wait_for_finish) {
ret = g->ops.fifo.runlist_wait_pending(g, runlist_id);
ret = g->ops.runlist.runlist_wait_pending(g, runlist_id);
if (ret == -ETIMEDOUT) {
nvgpu_err(g, "runlist %d update timeout", runlist_id);
@@ -391,14 +392,14 @@ int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
&g->pmu, PMU_MUTEX_ID_FIFO, &token);
}
g->ops.fifo.runlist_hw_submit(
g->ops.runlist.runlist_hw_submit(
g, ch->runlist_id, runlist->count, runlist->cur_buffer);
if (preempt_next) {
g->ops.fifo.reschedule_preempt_next_locked(ch, wait_preempt);
g->ops.runlist.reschedule_preempt_next_locked(ch, wait_preempt);
}
g->ops.fifo.runlist_wait_pending(g, ch->runlist_id);
g->ops.runlist.runlist_wait_pending(g, ch->runlist_id);
if (mutex_ret == 0) {
nvgpu_pmu_mutex_release(
@@ -474,8 +475,8 @@ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
ret = 0;
for_each_set_bit(runlist_id, &ulong_runlist_ids, 32U) {
/* Capture the last failure error code */
errcode = g->ops.fifo.update_runlist(g, (u32)runlist_id, chid,
add, wait_for_finish);
errcode = g->ops.runlist.update_runlist(g, (u32)runlist_id,
chid, add, wait_for_finish);
if (errcode != 0) {
nvgpu_err(g,
"failed to update_runlist %lu %d",
@@ -526,7 +527,7 @@ void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
PMU_MUTEX_ID_FIFO, &token);
}
g->ops.fifo.runlist_write_state(g, runlists_mask, runlist_state);
g->ops.runlist.runlist_write_state(g, runlists_mask, runlist_state);
if (mutex_ret == 0) {
nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -579,7 +580,7 @@ int nvgpu_init_runlist(struct gk20a *g, struct fifo_gk20a *f)
nvgpu_log_fn(g, " ");
f->max_runlists = g->ops.fifo.eng_runlist_base_size();
f->max_runlists = g->ops.runlist.eng_runlist_base_size();
f->runlist_info = nvgpu_kzalloc(g,
sizeof(struct fifo_runlist_info_gk20a) *
f->max_runlists);

View File

@@ -370,7 +370,7 @@ int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW:
case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM:
case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH:
ret = g->ops.fifo.set_runlist_interleave(g, tsg->tsgid,
ret = g->ops.runlist.set_runlist_interleave(g, tsg->tsgid,
0, level);
if (ret == 0) {
tsg->interleave_level = level;
@@ -381,7 +381,7 @@ int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
break;
}
return (ret != 0) ? ret : g->ops.fifo.update_runlist(g,
return (ret != 0) ? ret : g->ops.runlist.update_runlist(g,
tsg->runlist_id,
FIFO_INVAL_CHANNEL_ID,
true,

View File

@@ -643,7 +643,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
g->ops.fifo.init_pbdma_intr_descs(f); /* just filling in data/tables */
f->num_channels = g->ops.fifo.get_num_fifos(g);
f->runlist_entry_size = g->ops.fifo.runlist_entry_size();
f->runlist_entry_size = g->ops.runlist.runlist_entry_size();
f->num_runlist_entries = fifo_eng_runlist_length_max_v();
f->num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
f->max_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);
@@ -2870,7 +2870,7 @@ int gk20a_fifo_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
tsg->timeslice_us = timeslice;
return g->ops.fifo.update_runlist(g,
return g->ops.runlist.update_runlist(g,
tsg->runlist_id,
FIFO_INVAL_CHANNEL_ID,
true,

View File

@@ -495,7 +495,6 @@ static const struct gpu_ops gm20b_ops = {
.disable_tsg = gk20a_disable_tsg,
.tsg_verify_channel_status = gk20a_fifo_tsg_unbind_channel_verify_status,
.tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
.update_runlist = gk20a_fifo_update_runlist,
.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,
.get_mmu_fault_info = gk20a_fifo_get_mmu_fault_info,
.get_mmu_fault_desc = gk20a_fifo_get_mmu_fault_desc,
@@ -504,15 +503,10 @@ static const struct gpu_ops gm20b_ops = {
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_num_fifos = gm20b_fifo_get_num_fifos,
.get_pbdma_signature = gk20a_fifo_get_pbdma_signature,
.set_runlist_interleave = gk20a_fifo_set_runlist_interleave,
.tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
.force_reset_ch = gk20a_fifo_force_reset_ch,
.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
.init_engine_info = gm20b_fifo_init_engine_info,
.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gk20a_get_tsg_runlist_entry,
.get_ch_runlist_entry = gk20a_get_ch_runlist_entry,
.is_fault_engine_subid_gpc = gk20a_is_fault_engine_subid_gpc,
.dump_pbdma_status = gk20a_dump_pbdma_status,
.dump_eng_status = gk20a_dump_eng_status,
@@ -547,19 +541,27 @@ static const struct gpu_ops gm20b_ops = {
.get_syncpt_incr_cmd_size = gk20a_fifo_get_syncpt_incr_cmd_size,
.get_sync_ro_map = NULL,
#endif
.runlist_hw_submit = gk20a_fifo_runlist_hw_submit,
.runlist_wait_pending = gk20a_fifo_runlist_wait_pending,
.get_sema_wait_cmd_size = gk20a_fifo_get_sema_wait_cmd_size,
.get_sema_incr_cmd_size = gk20a_fifo_get_sema_incr_cmd_size,
.add_sema_cmd = gk20a_fifo_add_sema_cmd,
.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
.runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
.runlist_write_state = gk20a_fifo_runlist_write_state,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gm20b_fifo_init_ce_engine_info,
.read_pbdma_data = gk20a_fifo_read_pbdma_data,
.reset_pbdma_header = gk20a_fifo_reset_pbdma_header,
},
.runlist = {
.update_runlist = gk20a_fifo_update_runlist,
.set_runlist_interleave = gk20a_fifo_set_runlist_interleave,
.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gk20a_get_tsg_runlist_entry,
.get_ch_runlist_entry = gk20a_get_ch_runlist_entry,
.runlist_hw_submit = gk20a_fifo_runlist_hw_submit,
.runlist_wait_pending = gk20a_fifo_runlist_wait_pending,
.runlist_write_state = gk20a_fifo_runlist_write_state,
},
.netlist = {
.get_netlist_name = gm20b_netlist_get_name,
.is_fw_defined = gm20b_netlist_is_firmware_defined,
@@ -784,6 +786,7 @@ int gm20b_init_hal(struct gk20a *g)
gops->fb = gm20b_ops.fb;
gops->clock_gating = gm20b_ops.clock_gating;
gops->fifo = gm20b_ops.fifo;
gops->runlist = gm20b_ops.runlist;
gops->netlist = gm20b_ops.netlist;
gops->mm = gm20b_ops.mm;
gops->therm = gm20b_ops.therm;

View File

@@ -1561,7 +1561,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
return ret;
}
ret = g->ops.fifo.update_runlist(g,
ret = g->ops.runlist.update_runlist(g,
fault_ch->runlist_id,
FIFO_INVAL_CHANNEL_ID,
true,

View File

@@ -544,9 +544,6 @@ static const struct gpu_ops gp10b_ops = {
.disable_tsg = gk20a_disable_tsg,
.tsg_verify_channel_status = gk20a_fifo_tsg_unbind_channel_verify_status,
.tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
.reschedule_runlist = gk20a_fifo_reschedule_runlist,
.reschedule_preempt_next_locked = gk20a_fifo_reschedule_preempt_next,
.update_runlist = gk20a_fifo_update_runlist,
.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,
.get_mmu_fault_info = gp10b_fifo_get_mmu_fault_info,
.get_mmu_fault_desc = gp10b_fifo_get_mmu_fault_desc,
@@ -555,15 +552,10 @@ static const struct gpu_ops gp10b_ops = {
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_num_fifos = gm20b_fifo_get_num_fifos,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
.set_runlist_interleave = gk20a_fifo_set_runlist_interleave,
.tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
.force_reset_ch = gk20a_fifo_force_reset_ch,
.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
.init_engine_info = gm20b_fifo_init_engine_info,
.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gk20a_get_tsg_runlist_entry,
.get_ch_runlist_entry = gk20a_get_ch_runlist_entry,
.is_fault_engine_subid_gpc = gk20a_is_fault_engine_subid_gpc,
.dump_pbdma_status = gk20a_dump_pbdma_status,
.dump_eng_status = gk20a_dump_eng_status,
@@ -599,19 +591,29 @@ static const struct gpu_ops gp10b_ops = {
.get_sync_ro_map = NULL,
#endif
.resetup_ramfc = gp10b_fifo_resetup_ramfc,
.runlist_hw_submit = gk20a_fifo_runlist_hw_submit,
.runlist_wait_pending = gk20a_fifo_runlist_wait_pending,
.get_sema_wait_cmd_size = gk20a_fifo_get_sema_wait_cmd_size,
.get_sema_incr_cmd_size = gk20a_fifo_get_sema_incr_cmd_size,
.add_sema_cmd = gk20a_fifo_add_sema_cmd,
.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
.runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
.runlist_write_state = gk20a_fifo_runlist_write_state,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gp10b_fifo_init_ce_engine_info,
.read_pbdma_data = gk20a_fifo_read_pbdma_data,
.reset_pbdma_header = gk20a_fifo_reset_pbdma_header,
},
.runlist = {
.reschedule_runlist = gk20a_fifo_reschedule_runlist,
.reschedule_preempt_next_locked = gk20a_fifo_reschedule_preempt_next,
.update_runlist = gk20a_fifo_update_runlist,
.set_runlist_interleave = gk20a_fifo_set_runlist_interleave,
.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gk20a_get_tsg_runlist_entry,
.get_ch_runlist_entry = gk20a_get_ch_runlist_entry,
.runlist_hw_submit = gk20a_fifo_runlist_hw_submit,
.runlist_wait_pending = gk20a_fifo_runlist_wait_pending,
.runlist_write_state = gk20a_fifo_runlist_write_state,
},
.netlist = {
.get_netlist_name = gp10b_netlist_get_name,
.is_fw_defined = gp10b_netlist_is_firmware_defined,
@@ -865,6 +867,7 @@ int gp10b_init_hal(struct gk20a *g)
gops->fb = gp10b_ops.fb;
gops->clock_gating = gp10b_ops.clock_gating;
gops->fifo = gp10b_ops.fifo;
gops->runlist = gp10b_ops.runlist;
gops->netlist = gp10b_ops.netlist;
#ifdef CONFIG_GK20A_CTXSW_TRACE
gops->fecs_trace = gp10b_ops.fecs_trace;

View File

@@ -712,7 +712,6 @@ static const struct gpu_ops gv100_ops = {
.tsg_verify_channel_status = gk20a_fifo_tsg_unbind_channel_verify_status,
.tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
.tsg_verify_status_faulted = gv11b_fifo_tsg_verify_status_faulted,
.update_runlist = gk20a_fifo_update_runlist,
.trigger_mmu_fault = NULL,
.get_mmu_fault_info = NULL,
.get_mmu_fault_desc = NULL,
@@ -721,15 +720,10 @@ static const struct gpu_ops gv100_ops = {
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_num_fifos = gv100_fifo_get_num_fifos,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
.set_runlist_interleave = gk20a_fifo_set_runlist_interleave,
.tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
.force_reset_ch = gk20a_fifo_force_reset_ch,
.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
.init_engine_info = gm20b_fifo_init_engine_info,
.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry,
.get_ch_runlist_entry = gv11b_get_ch_runlist_entry,
.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
.dump_pbdma_status = gk20a_dump_pbdma_status,
.dump_eng_status = gv11b_dump_eng_status,
@@ -769,8 +763,6 @@ static const struct gpu_ops gv100_ops = {
#endif
.resetup_ramfc = NULL,
.free_channel_ctx_header = gv11b_free_subctx_header,
.runlist_hw_submit = gk20a_fifo_runlist_hw_submit,
.runlist_wait_pending = gk20a_fifo_runlist_wait_pending,
.ring_channel_doorbell = gv11b_ring_channel_doorbell,
.get_sema_wait_cmd_size = gv11b_fifo_get_sema_wait_cmd_size,
.get_sema_incr_cmd_size = gv11b_fifo_get_sema_incr_cmd_size,
@@ -779,12 +771,22 @@ static const struct gpu_ops gv100_ops = {
.usermode_base = gv11b_fifo_usermode_base,
.doorbell_token = gv11b_fifo_doorbell_token,
.runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
.runlist_write_state = gk20a_fifo_runlist_write_state,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gp10b_fifo_init_ce_engine_info,
.read_pbdma_data = gk20a_fifo_read_pbdma_data,
.reset_pbdma_header = gk20a_fifo_reset_pbdma_header,
},
.runlist = {
.update_runlist = gk20a_fifo_update_runlist,
.set_runlist_interleave = gk20a_fifo_set_runlist_interleave,
.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry,
.get_ch_runlist_entry = gv11b_get_ch_runlist_entry,
.runlist_hw_submit = gk20a_fifo_runlist_hw_submit,
.runlist_wait_pending = gk20a_fifo_runlist_wait_pending,
.runlist_write_state = gk20a_fifo_runlist_write_state,
},
.netlist = {
.get_netlist_name = gv100_netlist_get_name,
.is_fw_defined = gv100_netlist_is_firmware_defined,
@@ -1126,6 +1128,7 @@ int gv100_init_hal(struct gk20a *g)
gops->nvdec = gv100_ops.nvdec;
gops->clock_gating = gv100_ops.clock_gating;
gops->fifo = gv100_ops.fifo;
gops->runlist = gv100_ops.runlist;
gops->netlist = gv100_ops.netlist;
gops->mm = gv100_ops.mm;
#ifdef CONFIG_GK20A_CTXSW_TRACE

View File

@@ -665,9 +665,6 @@ static const struct gpu_ops gv11b_ops = {
.tsg_verify_channel_status = gk20a_fifo_tsg_unbind_channel_verify_status,
.tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
.tsg_verify_status_faulted = gv11b_fifo_tsg_verify_status_faulted,
.reschedule_runlist = gv11b_fifo_reschedule_runlist,
.reschedule_preempt_next_locked = gk20a_fifo_reschedule_preempt_next,
.update_runlist = gk20a_fifo_update_runlist,
.trigger_mmu_fault = NULL,
.get_mmu_fault_info = NULL,
.get_mmu_fault_desc = NULL,
@@ -676,15 +673,10 @@ static const struct gpu_ops gv11b_ops = {
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_num_fifos = gv11b_fifo_get_num_fifos,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
.set_runlist_interleave = gk20a_fifo_set_runlist_interleave,
.tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
.force_reset_ch = gk20a_fifo_force_reset_ch,
.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
.init_engine_info = gm20b_fifo_init_engine_info,
.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry,
.get_ch_runlist_entry = gv11b_get_ch_runlist_entry,
.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
.dump_pbdma_status = gk20a_dump_pbdma_status,
.dump_eng_status = gv11b_dump_eng_status,
@@ -725,8 +717,6 @@ static const struct gpu_ops gv11b_ops = {
.resetup_ramfc = NULL,
.free_channel_ctx_header = gv11b_free_subctx_header,
.handle_ctxsw_timeout = gv11b_fifo_handle_ctxsw_timeout,
.runlist_hw_submit = gk20a_fifo_runlist_hw_submit,
.runlist_wait_pending = gk20a_fifo_runlist_wait_pending,
.ring_channel_doorbell = gv11b_ring_channel_doorbell,
.get_sema_wait_cmd_size = gv11b_fifo_get_sema_wait_cmd_size,
.get_sema_incr_cmd_size = gv11b_fifo_get_sema_incr_cmd_size,
@@ -735,12 +725,24 @@ static const struct gpu_ops gv11b_ops = {
.usermode_base = gv11b_fifo_usermode_base,
.doorbell_token = gv11b_fifo_doorbell_token,
.runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
.runlist_write_state = gk20a_fifo_runlist_write_state,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gp10b_fifo_init_ce_engine_info,
.read_pbdma_data = gk20a_fifo_read_pbdma_data,
.reset_pbdma_header = gk20a_fifo_reset_pbdma_header,
},
.runlist = {
.reschedule_runlist = gv11b_fifo_reschedule_runlist,
.reschedule_preempt_next_locked = gk20a_fifo_reschedule_preempt_next,
.update_runlist = gk20a_fifo_update_runlist,
.set_runlist_interleave = gk20a_fifo_set_runlist_interleave,
.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry,
.get_ch_runlist_entry = gv11b_get_ch_runlist_entry,
.runlist_hw_submit = gk20a_fifo_runlist_hw_submit,
.runlist_wait_pending = gk20a_fifo_runlist_wait_pending,
.runlist_write_state = gk20a_fifo_runlist_write_state,
},
.netlist = {
.get_netlist_name = gv11b_netlist_get_name,
.is_fw_defined = gv11b_netlist_is_firmware_defined,
@@ -1001,6 +1003,7 @@ int gv11b_init_hal(struct gk20a *g)
gops->fb = gv11b_ops.fb;
gops->clock_gating = gv11b_ops.clock_gating;
gops->fifo = gv11b_ops.fifo;
gops->runlist = gv11b_ops.runlist;
gops->netlist = gv11b_ops.netlist;
gops->mm = gv11b_ops.mm;
#ifdef CONFIG_GK20A_CTXSW_TRACE

View File

@@ -754,13 +754,6 @@ struct gpu_ops {
int (*tsg_verify_channel_status)(struct channel_gk20a *ch);
void (*tsg_verify_status_ctx_reload)(struct channel_gk20a *ch);
void (*tsg_verify_status_faulted)(struct channel_gk20a *ch);
int (*reschedule_runlist)(struct channel_gk20a *ch,
bool preempt_next);
int (*reschedule_preempt_next_locked)(struct channel_gk20a *ch,
bool wait_preempt);
int (*update_runlist)(struct gk20a *g, u32 runlist_id,
u32 chid, bool add,
bool wait_for_finish);
void (*trigger_mmu_fault)(struct gk20a *g,
unsigned long engine_ids);
void (*get_mmu_fault_info)(struct gk20a *g, u32 mmu_fault_id,
@@ -774,9 +767,6 @@ struct gpu_ops {
int (*wait_engine_idle)(struct gk20a *g);
u32 (*get_num_fifos)(struct gk20a *g);
u32 (*get_pbdma_signature)(struct gk20a *g);
int (*set_runlist_interleave)(struct gk20a *g, u32 id,
u32 runlist_id,
u32 new_level);
int (*tsg_set_timeslice)(struct tsg_gk20a *tsg, u32 timeslice);
u32 (*default_timeslice_us)(struct gk20a *g);
int (*force_reset_ch)(struct channel_gk20a *ch,
@@ -786,15 +776,9 @@ struct gpu_ops {
int (*tsg_unbind_channel)(struct channel_gk20a *ch);
int (*tsg_open)(struct tsg_gk20a *tsg);
void (*tsg_release)(struct tsg_gk20a *tsg);
u32 (*eng_runlist_base_size)(void);
int (*init_engine_info)(struct fifo_gk20a *f);
u32 (*get_engines_mask_on_id)(struct gk20a *g,
u32 id, bool is_tsg);
u32 (*runlist_entry_size)(void);
void (*get_tsg_runlist_entry)(struct tsg_gk20a *tsg,
u32 *runlist);
void (*get_ch_runlist_entry)(struct channel_gk20a *ch,
u32 *runlist);
u32 (*userd_gp_get)(struct gk20a *g, struct channel_gk20a *ch);
void (*userd_gp_put)(struct gk20a *g, struct channel_gk20a *ch);
u64 (*userd_pb_get)(struct gk20a *g, struct channel_gk20a *ch);
@@ -860,9 +844,6 @@ struct gpu_ops {
u64 *base_gpuva, u32 *sync_size);
u32 (*get_syncpt_incr_per_release)(void);
#endif
void (*runlist_hw_submit)(struct gk20a *g, u32 runlist_id,
u32 count, u32 buffer_index);
int (*runlist_wait_pending)(struct gk20a *g, u32 runlist_id);
void (*ring_channel_doorbell)(struct channel_gk20a *c);
u64 (*usermode_base)(struct gk20a *g);
u32 (*doorbell_token)(struct channel_gk20a *c);
@@ -877,8 +858,6 @@ struct gpu_ops {
int (*set_sm_exception_type_mask)(struct channel_gk20a *ch,
u32 exception_mask);
u32 (*runlist_busy_engines)(struct gk20a *g, u32 runlist_id);
void (*runlist_write_state)(struct gk20a *g, u32 runlists_mask,
u32 runlist_state);
bool (*find_pbdma_for_runlist)(struct fifo_gk20a *f,
u32 runlist_id, u32 *pbdma_id);
int (*init_ce_engine_info)(struct fifo_gk20a *f);
@@ -890,6 +869,29 @@ struct gpu_ops {
u32 intr_info);
} err_ops;
} fifo;
struct {
int (*reschedule_runlist)(struct channel_gk20a *ch,
bool preempt_next);
int (*reschedule_preempt_next_locked)(struct channel_gk20a *ch,
bool wait_preempt);
int (*update_runlist)(struct gk20a *g, u32 runlist_id,
u32 chid, bool add,
bool wait_for_finish);
int (*set_runlist_interleave)(struct gk20a *g, u32 id,
u32 runlist_id,
u32 new_level);
u32 (*eng_runlist_base_size)(void);
u32 (*runlist_entry_size)(void);
void (*get_tsg_runlist_entry)(struct tsg_gk20a *tsg,
u32 *runlist);
void (*get_ch_runlist_entry)(struct channel_gk20a *ch,
u32 *runlist);
void (*runlist_hw_submit)(struct gk20a *g, u32 runlist_id,
u32 count, u32 buffer_index);
int (*runlist_wait_pending)(struct gk20a *g, u32 runlist_id);
void (*runlist_write_state)(struct gk20a *g, u32 runlists_mask,
u32 runlist_state);
} runlist;
struct pmu_v {
u32 (*get_pmu_cmdline_args_size)(struct nvgpu_pmu *pmu);
void (*set_pmu_cmdline_args_cpu_freq)(struct nvgpu_pmu *pmu,

View File

@@ -1367,7 +1367,7 @@ long gk20a_channel_ioctl(struct file *filp,
err = -EPERM;
break;
}
if (!ch->g->ops.fifo.reschedule_runlist) {
if (!ch->g->ops.runlist.reschedule_runlist) {
err = -ENOSYS;
break;
}
@@ -1378,7 +1378,7 @@ long gk20a_channel_ioctl(struct file *filp,
__func__, cmd);
break;
}
err = ch->g->ops.fifo.reschedule_runlist(ch,
err = ch->g->ops.runlist.reschedule_runlist(ch,
NVGPU_RESCHEDULE_RUNLIST_PREEMPT_NEXT &
((struct nvgpu_reschedule_runlist_args *)buf)->flags);
gk20a_idle(ch->g);

View File

@@ -740,7 +740,6 @@ static const struct gpu_ops tu104_ops = {
.tsg_verify_channel_status = gk20a_fifo_tsg_unbind_channel_verify_status,
.tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
.tsg_verify_status_faulted = gv11b_fifo_tsg_verify_status_faulted,
.update_runlist = gk20a_fifo_update_runlist,
.trigger_mmu_fault = NULL,
.get_mmu_fault_info = NULL,
.get_mmu_fault_desc = NULL,
@@ -749,15 +748,10 @@ static const struct gpu_ops tu104_ops = {
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_num_fifos = gv100_fifo_get_num_fifos,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
.set_runlist_interleave = gk20a_fifo_set_runlist_interleave,
.tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
.force_reset_ch = gk20a_fifo_force_reset_ch,
.eng_runlist_base_size = fifo_runlist_base_lo__size_1_v,
.init_engine_info = gm20b_fifo_init_engine_info,
.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry,
.get_ch_runlist_entry = gv11b_get_ch_runlist_entry,
.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
.dump_pbdma_status = gk20a_dump_pbdma_status,
.dump_eng_status = gv11b_dump_eng_status,
@@ -798,8 +792,6 @@ static const struct gpu_ops tu104_ops = {
.resetup_ramfc = NULL,
.free_channel_ctx_header = gv11b_free_subctx_header,
.handle_ctxsw_timeout = gv11b_fifo_handle_ctxsw_timeout,
.runlist_hw_submit = tu104_fifo_runlist_hw_submit,
.runlist_wait_pending = tu104_fifo_runlist_wait_pending,
.ring_channel_doorbell = tu104_ring_channel_doorbell,
.get_sema_wait_cmd_size = gv11b_fifo_get_sema_wait_cmd_size,
.get_sema_incr_cmd_size = gv11b_fifo_get_sema_incr_cmd_size,
@@ -810,12 +802,22 @@ static const struct gpu_ops tu104_ops = {
.deinit_pdb_cache_war = tu104_deinit_pdb_cache_war,
.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,
.runlist_busy_engines = gk20a_fifo_runlist_busy_engines,
.runlist_write_state = gk20a_fifo_runlist_write_state,
.find_pbdma_for_runlist = gk20a_fifo_find_pbdma_for_runlist,
.init_ce_engine_info = gp10b_fifo_init_ce_engine_info,
.read_pbdma_data = tu104_fifo_read_pbdma_data,
.reset_pbdma_header = tu104_fifo_reset_pbdma_header,
},
.runlist = {
.update_runlist = gk20a_fifo_update_runlist,
.set_runlist_interleave = gk20a_fifo_set_runlist_interleave,
.eng_runlist_base_size = fifo_runlist_base_lo__size_1_v,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry,
.get_ch_runlist_entry = gv11b_get_ch_runlist_entry,
.runlist_hw_submit = tu104_fifo_runlist_hw_submit,
.runlist_wait_pending = tu104_fifo_runlist_wait_pending,
.runlist_write_state = gk20a_fifo_runlist_write_state,
},
.netlist = {
.get_netlist_name = tu104_netlist_get_name,
.is_fw_defined = tu104_netlist_is_firmware_defined,
@@ -1160,6 +1162,7 @@ int tu104_init_hal(struct gk20a *g)
gops->nvdec = tu104_ops.nvdec;
gops->clock_gating = tu104_ops.clock_gating;
gops->fifo = tu104_ops.fifo;
gops->runlist = tu104_ops.runlist;
gops->netlist = tu104_ops.netlist;
gops->mm = tu104_ops.mm;
#ifdef CONFIG_GK20A_CTXSW_TRACE

View File

@@ -368,8 +368,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.disable_tsg = gk20a_disable_tsg,
.tsg_verify_channel_status = NULL,
.tsg_verify_status_ctx_reload = NULL,
.reschedule_runlist = NULL,
.update_runlist = vgpu_fifo_update_runlist,
.trigger_mmu_fault = NULL,
.get_mmu_fault_info = NULL,
.get_mmu_fault_desc = gp10b_fifo_get_mmu_fault_desc,
@@ -378,17 +376,12 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.wait_engine_idle = vgpu_fifo_wait_engine_idle,
.get_num_fifos = gm20b_fifo_get_num_fifos,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
.set_runlist_interleave = vgpu_fifo_set_runlist_interleave,
.tsg_set_timeslice = vgpu_tsg_set_timeslice,
.tsg_open = vgpu_tsg_open,
.tsg_release = vgpu_tsg_release,
.force_reset_ch = vgpu_fifo_force_reset_ch,
.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
.init_engine_info = vgpu_fifo_init_engine_info,
.get_engines_mask_on_id = NULL,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gk20a_get_tsg_runlist_entry,
.get_ch_runlist_entry = gk20a_get_ch_runlist_entry,
.is_fault_engine_subid_gpc = gk20a_is_fault_engine_subid_gpc,
.dump_pbdma_status = NULL,
.dump_eng_status = NULL,
@@ -424,13 +417,22 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.get_sync_ro_map = NULL,
#endif
.resetup_ramfc = NULL,
.runlist_hw_submit = NULL,
.runlist_wait_pending = NULL,
.get_sema_wait_cmd_size = gk20a_fifo_get_sema_wait_cmd_size,
.get_sema_incr_cmd_size = gk20a_fifo_get_sema_incr_cmd_size,
.add_sema_cmd = gk20a_fifo_add_sema_cmd,
.set_sm_exception_type_mask = vgpu_set_sm_exception_type_mask,
},
.runlist = {
.reschedule_runlist = NULL,
.update_runlist = vgpu_fifo_update_runlist,
.set_runlist_interleave = vgpu_fifo_set_runlist_interleave,
.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gk20a_get_tsg_runlist_entry,
.get_ch_runlist_entry = gk20a_get_ch_runlist_entry,
.runlist_hw_submit = NULL,
.runlist_wait_pending = NULL,
},
.netlist = {
.get_netlist_name = gp10b_netlist_get_name,
.is_fw_defined = gp10b_netlist_is_firmware_defined,
@@ -657,6 +659,7 @@ int vgpu_gp10b_init_hal(struct gk20a *g)
gops->fb = vgpu_gp10b_ops.fb;
gops->clock_gating = vgpu_gp10b_ops.clock_gating;
gops->fifo = vgpu_gp10b_ops.fifo;
gops->runlist = vgpu_gp10b_ops.runlist;
gops->netlist = vgpu_gp10b_ops.netlist;
#ifdef CONFIG_GK20A_CTXSW_TRACE
gops->fecs_trace = vgpu_gp10b_ops.fecs_trace;

View File

@@ -438,7 +438,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.tsg_verify_status_ctx_reload = NULL,
/* TODO: implement it for CE fault */
.tsg_verify_status_faulted = NULL,
.update_runlist = vgpu_fifo_update_runlist,
.trigger_mmu_fault = NULL,
.get_mmu_fault_info = NULL,
.get_mmu_fault_desc = NULL,
@@ -447,17 +446,12 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.wait_engine_idle = vgpu_fifo_wait_engine_idle,
.get_num_fifos = gv11b_fifo_get_num_fifos,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
.set_runlist_interleave = vgpu_fifo_set_runlist_interleave,
.tsg_set_timeslice = vgpu_tsg_set_timeslice,
.tsg_open = vgpu_tsg_open,
.tsg_release = vgpu_tsg_release,
.force_reset_ch = vgpu_fifo_force_reset_ch,
.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
.init_engine_info = vgpu_fifo_init_engine_info,
.get_engines_mask_on_id = NULL,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry,
.get_ch_runlist_entry = gv11b_get_ch_runlist_entry,
.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
.dump_pbdma_status = NULL,
.dump_eng_status = NULL,
@@ -496,11 +490,8 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.get_sync_ro_map = vgpu_gv11b_fifo_get_sync_ro_map,
#endif
.resetup_ramfc = NULL,
.reschedule_runlist = NULL,
.free_channel_ctx_header = vgpu_gv11b_free_subctx_header,
.handle_ctxsw_timeout = gv11b_fifo_handle_ctxsw_timeout,
.runlist_hw_submit = NULL,
.runlist_wait_pending = NULL,
.ring_channel_doorbell = gv11b_ring_channel_doorbell,
.get_sema_wait_cmd_size = gv11b_fifo_get_sema_wait_cmd_size,
.get_sema_incr_cmd_size = gv11b_fifo_get_sema_incr_cmd_size,
@@ -509,6 +500,17 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.usermode_base = gv11b_fifo_usermode_base,
.doorbell_token = gv11b_fifo_doorbell_token,
},
.runlist = {
.reschedule_runlist = NULL,
.update_runlist = vgpu_fifo_update_runlist,
.set_runlist_interleave = vgpu_fifo_set_runlist_interleave,
.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
.runlist_entry_size = ram_rl_entry_size_v,
.get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry,
.get_ch_runlist_entry = gv11b_get_ch_runlist_entry,
.runlist_hw_submit = NULL,
.runlist_wait_pending = NULL,
},
.netlist = {
.get_netlist_name = gv11b_netlist_get_name,
.is_fw_defined = gv11b_netlist_is_firmware_defined,
@@ -734,6 +736,7 @@ int vgpu_gv11b_init_hal(struct gk20a *g)
gops->fb = vgpu_gv11b_ops.fb;
gops->clock_gating = vgpu_gv11b_ops.clock_gating;
gops->fifo = vgpu_gv11b_ops.fifo;
gops->runlist = vgpu_gv11b_ops.runlist;
gops->netlist = vgpu_gv11b_ops.netlist;
gops->mm = vgpu_gv11b_ops.mm;
#ifdef CONFIG_GK20A_CTXSW_TRACE

View File

@@ -58,8 +58,8 @@ static void setup_fifo(struct gk20a *g, unsigned long *tsg_map,
* entries are enough. The logic is same across chips.
*/
f->runlist_entry_size = 2 * sizeof(u32);
g->ops.fifo.get_tsg_runlist_entry = gk20a_get_tsg_runlist_entry;
g->ops.fifo.get_ch_runlist_entry = gk20a_get_ch_runlist_entry;
g->ops.runlist.get_tsg_runlist_entry = gk20a_get_tsg_runlist_entry;
g->ops.runlist.get_ch_runlist_entry = gk20a_get_ch_runlist_entry;
g->runlist_interleave = interleave;