mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: add channel HAL section for ccsr_*
Split the ops that belong to the channel unit out into a new HAL section called channel. Channel is a broad concept; this section covers just the code that accesses channel registers (ccsr_*). This is effectively a rename: the implementations stay put. The word "channel" is also dropped from certain HAL entries to avoid redundancy (e.g., channel.disable_channel -> channel.disable). fifo.get_num_fifos gets an entirely new name: channel.count.

Jira NVGPU-1307

Change-Id: I9a08103e461bf3ddb743aa37ababee3e0c73c861
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2017261
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 1d49e8218d
commit c330d8fd98
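For reference, a minimal sketch (not part of the patch) of how a call site migrates under this change. The helper function below is hypothetical and assumes the nvgpu headers; the op names follow the struct gpu_ops hunk in the diff:

/* Hypothetical helper for illustration only. */
static void example_toggle_channel(struct gk20a *g, struct channel_gk20a *ch)
{
        u32 num_channels;

        /*
         * Before this change, call sites went through the fifo section:
         *   g->ops.fifo.disable_channel(ch);
         *   g->ops.fifo.enable_channel(ch);
         *   num_channels = g->ops.fifo.get_num_fifos(g);
         */

        /*
         * After this change, the same implementations are reached through
         * the channel section; only get_num_fifos also changes its name,
         * to count.
         */
        g->ops.channel.disable(ch);
        g->ops.channel.enable(ch);
        num_channels = g->ops.channel.count(g);
        (void)num_channels;
}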
@@ -380,7 +380,7 @@ void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, u32 index)
 fb_mmu_fault_buffer_hi_addr_f(addr_hi));
 
 g->ops.fb.write_mmu_fault_buffer_size(g, index,
-fb_mmu_fault_buffer_size_val_f(g->ops.fifo.get_num_fifos(g)) |
+fb_mmu_fault_buffer_size_val_f(g->ops.channel.count(g)) |
 fb_mmu_fault_buffer_size_overflow_intr_enable_f());
 
 gv11b_fb_fault_buf_set_state_hw(g, index, NVGPU_FB_MMU_FAULT_BUF_ENABLED);

@@ -461,7 +461,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 nvgpu_wait_for_deferred_interrupts(g);
 
 unbind:
-g->ops.fifo.unbind_channel(ch);
+g->ops.channel.unbind(ch);
 g->ops.fifo.free_inst(g, ch);
 
 /* put back the channel-wide submit ref from init */

@@ -1291,7 +1291,7 @@ int nvgpu_channel_setup_bind(struct channel_gk20a *c,
 goto clean_up_priv_cmd;
 }
 
-g->ops.fifo.bind_channel(c);
+g->ops.channel.bind(c);
 
 nvgpu_log_fn(g, "done");
 return 0;

@@ -2445,7 +2445,7 @@ int gk20a_channel_suspend(struct gk20a *g)
 "recovered channel %d",
 chid);
 } else {
-g->ops.fifo.unbind_channel(ch);
+g->ops.channel.unbind(ch);
 }
 gk20a_channel_put(ch);
 }

@@ -2476,7 +2476,7 @@ int gk20a_channel_resume(struct gk20a *g)
 "channel %d", chid);
 } else {
 nvgpu_log_info(g, "resume channel %d", chid);
-g->ops.fifo.bind_channel(ch);
+g->ops.channel.bind(ch);
 channels_in_use = true;
 active_runlist_ids |= (u32) BIT64(ch->runlist_id);
 }

@@ -58,7 +58,7 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);
 
 if (is_next || is_ctx_reload) {
-g->ops.fifo.enable_channel(ch);
+g->ops.channel.enable(ch);
 }
 }
 

@@ -70,7 +70,7 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 continue;
 }
 
-g->ops.fifo.enable_channel(ch);
+g->ops.channel.enable(ch);
 }
 nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 

@@ -86,7 +86,7 @@ void gk20a_disable_tsg(struct tsg_gk20a *tsg)
 
 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-g->ops.fifo.disable_channel(ch);
+g->ops.channel.disable(ch);
 }
 nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 }

@@ -641,7 +641,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
 
 g->ops.fifo.init_pbdma_intr_descs(f); /* just filling in data/tables */
 
-f->num_channels = g->ops.fifo.get_num_fifos(g);
+f->num_channels = g->ops.channel.count(g);
 f->runlist_entry_size = g->ops.runlist.runlist_entry_size();
 f->num_runlist_entries = fifo_eng_runlist_length_max_v();
 f->num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);

@@ -3186,14 +3186,14 @@ void gk20a_dump_eng_status(struct gk20a *g,
 gk20a_debug_output(o, "\n");
 }
 
-void gk20a_fifo_enable_channel(struct channel_gk20a *ch)
+void gk20a_fifo_channel_enable(struct channel_gk20a *ch)
 {
 gk20a_writel(ch->g, ccsr_channel_r(ch->chid),
 gk20a_readl(ch->g, ccsr_channel_r(ch->chid)) |
 ccsr_channel_enable_set_true_f());
 }
 
-void gk20a_fifo_disable_channel(struct channel_gk20a *ch)
+void gk20a_fifo_channel_disable(struct channel_gk20a *ch)
 {
 gk20a_writel(ch->g, ccsr_channel_r(ch->chid),
 gk20a_readl(ch->g,

@@ -357,8 +357,8 @@ void gk20a_dump_eng_status(struct gk20a *g,
 struct gk20a_debug_output *o);
 const char *gk20a_decode_ccsr_chan_status(u32 index);
 const char *gk20a_decode_pbdma_chan_eng_ctx_status(u32 index);
-void gk20a_fifo_enable_channel(struct channel_gk20a *ch);
-void gk20a_fifo_disable_channel(struct channel_gk20a *ch);
+void gk20a_fifo_channel_enable(struct channel_gk20a *ch);
+void gk20a_fifo_channel_disable(struct channel_gk20a *ch);
 
 bool gk20a_fifo_channel_status_is_next(struct gk20a *g, u32 chid);
 bool gk20a_fifo_channel_status_is_ctx_reload(struct gk20a *g, u32 chid);

@@ -485,10 +485,6 @@ static const struct gpu_ops gm20b_ops = {
 },
 .fifo = {
 .init_fifo_setup_hw = gk20a_init_fifo_setup_hw,
-.bind_channel = channel_gm20b_bind,
-.unbind_channel = gk20a_fifo_channel_unbind,
-.disable_channel = gk20a_fifo_disable_channel,
-.enable_channel = gk20a_fifo_enable_channel,
 .alloc_inst = gk20a_fifo_alloc_inst,
 .free_inst = gk20a_fifo_free_inst,
 .setup_ramfc = gk20a_fifo_setup_ramfc,

@@ -510,7 +506,6 @@ static const struct gpu_ops gm20b_ops = {
 .get_mmu_fault_client_desc = gk20a_fifo_get_mmu_fault_client_desc,
 .get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc,
 .wait_engine_idle = gk20a_fifo_wait_engine_idle,
-.get_num_fifos = gm20b_fifo_get_num_fifos,
 .get_pbdma_signature = gk20a_fifo_get_pbdma_signature,
 .tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
 .force_reset_ch = gk20a_fifo_force_reset_ch,

@@ -574,6 +569,13 @@ static const struct gpu_ops gm20b_ops = {
 .runlist_wait_pending = gk20a_fifo_runlist_wait_pending,
 .runlist_write_state = gk20a_fifo_runlist_write_state,
 },
+.channel = {
+.bind = channel_gm20b_bind,
+.unbind = gk20a_fifo_channel_unbind,
+.enable = gk20a_fifo_channel_enable,
+.disable = gk20a_fifo_channel_disable,
+.count = gm20b_fifo_get_num_fifos,
+},
 .netlist = {
 .get_netlist_name = gm20b_netlist_get_name,
 .is_fw_defined = gm20b_netlist_is_firmware_defined,

@@ -801,6 +803,7 @@ int gm20b_init_hal(struct gk20a *g)
 gops->clock_gating = gm20b_ops.clock_gating;
 gops->fifo = gm20b_ops.fifo;
 gops->runlist = gm20b_ops.runlist;
+gops->channel = gm20b_ops.channel;
 gops->sync = gm20b_ops.sync;
 gops->netlist = gm20b_ops.netlist;
 gops->mm = gm20b_ops.mm;

@@ -535,10 +535,6 @@ static const struct gpu_ops gp10b_ops = {
 },
 .fifo = {
 .init_fifo_setup_hw = gk20a_init_fifo_setup_hw,
-.bind_channel = channel_gm20b_bind,
-.unbind_channel = gk20a_fifo_channel_unbind,
-.disable_channel = gk20a_fifo_disable_channel,
-.enable_channel = gk20a_fifo_enable_channel,
 .alloc_inst = gk20a_fifo_alloc_inst,
 .free_inst = gk20a_fifo_free_inst,
 .setup_ramfc = channel_gp10b_setup_ramfc,

@@ -560,7 +556,6 @@ static const struct gpu_ops gp10b_ops = {
 .get_mmu_fault_client_desc = gp10b_fifo_get_mmu_fault_client_desc,
 .get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc,
 .wait_engine_idle = gk20a_fifo_wait_engine_idle,
-.get_num_fifos = gm20b_fifo_get_num_fifos,
 .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
 .tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
 .force_reset_ch = gk20a_fifo_force_reset_ch,

@@ -627,6 +622,13 @@ static const struct gpu_ops gp10b_ops = {
 .runlist_wait_pending = gk20a_fifo_runlist_wait_pending,
 .runlist_write_state = gk20a_fifo_runlist_write_state,
 },
+.channel = {
+.bind = channel_gm20b_bind,
+.unbind = gk20a_fifo_channel_unbind,
+.enable = gk20a_fifo_channel_enable,
+.disable = gk20a_fifo_channel_disable,
+.count = gm20b_fifo_get_num_fifos,
+},
 .netlist = {
 .get_netlist_name = gp10b_netlist_get_name,
 .is_fw_defined = gp10b_netlist_is_firmware_defined,

@@ -884,6 +886,7 @@ int gp10b_init_hal(struct gk20a *g)
 gops->clock_gating = gp10b_ops.clock_gating;
 gops->fifo = gp10b_ops.fifo;
 gops->runlist = gp10b_ops.runlist;
+gops->channel = gp10b_ops.channel;
 gops->sync = gp10b_ops.sync;
 gops->netlist = gp10b_ops.netlist;
 #ifdef CONFIG_GK20A_CTXSW_TRACE

@@ -696,10 +696,6 @@ static const struct gpu_ops gv100_ops = {
 .fifo = {
 .get_preempt_timeout = gv11b_fifo_get_preempt_timeout,
 .init_fifo_setup_hw = gv11b_init_fifo_setup_hw,
-.bind_channel = channel_gm20b_bind,
-.unbind_channel = channel_gv11b_unbind,
-.disable_channel = gk20a_fifo_disable_channel,
-.enable_channel = gk20a_fifo_enable_channel,
 .alloc_inst = gk20a_fifo_alloc_inst,
 .free_inst = gk20a_fifo_free_inst,
 .setup_ramfc = channel_gv11b_setup_ramfc,

@@ -722,7 +718,6 @@ static const struct gpu_ops gv100_ops = {
 .get_mmu_fault_client_desc = NULL,
 .get_mmu_fault_gpc_desc = NULL,
 .wait_engine_idle = gk20a_fifo_wait_engine_idle,
-.get_num_fifos = gv100_fifo_get_num_fifos,
 .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
 .tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
 .force_reset_ch = gk20a_fifo_force_reset_ch,

@@ -794,6 +789,13 @@ static const struct gpu_ops gv100_ops = {
 .runlist_wait_pending = gk20a_fifo_runlist_wait_pending,
 .runlist_write_state = gk20a_fifo_runlist_write_state,
 },
+.channel = {
+.bind = channel_gm20b_bind,
+.unbind = channel_gv11b_unbind,
+.enable = gk20a_fifo_channel_enable,
+.disable = gk20a_fifo_channel_disable,
+.count = gv100_fifo_get_num_fifos,
+},
 .netlist = {
 .get_netlist_name = gv100_netlist_get_name,
 .is_fw_defined = gv100_netlist_is_firmware_defined,

@@ -1146,6 +1148,7 @@ int gv100_init_hal(struct gk20a *g)
 gops->clock_gating = gv100_ops.clock_gating;
 gops->fifo = gv100_ops.fifo;
 gops->runlist = gv100_ops.runlist;
+gops->channel = gv100_ops.channel;
 gops->sync = gv100_ops.sync;
 gops->netlist = gv100_ops.netlist;
 gops->mm = gv100_ops.mm;

@@ -806,7 +806,7 @@ int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg)
 
 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-g->ops.fifo.enable_channel(ch);
+g->ops.channel.enable(ch);
 last_ch = ch;
 }
 nvgpu_rwsem_up_read(&tsg->ch_list_lock);

@@ -653,10 +653,6 @@ static const struct gpu_ops gv11b_ops = {
 .fifo = {
 .get_preempt_timeout = gv11b_fifo_get_preempt_timeout,
 .init_fifo_setup_hw = gv11b_init_fifo_setup_hw,
-.bind_channel = channel_gm20b_bind,
-.unbind_channel = channel_gv11b_unbind,
-.disable_channel = gk20a_fifo_disable_channel,
-.enable_channel = gk20a_fifo_enable_channel,
 .alloc_inst = gk20a_fifo_alloc_inst,
 .free_inst = gk20a_fifo_free_inst,
 .setup_ramfc = channel_gv11b_setup_ramfc,

@@ -679,7 +675,6 @@ static const struct gpu_ops gv11b_ops = {
 .get_mmu_fault_client_desc = NULL,
 .get_mmu_fault_gpc_desc = NULL,
 .wait_engine_idle = gk20a_fifo_wait_engine_idle,
-.get_num_fifos = gv11b_fifo_get_num_fifos,
 .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
 .tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
 .force_reset_ch = gk20a_fifo_force_reset_ch,

@@ -754,6 +749,13 @@ static const struct gpu_ops gv11b_ops = {
 .runlist_wait_pending = gk20a_fifo_runlist_wait_pending,
 .runlist_write_state = gk20a_fifo_runlist_write_state,
 },
+.channel = {
+.bind = channel_gm20b_bind,
+.unbind = channel_gv11b_unbind,
+.enable = gk20a_fifo_channel_enable,
+.disable = gk20a_fifo_channel_disable,
+.count = gv11b_fifo_get_num_fifos,
+},
 .netlist = {
 .get_netlist_name = gv11b_netlist_get_name,
 .is_fw_defined = gv11b_netlist_is_firmware_defined,

@@ -1018,6 +1020,7 @@ int gv11b_init_hal(struct gk20a *g)
 gops->clock_gating = gv11b_ops.clock_gating;
 gops->fifo = gv11b_ops.fifo;
 gops->runlist = gv11b_ops.runlist;
+gops->channel = gv11b_ops.channel;
 gops->sync = gv11b_ops.sync;
 gops->netlist = gv11b_ops.netlist;
 gops->mm = gv11b_ops.mm;

@@ -122,7 +122,7 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
 size_t fb_size;
 
 /* Max entries take care of 1 entry used for full detection */
-fb_size = ((size_t)g->ops.fifo.get_num_fifos(g) + (size_t)1) *
+fb_size = ((size_t)g->ops.channel.count(g) + (size_t)1) *
 (size_t)gmmu_fault_buf_size_v();
 
 if (!nvgpu_mem_is_valid(

@@ -745,10 +745,6 @@ struct gpu_ops {
 struct {
 int (*setup_sw)(struct gk20a *g);
 int (*init_fifo_setup_hw)(struct gk20a *g);
-void (*bind_channel)(struct channel_gk20a *ch_gk20a);
-void (*unbind_channel)(struct channel_gk20a *ch_gk20a);
-void (*disable_channel)(struct channel_gk20a *ch);
-void (*enable_channel)(struct channel_gk20a *ch);
 int (*alloc_inst)(struct gk20a *g, struct channel_gk20a *ch);
 void (*free_inst)(struct gk20a *g, struct channel_gk20a *ch);
 int (*setup_ramfc)(struct channel_gk20a *c, u64 gpfifo_base,

@@ -774,7 +770,6 @@ struct gpu_ops {
 void (*apply_pb_timeout)(struct gk20a *g);
 void (*apply_ctxsw_timeout_intr)(struct gk20a *g);
 int (*wait_engine_idle)(struct gk20a *g);
-u32 (*get_num_fifos)(struct gk20a *g);
 u32 (*get_pbdma_signature)(struct gk20a *g);
 int (*tsg_set_timeslice)(struct tsg_gk20a *tsg, u32 timeslice);
 u32 (*default_timeslice_us)(struct gk20a *g);

@@ -906,6 +901,13 @@ struct gpu_ops {
 struct priv_cmd_entry *cmd,
 u32 off, bool acquire, bool wfi);
 } sync;
+struct {
+void (*bind)(struct channel_gk20a *ch);
+void (*unbind)(struct channel_gk20a *ch);
+void (*enable)(struct channel_gk20a *ch);
+void (*disable)(struct channel_gk20a *ch);
+u32 (*count)(struct gk20a *g);
+} channel;
 struct pmu_v {
 u32 (*get_pmu_cmdline_args_size)(struct nvgpu_pmu *pmu);
 void (*set_pmu_cmdline_args_cpu_freq)(struct nvgpu_pmu *pmu,

@@ -1330,8 +1330,8 @@ long gk20a_channel_ioctl(struct file *filp,
 __func__, cmd);
 break;
 }
-if (ch->g->ops.fifo.enable_channel)
-ch->g->ops.fifo.enable_channel(ch);
+if (ch->g->ops.channel.enable)
+ch->g->ops.channel.enable(ch);
 else
 err = -ENOSYS;
 gk20a_idle(ch->g);

@@ -1344,8 +1344,8 @@ long gk20a_channel_ioctl(struct file *filp,
 __func__, cmd);
 break;
 }
-if (ch->g->ops.fifo.disable_channel)
-ch->g->ops.fifo.disable_channel(ch);
+if (ch->g->ops.channel.disable)
+ch->g->ops.channel.disable(ch);
 else
 err = -ENOSYS;
 gk20a_idle(ch->g);

@@ -724,10 +724,6 @@ static const struct gpu_ops tu104_ops = {
 .fifo = {
 .get_preempt_timeout = gv100_fifo_get_preempt_timeout,
 .init_fifo_setup_hw = tu104_init_fifo_setup_hw,
-.bind_channel = channel_gm20b_bind,
-.unbind_channel = channel_gv11b_unbind,
-.disable_channel = gk20a_fifo_disable_channel,
-.enable_channel = gk20a_fifo_enable_channel,
 .alloc_inst = gk20a_fifo_alloc_inst,
 .free_inst = gk20a_fifo_free_inst,
 .setup_ramfc = channel_tu104_setup_ramfc,

@@ -750,7 +746,6 @@ static const struct gpu_ops tu104_ops = {
 .get_mmu_fault_client_desc = NULL,
 .get_mmu_fault_gpc_desc = NULL,
 .wait_engine_idle = gk20a_fifo_wait_engine_idle,
-.get_num_fifos = gv100_fifo_get_num_fifos,
 .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
 .tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
 .force_reset_ch = gk20a_fifo_force_reset_ch,

@@ -825,6 +820,13 @@ static const struct gpu_ops tu104_ops = {
 .runlist_wait_pending = tu104_fifo_runlist_wait_pending,
 .runlist_write_state = gk20a_fifo_runlist_write_state,
 },
+.channel = {
+.bind = channel_gm20b_bind,
+.unbind = channel_gv11b_unbind,
+.enable = gk20a_fifo_channel_enable,
+.disable = gk20a_fifo_channel_disable,
+.count = gv100_fifo_get_num_fifos,
+},
 .netlist = {
 .get_netlist_name = tu104_netlist_get_name,
 .is_fw_defined = tu104_netlist_is_firmware_defined,

@@ -1181,6 +1183,7 @@ int tu104_init_hal(struct gk20a *g)
 gops->clock_gating = tu104_ops.clock_gating;
 gops->fifo = tu104_ops.fifo;
 gops->runlist = tu104_ops.runlist;
+gops->channel = tu104_ops.channel;
 gops->sync = tu104_ops.sync;
 gops->netlist = tu104_ops.netlist;
 gops->mm = tu104_ops.mm;

@@ -350,10 +350,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 },
 .fifo = {
 .init_fifo_setup_hw = vgpu_init_fifo_setup_hw,
-.bind_channel = vgpu_channel_bind,
-.unbind_channel = vgpu_channel_unbind,
-.disable_channel = vgpu_channel_disable,
-.enable_channel = vgpu_channel_enable,
 .alloc_inst = vgpu_channel_alloc_inst,
 .free_inst = vgpu_channel_free_inst,
 .setup_ramfc = vgpu_channel_setup_ramfc,

@@ -375,7 +371,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 .get_mmu_fault_client_desc = gp10b_fifo_get_mmu_fault_client_desc,
 .get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc,
 .wait_engine_idle = vgpu_fifo_wait_engine_idle,
-.get_num_fifos = gm20b_fifo_get_num_fifos,
 .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
 .tsg_set_timeslice = vgpu_tsg_set_timeslice,
 .tsg_open = vgpu_tsg_open,

@@ -437,6 +432,13 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 .runlist_hw_submit = NULL,
 .runlist_wait_pending = NULL,
 },
+.channel = {
+.bind = vgpu_channel_bind,
+.unbind = vgpu_channel_unbind,
+.enable = vgpu_channel_enable,
+.disable = vgpu_channel_disable,
+.count = gm20b_fifo_get_num_fifos,
+},
 .netlist = {
 .get_netlist_name = gp10b_netlist_get_name,
 .is_fw_defined = gp10b_netlist_is_firmware_defined,

@@ -666,6 +668,7 @@ int vgpu_gp10b_init_hal(struct gk20a *g)
 gops->clock_gating = vgpu_gp10b_ops.clock_gating;
 gops->fifo = vgpu_gp10b_ops.fifo;
 gops->runlist = vgpu_gp10b_ops.runlist;
+gops->channel = vgpu_gp10b_ops.channel;
 gops->sync = vgpu_gp10b_ops.sync;
 gops->netlist = vgpu_gp10b_ops.netlist;
 #ifdef CONFIG_GK20A_CTXSW_TRACE

@@ -415,10 +415,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 },
 .fifo = {
 .init_fifo_setup_hw = vgpu_gv11b_init_fifo_setup_hw,
-.bind_channel = vgpu_channel_bind,
-.unbind_channel = vgpu_channel_unbind,
-.disable_channel = vgpu_channel_disable,
-.enable_channel = vgpu_channel_enable,
 .alloc_inst = vgpu_channel_alloc_inst,
 .free_inst = vgpu_channel_free_inst,
 .setup_ramfc = vgpu_channel_setup_ramfc,

@@ -442,7 +438,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .get_mmu_fault_client_desc = NULL,
 .get_mmu_fault_gpc_desc = NULL,
 .wait_engine_idle = vgpu_fifo_wait_engine_idle,
-.get_num_fifos = gv11b_fifo_get_num_fifos,
 .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
 .tsg_set_timeslice = vgpu_tsg_set_timeslice,
 .tsg_open = vgpu_tsg_open,

@@ -512,6 +507,13 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 .runlist_hw_submit = NULL,
 .runlist_wait_pending = NULL,
 },
+.channel = {
+.bind = vgpu_channel_bind,
+.unbind = vgpu_channel_unbind,
+.enable = vgpu_channel_enable,
+.disable = vgpu_channel_disable,
+.count = gv11b_fifo_get_num_fifos,
+},
 .netlist = {
 .get_netlist_name = gv11b_netlist_get_name,
 .is_fw_defined = gv11b_netlist_is_firmware_defined,

@@ -740,6 +742,7 @@ int vgpu_gv11b_init_hal(struct gk20a *g)
 gops->clock_gating = vgpu_gv11b_ops.clock_gating;
 gops->fifo = vgpu_gv11b_ops.fifo;
 gops->runlist = vgpu_gv11b_ops.runlist;
+gops->channel = vgpu_gv11b_ops.channel;
 gops->sync = vgpu_gv11b_ops.sync;
 gops->netlist = vgpu_gv11b_ops.netlist;
 gops->mm = vgpu_gv11b_ops.mm;

@@ -69,7 +69,7 @@ int vgpu_gv11b_enable_tsg(struct tsg_gk20a *tsg)
 
 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-g->ops.fifo.enable_channel(ch);
+g->ops.channel.enable(ch);
 last_ch = ch;
 }
 nvgpu_rwsem_up_read(&tsg->ch_list_lock);

@@ -81,7 +81,7 @@ int vgpu_enable_tsg(struct tsg_gk20a *tsg)
 
 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-g->ops.fifo.enable_channel(ch);
+g->ops.channel.enable(ch);
 }
 nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 