gpu: nvgpu: unify vgpu runlist init

Split the native-specific engine info collection out of
nvgpu_init_runlist() so that the function contains only common code.
Call this common function from the vgpu code, whose own runlist init
would otherwise be identical.

Jira NVGPU-1309

Change-Id: I9e83669c84eb6b145fcadb4fa6e06413b34e1c03
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1978060
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Konsta Holtta <kholtta@nvidia.com>
Date:      2018-12-21 13:36:37 +02:00
Committer: mobile promotions
Commit:    11c0c1ad89 (parent 2f51d7c5ed)

3 changed files with 54 additions and 81 deletions
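
For orientation, the sketch below condenses the control flow that results
from this change. It is a stand-alone, user-space illustration only: the
function names loosely mirror the driver's nvgpu_init_runlist() /
nvgpu_init_runlist_enginfo() split and the g->is_virtual check visible in
the diff, but the types, flag value, and function bodies are stand-ins,
not nvgpu code.

/* Minimal sketch of the post-refactor init flow (illustrative only). */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for NVGPU_DMA_PHYSICALLY_ADDRESSED. */
#define DMA_PHYSICALLY_ADDRESSED 0x1u

struct gpu { bool is_virtual; };

/* Common path: allocates the runlist buffers; the vgpu case simply
 * drops the physically-addressed DMA flag. */
static int init_runlist_common(struct gpu *g)
{
	unsigned int flags = g->is_virtual ? 0u : DMA_PHYSICALLY_ADDRESSED;

	printf("alloc runlist buffers, dma flags=0x%x\n", flags);
	return 0;
}

/* Native-only path: collects per-runlist pbdma/engine bitmasks. */
static int init_runlist_enginfo(struct gpu *g)
{
	(void)g;
	printf("collect pbdma/engine bitmasks (native only)\n");
	return 0;
}

int main(void)
{
	struct gpu native = { .is_virtual = false };
	struct gpu vgpu = { .is_virtual = true };

	/* gk20a_init_fifo_setup_sw_common() analogue */
	init_runlist_common(&native);
	init_runlist_enginfo(&native);

	/* vgpu_init_fifo_setup_sw() analogue: shares only the common step */
	init_runlist_common(&vgpu);
	return 0;
}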


@@ -564,11 +564,9 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
 int nvgpu_init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 {
 	struct fifo_runlist_info_gk20a *runlist;
-	struct fifo_engine_info_gk20a *engine_info;
 	unsigned int runlist_id;
 	u32 i;
 	size_t runlist_size;
-	u32 active_engine_id, pbdma_id, engine_id;
 	int err = 0;
 
 	nvgpu_log_fn(g, " ");
@@ -581,9 +579,6 @@ int nvgpu_init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 		goto clean_up_runlist;
 	}
 
-	(void) memset(f->runlist_info, 0,
-		(sizeof(struct fifo_runlist_info_gk20a) * f->max_runlists));
-
 	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
 		runlist = &f->runlist_info[runlist_id];
@@ -601,7 +596,7 @@ int nvgpu_init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 			goto clean_up_runlist;
 		}
 
-		runlist_size = (size_t)f->runlist_entry_size *
+		runlist_size = (size_t)f->runlist_entry_size *
 			(size_t)f->num_runlist_entries;
 		nvgpu_log(g, gpu_dbg_info,
 			"runlist_entries %d runlist size %zu",
@@ -609,7 +604,8 @@ int nvgpu_init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 
 		for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
 			err = nvgpu_dma_alloc_flags_sys(g,
-					NVGPU_DMA_PHYSICALLY_ADDRESSED,
+					g->is_virtual ?
+						0 : NVGPU_DMA_PHYSICALLY_ADDRESSED,
 					runlist_size,
 					&runlist->mem[i]);
 			if (err != 0) {
@@ -628,26 +624,6 @@ int nvgpu_init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 		/* None of buffers is pinned if this value doesn't change.
 		   Otherwise, one of them (cur_buffer) must have been pinned. */
 		runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
-
-		for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
-			if ((f->pbdma_map[pbdma_id] & BIT32(runlist_id)) != 0U) {
-				runlist->pbdma_bitmask |= BIT32(pbdma_id);
-			}
-		}
-		nvgpu_log(g, gpu_dbg_info, "runlist %d : pbdma bitmask 0x%x",
-			runlist_id, runlist->pbdma_bitmask);
-
-		for (engine_id = 0; engine_id < f->num_engines; ++engine_id) {
-			active_engine_id = f->active_engines_list[engine_id];
-			engine_info = &f->engine_info[active_engine_id];
-
-			if ((engine_info != NULL) &&
-			    (engine_info->runlist_id == runlist_id)) {
-				runlist->eng_bitmask |= BIT(active_engine_id);
-			}
-		}
-		nvgpu_log(g, gpu_dbg_info, "runlist %d : act eng bitmask 0x%x",
-			runlist_id, runlist->eng_bitmask);
 	}
 
 	nvgpu_log_fn(g, "done");


@@ -560,6 +560,44 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
 	return 0;
 }
 
+static int nvgpu_init_runlist_enginfo(struct gk20a *g, struct fifo_gk20a *f)
+{
+	struct fifo_runlist_info_gk20a *runlist;
+	struct fifo_engine_info_gk20a *engine_info;
+	unsigned int runlist_id;
+	u32 active_engine_id, pbdma_id, engine_id;
+
+	nvgpu_log_fn(g, " ");
+
+	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
+		runlist = &f->runlist_info[runlist_id];
+
+		for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
+			if ((f->pbdma_map[pbdma_id] & BIT32(runlist_id)) != 0U) {
+				runlist->pbdma_bitmask |= BIT32(pbdma_id);
+			}
+		}
+		nvgpu_log(g, gpu_dbg_info, "runlist %d : pbdma bitmask 0x%x",
+			runlist_id, runlist->pbdma_bitmask);
+
+		for (engine_id = 0; engine_id < f->num_engines; ++engine_id) {
+			active_engine_id = f->active_engines_list[engine_id];
+			engine_info = &f->engine_info[active_engine_id];
+
+			if ((engine_info != NULL) &&
+			    (engine_info->runlist_id == runlist_id)) {
+				runlist->eng_bitmask |= BIT(active_engine_id);
+			}
+		}
+		nvgpu_log(g, gpu_dbg_info, "runlist %d : act eng bitmask 0x%x",
+			runlist_id, runlist->eng_bitmask);
+	}
+
+	nvgpu_log_fn(g, "done");
+	return 0;
+}
+
 int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
 {
 	struct fifo_gk20a *f = &g->fifo;
@@ -625,6 +663,12 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
 		goto clean_up;
 	}
 
+	err = nvgpu_init_runlist_enginfo(g, f);
+	if (err != 0) {
+		nvgpu_err(g, "failed to init runlist engine info");
+		goto clean_up;
+	}
+
 	nvgpu_init_list_node(&f->free_chs);
 
 	err = nvgpu_mutex_init(&f->free_chs_mutex);


@@ -223,59 +223,6 @@ int vgpu_fifo_init_engine_info(struct fifo_gk20a *f)
 	return 0;
 }
 
-static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
-{
-	struct fifo_runlist_info_gk20a *runlist;
-	unsigned int runlist_id = -1;
-	u32 i;
-	u64 runlist_size;
-
-	nvgpu_log_fn(g, " ");
-
-	f->max_runlists = g->ops.fifo.eng_runlist_base_size();
-	f->runlist_info = nvgpu_kzalloc(g,
-			sizeof(struct fifo_runlist_info_gk20a) *
-			f->max_runlists);
-	if (!f->runlist_info)
-		goto clean_up_runlist;
-
-	(void) memset(f->runlist_info, 0,
-		(sizeof(struct fifo_runlist_info_gk20a) * f->max_runlists));
-
-	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
-		runlist = &f->runlist_info[runlist_id];
-
-		runlist->active_channels =
-			nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels,
-				BITS_PER_BYTE));
-		if (!runlist->active_channels)
-			goto clean_up_runlist;
-
-		runlist_size = sizeof(u16) * f->num_channels;
-		for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
-			int err = nvgpu_dma_alloc_sys(g, runlist_size,
-					&runlist->mem[i]);
-			if (err) {
-				nvgpu_err(g, "memory allocation failed");
-				goto clean_up_runlist;
-			}
-		}
-
-		nvgpu_mutex_init(&runlist->runlist_lock);
-
-		/* None of buffers is pinned if this value doesn't change.
-		   Otherwise, one of them (cur_buffer) must have been pinned. */
-		runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
-	}
-
-	nvgpu_log_fn(g, "done");
-	return 0;
-
-clean_up_runlist:
-	gk20a_fifo_delete_runlist(f);
-	nvgpu_log_fn(g, "fail");
-	return -ENOMEM;
-}
-
 static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 {
 	struct fifo_gk20a *f = &g->fifo;
@@ -292,6 +239,8 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 	f->g = g;
 	f->num_channels = priv->constants.num_channels;
+	f->runlist_entry_size = (u32)sizeof(u16);
+	f->num_runlist_entries = f->num_channels;
 	f->max_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);
 
 	f->userd_entry_size = 1 << ram_userd_base_shift_v();
@@ -317,7 +266,11 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 	g->ops.fifo.init_engine_info(f);
-	init_runlist(g, f);
+	err = nvgpu_init_runlist(g, f);
+	if (err != 0) {
+		nvgpu_err(g, "failed to init runlist");
+		goto clean_up;
+	}
 
 	nvgpu_init_list_node(&f->free_chs);
 
 	nvgpu_mutex_init(&f->free_chs_mutex);