diff --git a/drivers/gpu/nvgpu/common/fifo/runlist.c b/drivers/gpu/nvgpu/common/fifo/runlist.c
index a5a95526b..dab8be749 100644
--- a/drivers/gpu/nvgpu/common/fifo/runlist.c
+++ b/drivers/gpu/nvgpu/common/fifo/runlist.c
@@ -440,7 +440,7 @@ void nvgpu_runlist_swap_mem(struct gk20a *g, struct nvgpu_runlist_domain *domain
	 * mem becomes the previously scheduled buffer and it can be modified once
	 * the runlist lock is released.
	 */
-	rl_dbg(g, "Swapping mem for rl domain[%s]", domain->name);
+	rl_dbg(g, "Swapping mem for rl domain[%llu]", domain->domain_id);
 
	mem_tmp = domain->mem;
	domain->mem = domain->mem_hw;
@@ -452,8 +452,8 @@ static int nvgpu_runlist_domain_actual_submit(struct gk20a *g, struct nvgpu_runl
 {
	int ret = 0;
 
-	rl_dbg(g, "Runlist[%u]: submitting domain %s",
-		rl->id, rl->domain->name);
+	rl_dbg(g, "Runlist[%u]: submitting domain[%llu]",
+		rl->id, rl->domain->domain_id);
 
	if (swap_buffer) {
		nvgpu_runlist_swap_mem(g, rl->domain);
@@ -483,8 +483,8 @@ static int nvgpu_runlist_update_mem_locked(struct gk20a *g, struct nvgpu_runlist
	int ret = 0;
	bool add_entries;
 
-	rl_dbg(g, "updating runlist[%u], domain[%s], channel = [%u], op = %s",
-		rl->id, domain->name,
+	rl_dbg(g, "updating runlist[%u], domain[%llu], channel = [%u], op = %s",
+		rl->id, domain->domain_id,
		ch == NULL ? NVGPU_INVALID_CHANNEL_ID : ch->chid,
		add ? "add" : "remove");
 
@@ -673,8 +673,8 @@ static int runlist_submit_powered(struct gk20a *g, struct nvgpu_runlist *runlist
 
	runlist->domain = next_domain;
 
-	rl_dbg(g, "Runlist[%u]: switching to domain %s",
-		runlist->id, next_domain->name);
+	rl_dbg(g, "Runlist[%u]: switching to domain %llu",
+		runlist->id, next_domain->domain_id);
 
	err = nvgpu_runlist_domain_actual_submit(g, runlist,
			swap_buffer, wait_for_finish);
@@ -686,8 +686,8 @@ static int runlist_select_and_submit(struct gk20a *g, struct nvgpu_runl
 {
	int err;
 
-	rl_dbg(g, "Runlist[%u]: switching to domain %s",
-		runlist->id, next_domain->name);
+	rl_dbg(g, "Runlist[%u]: switching to domain %llu",
+		runlist->id, next_domain->domain_id);
 
	runlist->domain = next_domain;
 
@@ -783,7 +783,7 @@ void nvgpu_runlist_tick(struct gk20a *g, struct nvgpu_runlist_domain **rl_domain
		runlist = &f->active_runlists[i];
		err = runlist_switch_domain_and_submit(g, runlist, rl_domain[i]);
		if (err != 0) {
-			nvgpu_err(g, "Failed to schedule domain [%s]", rl_domain[i]->name);
+			nvgpu_err(g, "Failed to schedule domain [%llu]", rl_domain[i]->domain_id);
		}
	}
 }
@@ -1114,7 +1114,7 @@ void nvgpu_runlist_link_domain(struct nvgpu_runlist *runlist,
 }
 
 struct nvgpu_runlist_domain *nvgpu_runlist_domain_alloc(struct gk20a *g,
-		const char *name)
+		u64 domain_id)
 {
	struct nvgpu_runlist_domain *domain = nvgpu_kzalloc(g, sizeof(*domain));
	struct nvgpu_fifo *f = &g->fifo;
@@ -1125,7 +1125,7 @@ struct nvgpu_runlist_domain *nvgpu_runlist_domain_alloc(struct gk20a *g,
		return NULL;
	}
 
-	(void)strncpy(domain->name, name, sizeof(domain->name) - 1U);
+	domain->domain_id = domain_id;
 
	domain->mem = init_rl_mem(g, (u32)runlist_size);
	if (domain->mem == NULL) {
@@ -1166,7 +1166,7 @@ free_domain:
 }
 
 struct nvgpu_runlist_domain *nvgpu_rl_domain_get(struct gk20a *g, u32 runlist_id,
-		const char *name)
+		u64 domain_id)
 {
	struct nvgpu_fifo *f = &g->fifo;
	struct nvgpu_runlist *runlist = f->runlists[runlist_id];
@@ -1174,7 +1174,7 @@ struct nvgpu_runlist_domain *nvgpu_rl_domain_get(struct gk20a *g, u32 runlist_id
 
	nvgpu_list_for_each_entry(domain, &runlist->user_rl_domains,
			nvgpu_runlist_domain, domains_list) {
-		if (strcmp(domain->name, name) == 0) {
+		if (domain->domain_id == domain_id) {
			return domain;
		}
	}
@@ -1232,7 +1232,7 @@ static int nvgpu_runlist_alloc_shadow_rl_domain(struct gk20a *g)
	for (i = 0; i < g->fifo.num_runlists; i++) {
		struct nvgpu_runlist *runlist = &f->active_runlists[i];
 
-		runlist->shadow_rl_domain = nvgpu_runlist_domain_alloc(g, SHADOW_DOMAIN_NAME);
+		runlist->shadow_rl_domain = nvgpu_runlist_domain_alloc(g, SHADOW_DOMAIN_ID);
		if (runlist->shadow_rl_domain == NULL) {
			nvgpu_err(g, "memory allocation failed");
			/*
@@ -1242,8 +1242,8 @@ static int nvgpu_runlist_alloc_shadow_rl_domain(struct gk20a *g)
			return -ENOMEM;
		}
 
-		rl_dbg(g, "Allocated default domain for runlist[%u]: %s", runlist->id,
-			runlist->shadow_rl_domain->name);
+		rl_dbg(g, "Allocated default domain for runlist[%u]: %llu", runlist->id,
+			runlist->shadow_rl_domain->domain_id);
 
		runlist->domain = runlist->shadow_rl_domain;
	}
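The runlist.c hunks above replace every name-based reference with the numeric id: debug prints switch from %s to %llu, nvgpu_runlist_domain_alloc() stores a u64 instead of strncpy()ing into a fixed 32-byte buffer, and nvgpu_rl_domain_get() matches with a single integer compare instead of strcmp(). A minimal user-space sketch of the new lookup shape (the struct and list handling here are simplified stand-ins, not the driver's types):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rl_domain {
	uint64_t domain_id;		/* was: char name[32] */
	struct rl_domain *next;		/* stand-in for nvgpu's list node */
};

/* Stand-in for nvgpu_rl_domain_get(): one u64 compare per entry. */
static struct rl_domain *rl_domain_get(struct rl_domain *head, uint64_t id)
{
	struct rl_domain *d;

	for (d = head; d != NULL; d = d->next) {
		if (d->domain_id == id) {	/* was: strcmp(d->name, name) == 0 */
			return d;
		}
	}
	return NULL;
}

int main(void)
{
	struct rl_domain b = { .domain_id = 2, .next = NULL };
	struct rl_domain a = { .domain_id = 1, .next = &b };
	struct rl_domain *hit = rl_domain_get(&a, 2);

	printf("found domain %llu\n",
	       hit != NULL ? (unsigned long long)hit->domain_id : 0ULL);
	return 0;
}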
diff --git a/drivers/gpu/nvgpu/common/fifo/tsg.c b/drivers/gpu/nvgpu/common/fifo/tsg.c
index 40d217613..a6f64efe1 100644
--- a/drivers/gpu/nvgpu/common/fifo/tsg.c
+++ b/drivers/gpu/nvgpu/common/fifo/tsg.c
@@ -112,13 +112,13 @@ int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
		tsg->runlist = ch->runlist;
		if (tsg->rl_domain != NULL) {
			/*
-			 * The rl domain identifier is stashed in tsg->rl_domain->name
+			 * The rl domain identifier is stashed in tsg->rl_domain->domain_id
			 * when the tsg is bound to a domain, but at that point there
			 * are no channels yet to describe which runlist id should be
			 * used. Now we know.
			 */
			tsg->rl_domain = nvgpu_rl_domain_get(g, tsg->runlist->id,
-					tsg->rl_domain->name);
+					tsg->rl_domain->domain_id);
			WARN_ON(tsg->rl_domain == NULL);
		}
	} else {
@@ -175,9 +175,9 @@ int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, struct nvgpu_nvs_domain *nnvs_d
	 * The domain ptr will get updated with the right id once the runlist
	 * gets specified based on the first channel.
	 */
-	rl_domain = nvgpu_rl_domain_get(g, 0, name);
+	rl_domain = nvgpu_rl_domain_get(g, 0, nnvs_domain->id);
	if (rl_domain == NULL) {
-		nvgpu_err(g, "rl domain not found (%s)", name);
+		nvgpu_err(g, "rl domain not found (%s) having Id[%llu]", name, nnvs_domain->id);
		/*
		 * This shouldn't happen because the nvs domain guarantees RL domains.
		 *
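The tsg.c hunks keep the existing two-step binding: nvgpu_tsg_bind_domain() runs before any channel exists, so the rl domain is resolved against runlist 0 as a placeholder, and nvgpu_tsg_bind_channel() later re-resolves it on the real runlist, now keyed by the stashed domain_id rather than the name. A hedged user-space model of that flow (the per-runlist array and identifiers are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define NUM_RUNLISTS 2U

struct rl_domain {
	uint64_t domain_id;
};

/* One rl_domain per runlist, all carrying the same scheduling domain id. */
static struct rl_domain domains[NUM_RUNLISTS] = {
	{ .domain_id = 7 }, { .domain_id = 7 },
};

/* Stand-in for nvgpu_rl_domain_get(g, runlist_id, domain_id). */
static struct rl_domain *rl_domain_get(uint32_t runlist_id, uint64_t id)
{
	return (domains[runlist_id].domain_id == id) ?
		&domains[runlist_id] : NULL;
}

int main(void)
{
	/* bind_domain time: no channel yet, so resolve against runlist 0. */
	struct rl_domain *placeholder = rl_domain_get(0, 7);

	/* bind_channel time: the first channel names the real runlist. */
	uint32_t runlist_id = 1;
	struct rl_domain *actual =
		rl_domain_get(runlist_id, placeholder->domain_id);

	printf("rebound to runlist %u, domain %llu\n", runlist_id,
	       (unsigned long long)actual->domain_id);
	return 0;
}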
diff --git a/drivers/gpu/nvgpu/common/nvs/nvs_sched.c b/drivers/gpu/nvgpu/common/nvs/nvs_sched.c
index 104f2fcbc..6b4a71da2 100644
--- a/drivers/gpu/nvgpu/common/nvs/nvs_sched.c
+++ b/drivers/gpu/nvgpu/common/nvs/nvs_sched.c
@@ -57,6 +57,10 @@ struct nvgpu_nvs_worker_item {
	nvgpu_atomic_t state;
 };
 
+
+static struct nvgpu_nvs_domain *
+nvgpu_nvs_domain_by_id_locked(struct gk20a *g, u64 domain_id);
+
 static inline struct nvgpu_nvs_worker_item *
 nvgpu_nvs_worker_item_from_worker_item(struct nvgpu_list_node *node)
 {
@@ -131,6 +135,7 @@ static void nvgpu_nvs_worker_wakeup_process_item(struct nvgpu_list_node *work_it
	struct gk20a *g = work->g;
	int ret = 0;
	struct nvgpu_nvs_scheduler *sched = g->scheduler;
+	struct nvgpu_nvs_domain *nvgpu_nvs_domain;
	struct nvs_domain *nvs_domain;
	struct nvgpu_runlist *runlist = work->rl;
	struct nvgpu_runlist_domain *rl_domain = work->rl_domain;
@@ -140,14 +145,16 @@ static void nvgpu_nvs_worker_wakeup_process_item(struct nvgpu_list_node *work_it
	if (rl_domain == NULL) {
		nvs_domain = sched->shadow_domain->parent;
		rl_domain = runlist->shadow_rl_domain;
-	} else if (strcmp(rl_domain->name, SHADOW_DOMAIN_NAME) == 0) {
+	} else if (rl_domain->domain_id == SHADOW_DOMAIN_ID) {
		nvs_domain = sched->shadow_domain->parent;
	} else {
-		nvs_domain = nvs_domain_by_name(sched->sched, rl_domain->name);
-		if (nvs_domain == NULL) {
-			nvgpu_err(g, "Unable to find domain[%s]", rl_domain->name);
+		nvgpu_nvs_domain = nvgpu_nvs_domain_by_id_locked(g, rl_domain->domain_id);
+		if (nvgpu_nvs_domain == NULL) {
+			nvgpu_err(g, "Unable to find domain[%llu]", rl_domain->domain_id);
			ret = -EINVAL;
			goto done;
+		} else {
+			nvs_domain = nvgpu_nvs_domain->parent;
		}
	}
 
@@ -499,11 +506,9 @@ int nvgpu_nvs_open(struct gk20a *g)
		goto unlock;
	}
 
-	if (nvgpu_rl_domain_get(g, 0, SHADOW_DOMAIN_NAME) == NULL) {
-		err = nvgpu_nvs_gen_shadow_domain(g);
-		if (err != 0) {
-			goto unlock;
-		}
+	err = nvgpu_nvs_gen_shadow_domain(g);
+	if (err != 0) {
+		goto unlock;
	}
 
	err = nvgpu_nvs_worker_init(g);
@@ -539,14 +544,14 @@ static u64 nvgpu_nvs_new_id(struct gk20a *g)
 }
 
 static int nvgpu_nvs_create_rl_domain_mem(struct gk20a *g,
-		struct nvgpu_nvs_domain *domain, const char *name)
+		struct nvgpu_nvs_domain *domain)
 {
	struct nvgpu_fifo *f = &g->fifo;
	u32 i, j;
	int err = 0;
 
	for (i = 0U; i < f->num_runlists; i++) {
-		domain->rl_domains[i] = nvgpu_runlist_domain_alloc(g, name);
+		domain->rl_domains[i] = nvgpu_runlist_domain_alloc(g, domain->id);
		if (domain->rl_domains[i] == NULL) {
			err = -ENOMEM;
			break;
@@ -599,7 +604,7 @@ int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u64 timeslice,
		goto unlock;
	}
 
-	err = nvgpu_nvs_create_rl_domain_mem(g, nvgpu_dom, name);
+	err = nvgpu_nvs_create_rl_domain_mem(g, nvgpu_dom);
	if (err != 0) {
		nvs_domain_destroy(sched->sched, nvgpu_dom->parent);
		nvgpu_kfree(g, nvgpu_dom->rl_domains);
@@ -622,7 +627,7 @@ unlock:
	return err;
 }
 
-struct nvgpu_nvs_domain *
+static struct nvgpu_nvs_domain *
 nvgpu_nvs_domain_by_id_locked(struct gk20a *g, u64 domain_id)
 {
	struct nvgpu_nvs_scheduler *sched = g->scheduler;
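In nvs_sched.c the shadow domain is now recognized by the SHADOW_DOMAIN_ID sentinel (U64_MAX, per the header change below) instead of a strcmp() against SHADOW_DOMAIN_NAME, and user domains are resolved through the newly static nvgpu_nvs_domain_by_id_locked(). A small sketch of the resulting three-way dispatch in the worker; it assumes, as the sentinel choice implies, that the id allocator never hands out U64_MAX for a user domain:

#include <stdint.h>
#include <stdio.h>

#define SHADOW_DOMAIN_ID UINT64_MAX

/*
 * Stand-in for the rl_domain checks in the worker; returns a description
 * of which nvs_domain the real code would pick.
 */
static const char *pick_nvs_domain(const uint64_t *rl_domain_id)
{
	if (rl_domain_id == NULL) {
		return "shadow domain (no rl_domain queued)";
	} else if (*rl_domain_id == SHADOW_DOMAIN_ID) {
		return "shadow domain (sentinel id)";
	}
	return "user domain: resolve via nvgpu_nvs_domain_by_id_locked()";
}

int main(void)
{
	uint64_t shadow = SHADOW_DOMAIN_ID;
	uint64_t user = 3;

	printf("%s\n", pick_nvs_domain(NULL));
	printf("%s\n", pick_nvs_domain(&shadow));
	printf("%s\n", pick_nvs_domain(&user));
	return 0;
}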
diff --git a/drivers/gpu/nvgpu/hal/fifo/runlist_fifo_ga10b_fusa.c b/drivers/gpu/nvgpu/hal/fifo/runlist_fifo_ga10b_fusa.c
index 643582aaf..c2a8be3d5 100644
--- a/drivers/gpu/nvgpu/hal/fifo/runlist_fifo_ga10b_fusa.c
+++ b/drivers/gpu/nvgpu/hal/fifo/runlist_fifo_ga10b_fusa.c
@@ -72,7 +72,7 @@ void ga10b_runlist_hw_submit(struct gk20a *g, struct nvgpu_runlist *runlist)
			runlist_submit_base_hi_ptr_hi_f(runlist_iova_hi));
	}
 
-	rl_dbg(g, "Submitting domain[%s], mem=0x%16llx", runlist->domain->name,
+	rl_dbg(g, "Submitting domain[%llu], mem=0x%16llx", runlist->domain->domain_id,
		(u64)nvgpu_mem_get_addr(g, &runlist->domain->mem_hw->mem));
 
	/* TODO offset in runlist support */
diff --git a/drivers/gpu/nvgpu/include/nvgpu/runlist.h b/drivers/gpu/nvgpu/include/nvgpu/runlist.h
index a90192f45..9007cb70f 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/runlist.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/runlist.h
@@ -78,6 +78,7 @@ struct nvgpu_pbdma_info;
 #define NVGPU_INVALID_RUNLIST_ID	U32_MAX
 
 #define SHADOW_DOMAIN_NAME "(shadow)"
+#define SHADOW_DOMAIN_ID U64_MAX
 
 /*
  * Updates to this memory are still serialized by the runlist lock.
@@ -122,7 +123,7 @@ struct nvgpu_runlist_domain {
	/**
	 * Placeholder for metadata that will come in further patches.
	 */
-	char name[32];
+	u64 domain_id;
	/**
	 * All created domains are tracked in a list.
	 *
@@ -201,7 +202,7 @@ struct nvgpu_runlist {
 
 bool nvgpu_rl_domain_exists(struct gk20a *g, const char *name);
 struct nvgpu_runlist_domain *nvgpu_runlist_domain_alloc(struct gk20a *g,
-		const char *name);
+		u64 domain_id);
 void nvgpu_runlist_domain_free(struct gk20a *g,
		struct nvgpu_runlist_domain *domain);
 void nvgpu_runlist_swap_mem(struct gk20a *g, struct nvgpu_runlist_domain *domain);
@@ -210,7 +211,7 @@ void nvgpu_runlist_link_domain(struct nvgpu_runlist *runlist,
 void nvgpu_runlist_unlink_domain(struct nvgpu_runlist *runlist,
		struct nvgpu_runlist_domain *domain);
 struct nvgpu_runlist_domain *nvgpu_rl_domain_get(struct gk20a *g, u32 runlist_id,
-		const char *name);
+		u64 domain_id);
 
 /**
  * @brief Schedule runlist domain
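The header change swaps the fixed 32-byte name buffer in struct nvgpu_runlist_domain for a u64 id, which also shrinks the field itself. A quick user-space check of just that size effect (the surrounding driver struct layout is deliberately not reproduced here):

#include <stdint.h>
#include <stdio.h>

struct domain_with_name { char name[32]; };		/* old field */
struct domain_with_id   { uint64_t domain_id; };	/* new field */

int main(void)
{
	printf("name[32]: %zu bytes, u64 domain_id: %zu bytes\n",
	       sizeof(struct domain_with_name),
	       sizeof(struct domain_with_id));
	return 0;
}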