gpu: nvgpu: modify rl_domain member

KMD needs to send the domain id and the GPU_VA of each
struct nvgpu_runlist_domain to the GSP. In the current
implementation, struct nvgpu_runlist_domain stores the
domain name instead of the domain id, which requires an
additional search by name every time an update needs to
be submitted to the GSP.

Modify struct nvgpu_runlist_domain to store the domain id
instead of the domain name. This simplifies the flow and
avoids the unnecessary search.

Remove the conditional check for the existence of the
shadow domain, as it is dead code: the shadow domain is
never linked into the list of domains inside
struct nvgpu_runlist, so the lookup could never find it.

Jira NVGPU-8610

Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Change-Id: I0d67cfa93d89186240290e933aa750702b14f4f0
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2744890
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -440,7 +440,7 @@ void nvgpu_runlist_swap_mem(struct gk20a *g, struct nvgpu_runlist_domain *domain
 	 * mem becomes the previously scheduled buffer and it can be modified once
 	 * the runlist lock is released.
 	 */
-	rl_dbg(g, "Swapping mem for rl domain[%s]", domain->name);
+	rl_dbg(g, "Swapping mem for rl domain[%llu]", domain->domain_id);
 	mem_tmp = domain->mem;
 	domain->mem = domain->mem_hw;
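
The swap above is a double-buffer scheme. A minimal standalone sketch
of the idea (simplified stand-in types, not the nvgpu definitions):

    #include <stdint.h>

    /* Simplified stand-ins; illustrative only. */
    struct rl_mem { uint64_t entries[8]; };

    struct domain {
            struct rl_mem *mem;    /* staging buffer, safe to rewrite */
            struct rl_mem *mem_hw; /* buffer the hardware runs from */
    };

    /* Mirrors what nvgpu_runlist_swap_mem() does: the freshly built
     * buffer goes to the hardware slot, and the previously scheduled
     * one becomes the staging buffer once the runlist lock is
     * released. */
    static void swap_mem(struct domain *d)
    {
            struct rl_mem *tmp = d->mem;

            d->mem = d->mem_hw;
            d->mem_hw = tmp;
    }
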
@@ -452,8 +452,8 @@ static int nvgpu_runlist_domain_actual_submit(struct gk20a *g, struct nvgpu_runl
 {
 	int ret = 0;
-	rl_dbg(g, "Runlist[%u]: submitting domain %s",
-		rl->id, rl->domain->name);
+	rl_dbg(g, "Runlist[%u]: submitting domain[%llu]",
+		rl->id, rl->domain->domain_id);
 	if (swap_buffer) {
 		nvgpu_runlist_swap_mem(g, rl->domain);
@@ -483,8 +483,8 @@ static int nvgpu_runlist_update_mem_locked(struct gk20a *g, struct nvgpu_runlist
 	int ret = 0;
 	bool add_entries;
-	rl_dbg(g, "updating runlist[%u], domain[%s], channel = [%u], op = %s",
-		rl->id, domain->name,
+	rl_dbg(g, "updating runlist[%u], domain[%llu], channel = [%u], op = %s",
+		rl->id, domain->domain_id,
 		ch == NULL ? NVGPU_INVALID_CHANNEL_ID : ch->chid,
 		add ? "add" : "remove");
@@ -673,8 +673,8 @@ static int runlist_submit_powered(struct gk20a *g, struct nvgpu_runlist *runlist
 	runlist->domain = next_domain;
-	rl_dbg(g, "Runlist[%u]: switching to domain %s",
-		runlist->id, next_domain->name);
+	rl_dbg(g, "Runlist[%u]: switching to domain %llu",
+		runlist->id, next_domain->domain_id);
 	err = nvgpu_runlist_domain_actual_submit(g, runlist, swap_buffer, wait_for_finish);
@@ -686,8 +686,8 @@ static int runlist_select_and_submit(struct gk20a *g, struct nvgpu_runlist *runl
 {
 	int err;
-	rl_dbg(g, "Runlist[%u]: switching to domain %s",
-		runlist->id, next_domain->name);
+	rl_dbg(g, "Runlist[%u]: switching to domain %llu",
+		runlist->id, next_domain->domain_id);
 	runlist->domain = next_domain;
@@ -783,7 +783,7 @@ void nvgpu_runlist_tick(struct gk20a *g, struct nvgpu_runlist_domain **rl_domain
 		runlist = &f->active_runlists[i];
 		err = runlist_switch_domain_and_submit(g, runlist, rl_domain[i]);
 		if (err != 0) {
-			nvgpu_err(g, "Failed to schedule domain [%s]", rl_domain[i]->name);
+			nvgpu_err(g, "Failed to schedule domain [%llu]", rl_domain[i]->domain_id);
 		}
 	}
 }
@@ -1114,7 +1114,7 @@ void nvgpu_runlist_link_domain(struct nvgpu_runlist *runlist,
 }
 struct nvgpu_runlist_domain *nvgpu_runlist_domain_alloc(struct gk20a *g,
-		const char *name)
+		u64 domain_id)
 {
 	struct nvgpu_runlist_domain *domain = nvgpu_kzalloc(g, sizeof(*domain));
 	struct nvgpu_fifo *f = &g->fifo;
@@ -1125,7 +1125,7 @@ struct nvgpu_runlist_domain *nvgpu_runlist_domain_alloc(struct gk20a *g,
 		return NULL;
 	}
-	(void)strncpy(domain->name, name, sizeof(domain->name) - 1U);
+	domain->domain_id = domain_id;
 	domain->mem = init_rl_mem(g, (u32)runlist_size);
 	if (domain->mem == NULL) {
@@ -1166,7 +1166,7 @@ free_domain:
 }
 struct nvgpu_runlist_domain *nvgpu_rl_domain_get(struct gk20a *g, u32 runlist_id,
-		const char *name)
+		u64 domain_id)
 {
 	struct nvgpu_fifo *f = &g->fifo;
 	struct nvgpu_runlist *runlist = f->runlists[runlist_id];
@@ -1174,7 +1174,7 @@ struct nvgpu_runlist_domain *nvgpu_rl_domain_get(struct gk20a *g, u32 runlist_id
 	nvgpu_list_for_each_entry(domain, &runlist->user_rl_domains, nvgpu_runlist_domain,
 			domains_list) {
-		if (strcmp(domain->name, name) == 0) {
+		if (domain->domain_id == domain_id) {
 			return domain;
 		}
 	}
@@ -1232,7 +1232,7 @@ static int nvgpu_runlist_alloc_shadow_rl_domain(struct gk20a *g)
 	for (i = 0; i < g->fifo.num_runlists; i++) {
 		struct nvgpu_runlist *runlist = &f->active_runlists[i];
-		runlist->shadow_rl_domain = nvgpu_runlist_domain_alloc(g, SHADOW_DOMAIN_NAME);
+		runlist->shadow_rl_domain = nvgpu_runlist_domain_alloc(g, SHADOW_DOMAIN_ID);
 		if (runlist->shadow_rl_domain == NULL) {
 			nvgpu_err(g, "memory allocation failed");
 			/*
@@ -1242,8 +1242,8 @@ static int nvgpu_runlist_alloc_shadow_rl_domain(struct gk20a *g)
 			return -ENOMEM;
 		}
-		rl_dbg(g, "Allocated default domain for runlist[%u]: %s", runlist->id,
-			runlist->shadow_rl_domain->name);
+		rl_dbg(g, "Allocated default domain for runlist[%u]: %llu", runlist->id,
+			runlist->shadow_rl_domain->domain_id);
 		runlist->domain = runlist->shadow_rl_domain;
 	}

@@ -112,13 +112,13 @@ int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
 		tsg->runlist = ch->runlist;
 		if (tsg->rl_domain != NULL) {
 			/*
-			 * The rl domain identifier is stashed in tsg->rl_domain->name
+			 * The rl domain identifier is stashed in tsg->rl_domain->domain_id
 			 * when the tsg is bound to a domain, but at that point there
 			 * are no channels yet to describe which runlist id should be
 			 * used. Now we know.
 			 */
 			tsg->rl_domain = nvgpu_rl_domain_get(g, tsg->runlist->id,
-					tsg->rl_domain->name);
+					tsg->rl_domain->domain_id);
 			WARN_ON(tsg->rl_domain == NULL);
 		}
 	} else {
@@ -175,9 +175,9 @@ int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, struct nvgpu_nvs_domain *nnvs_d
 	 * The domain ptr will get updated with the right id once the runlist
 	 * gets specified based on the first channel.
 	 */
-	rl_domain = nvgpu_rl_domain_get(g, 0, name);
+	rl_domain = nvgpu_rl_domain_get(g, 0, nnvs_domain->id);
 	if (rl_domain == NULL) {
-		nvgpu_err(g, "rl domain not found (%s)", name);
+		nvgpu_err(g, "rl domain not found (%s) having Id[%llu]", name, nnvs_domain->id);
 		/*
 		 * This shouldn't happen because the nvs domain guarantees RL domains.
 		 *

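The two hunks above implement a two-step resolution: at domain-bind
time no channel exists yet, so the rl domain is looked up against
runlist 0 and only its domain_id is trusted; once the first channel
pins down the real runlist, the same id is re-resolved against it.
A minimal standalone sketch of that flow (simplified stand-in types
and a hypothetical fixed-size table, not the nvgpu structures):

    #include <stdint.h>
    #include <stddef.h>

    struct rl_domain { uint64_t domain_id; };

    /* Hypothetical per-runlist table: every runlist owns its own
     * instance of each domain, keyed by the same domain_id. */
    #define NUM_RUNLISTS 2U
    #define NUM_DOMAINS  4U
    static struct rl_domain table[NUM_RUNLISTS][NUM_DOMAINS];

    static struct rl_domain *rl_domain_get(uint32_t runlist_id, uint64_t id)
    {
            size_t i;

            for (i = 0; i < NUM_DOMAINS; i++) {
                    if (table[runlist_id][i].domain_id == id)
                            return &table[runlist_id][i];
            }
            return NULL;
    }

    struct tsg { struct rl_domain *rl_domain; };

    /* Step 1: bind time, runlist still unknown, runlist 0 stands in. */
    static void tsg_bind_domain(struct tsg *tsg, uint64_t domain_id)
    {
            tsg->rl_domain = rl_domain_get(0, domain_id);
    }

    /* Step 2: the first channel reveals the real runlist; re-resolve
     * the stashed domain_id against it. */
    static void tsg_bind_channel(struct tsg *tsg, uint32_t runlist_id)
    {
            if (tsg->rl_domain != NULL)
                    tsg->rl_domain = rl_domain_get(runlist_id,
                                    tsg->rl_domain->domain_id);
    }
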
@@ -57,6 +57,10 @@ struct nvgpu_nvs_worker_item {
 	nvgpu_atomic_t state;
 };
+static struct nvgpu_nvs_domain *
+nvgpu_nvs_domain_by_id_locked(struct gk20a *g, u64 domain_id);
 static inline struct nvgpu_nvs_worker_item *
 nvgpu_nvs_worker_item_from_worker_item(struct nvgpu_list_node *node)
 {
@@ -131,6 +135,7 @@ static void nvgpu_nvs_worker_wakeup_process_item(struct nvgpu_list_node *work_it
 	struct gk20a *g = work->g;
 	int ret = 0;
 	struct nvgpu_nvs_scheduler *sched = g->scheduler;
+	struct nvgpu_nvs_domain *nvgpu_nvs_domain;
 	struct nvs_domain *nvs_domain;
 	struct nvgpu_runlist *runlist = work->rl;
 	struct nvgpu_runlist_domain *rl_domain = work->rl_domain;
@@ -140,14 +145,16 @@ static void nvgpu_nvs_worker_wakeup_process_item(struct nvgpu_list_node *work_it
 	if (rl_domain == NULL) {
 		nvs_domain = sched->shadow_domain->parent;
 		rl_domain = runlist->shadow_rl_domain;
-	} else if (strcmp(rl_domain->name, SHADOW_DOMAIN_NAME) == 0) {
+	} else if (rl_domain->domain_id == SHADOW_DOMAIN_ID) {
 		nvs_domain = sched->shadow_domain->parent;
 	} else {
-		nvs_domain = nvs_domain_by_name(sched->sched, rl_domain->name);
-		if (nvs_domain == NULL) {
-			nvgpu_err(g, "Unable to find domain[%s]", rl_domain->name);
+		nvgpu_nvs_domain = nvgpu_nvs_domain_by_id_locked(g, rl_domain->domain_id);
+		if (nvgpu_nvs_domain == NULL) {
+			nvgpu_err(g, "Unable to find domain[%llu]", rl_domain->domain_id);
 			ret = -EINVAL;
 			goto done;
+		} else {
+			nvs_domain = nvgpu_nvs_domain->parent;
 		}
 	}
@@ -499,12 +506,10 @@ int nvgpu_nvs_open(struct gk20a *g)
 		goto unlock;
 	}
-	if (nvgpu_rl_domain_get(g, 0, SHADOW_DOMAIN_NAME) == NULL) {
-		err = nvgpu_nvs_gen_shadow_domain(g);
-		if (err != 0) {
-			goto unlock;
-		}
-	}
+	err = nvgpu_nvs_gen_shadow_domain(g);
+	if (err != 0) {
+		goto unlock;
+	}
 	err = nvgpu_nvs_worker_init(g);
 	if (err != 0) {
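
Why the guard removed above was dead code: nvgpu_rl_domain_get() walks
only the runlist's user domain list, while the shadow domain is held in
a separate field and is never linked into that list, so the lookup
always returned NULL and the branch was taken unconditionally. In
simplified form (stand-in types, illustrative only):

    #include <stdint.h>

    struct rl_domain { uint64_t domain_id; };

    struct runlist {
            /* the only list nvgpu_rl_domain_get() searches */
            struct rl_domain *user_rl_domains;
            /* held aside and never linked into the list above */
            struct rl_domain *shadow_rl_domain;
    };
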
@@ -539,14 +544,14 @@ static u64 nvgpu_nvs_new_id(struct gk20a *g)
 }
 static int nvgpu_nvs_create_rl_domain_mem(struct gk20a *g,
-		struct nvgpu_nvs_domain *domain, const char *name)
+		struct nvgpu_nvs_domain *domain)
 {
 	struct nvgpu_fifo *f = &g->fifo;
 	u32 i, j;
 	int err = 0;
 	for (i = 0U; i < f->num_runlists; i++) {
-		domain->rl_domains[i] = nvgpu_runlist_domain_alloc(g, name);
+		domain->rl_domains[i] = nvgpu_runlist_domain_alloc(g, domain->id);
 		if (domain->rl_domains[i] == NULL) {
 			err = -ENOMEM;
 			break;
@@ -599,7 +604,7 @@ int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u64 timeslice,
 		goto unlock;
 	}
-	err = nvgpu_nvs_create_rl_domain_mem(g, nvgpu_dom, name);
+	err = nvgpu_nvs_create_rl_domain_mem(g, nvgpu_dom);
 	if (err != 0) {
 		nvs_domain_destroy(sched->sched, nvgpu_dom->parent);
 		nvgpu_kfree(g, nvgpu_dom->rl_domains);
@@ -622,7 +627,7 @@ unlock:
 	return err;
 }
-struct nvgpu_nvs_domain *
+static struct nvgpu_nvs_domain *
 nvgpu_nvs_domain_by_id_locked(struct gk20a *g, u64 domain_id)
 {
 	struct nvgpu_nvs_scheduler *sched = g->scheduler;

@@ -72,7 +72,7 @@ void ga10b_runlist_hw_submit(struct gk20a *g, struct nvgpu_runlist *runlist)
 			runlist_submit_base_hi_ptr_hi_f(runlist_iova_hi));
 	}
-	rl_dbg(g, "Submitting domain[%s], mem=0x%16llx", runlist->domain->name,
+	rl_dbg(g, "Submitting domain[%llu], mem=0x%16llx", runlist->domain->domain_id,
 		(u64)nvgpu_mem_get_addr(g, &runlist->domain->mem_hw->mem));
 	/* TODO offset in runlist support */

@@ -78,6 +78,7 @@ struct nvgpu_pbdma_info;
 #define NVGPU_INVALID_RUNLIST_ID U32_MAX
 #define SHADOW_DOMAIN_NAME "(shadow)"
+#define SHADOW_DOMAIN_ID U64_MAX
 /*
  * Updates to this memory are still serialized by the runlist lock.
@@ -122,7 +123,7 @@ struct nvgpu_runlist_domain {
 	/**
 	 * Placeholder for metadata that will come in further patches.
 	 */
-	char name[32];
+	u64 domain_id;
 	/**
 	 * All created domains are tracked in a list.
 	 *
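
A note on the id space (an assumption based on nvgpu_nvs_new_id()
elsewhere in this patch): user domain ids come from a monotonically
increasing counter, while U64_MAX is reserved as the shadow domain's
sentinel, so the two cannot collide in practice. Sketch:

    #include <stdint.h>

    #define SHADOW_DOMAIN_ID UINT64_MAX

    /* Hypothetical counter mirroring nvgpu_nvs_new_id(): ids start
     * low and grow monotonically, never reaching the sentinel. */
    static uint64_t next_id = 1;

    static uint64_t new_domain_id(void)
    {
            return next_id++;
    }
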
@@ -201,7 +202,7 @@ struct nvgpu_runlist {
 bool nvgpu_rl_domain_exists(struct gk20a *g, const char *name);
 struct nvgpu_runlist_domain *nvgpu_runlist_domain_alloc(struct gk20a *g,
-		const char *name);
+		u64 domain_id);
 void nvgpu_runlist_domain_free(struct gk20a *g,
 		struct nvgpu_runlist_domain *domain);
 void nvgpu_runlist_swap_mem(struct gk20a *g, struct nvgpu_runlist_domain *domain);
@@ -210,7 +211,7 @@ void nvgpu_runlist_link_domain(struct nvgpu_runlist *runlist,
 void nvgpu_runlist_unlink_domain(struct nvgpu_runlist *runlist,
 		struct nvgpu_runlist_domain *domain);
 struct nvgpu_runlist_domain *nvgpu_rl_domain_get(struct gk20a *g, u32 runlist_id,
-		const char *name);
+		u64 domain_id);
 /**
  * @brief Schedule runlist domain