Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: track runlist domains in list
There will be multiple scheduling domains managed dynamically. Move from
strictly one domain to a list of domains, although in practice there is still
only one default domain. This facilitates future changes that operate on many
domains.

Jira NVGPU-6425

Change-Id: I6760c651be6c01791708740a821aa564d7a6b7b8
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2621212
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Committed by: mobile promotions
Parent: 6885071c64
Commit: c8fa7f57f6
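The change replaces the runlist's single owned domain pointer with an intrusive list: each nvgpu_runlist_domain embeds a domains_list node that is linked onto nvgpu_runlist::domains, and cleanup drains that list instead of freeing one pointer. Below is a minimal, self-contained sketch of that pattern in plain C; the rl_* helpers and simplified structs are illustrative stand-ins for nvgpu's nvgpu_list_* API and the types touched by this commit, not nvgpu code.

/* Minimal, self-contained sketch of the intrusive-list pattern adopted by this
 * change. The rl_* names are hypothetical stand-ins for nvgpu's nvgpu_list_*
 * helpers and the structs shown in the diff below. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rl_list_node {
        struct rl_list_node *prev, *next;
};

static void rl_list_init(struct rl_list_node *head)
{
        head->prev = head;
        head->next = head;
}

static int rl_list_empty(const struct rl_list_node *head)
{
        return head->next == head;
}

static void rl_list_add_tail(struct rl_list_node *node, struct rl_list_node *head)
{
        node->prev = head->prev;
        node->next = head;
        head->prev->next = node;
        head->prev = node;
}

static void rl_list_del(struct rl_list_node *node)
{
        node->prev->next = node->next;
        node->next->prev = node->prev;
}

/* Recover the containing struct from the embedded list node, as
 * nvgpu_runlist_domain_from_domains_list() does in the diff. */
#define rl_entry(nodep, type, member) \
        ((type *)((char *)(nodep) - offsetof(type, member)))

struct rl_domain {
        char name[32];
        struct rl_list_node domains_list;       /* linked into rl.domains */
};

struct rl {
        struct rl_domain *domain;               /* currently active domain, non-owning */
        struct rl_list_node domains;            /* list head of all domains */
};

static struct rl_domain *rl_domain_alloc(struct rl *rl, const char *name)
{
        struct rl_domain *d = calloc(1, sizeof(*d));

        if (d == NULL) {
                return NULL;
        }
        (void)strncpy(d->name, name, sizeof(d->name) - 1U);
        rl_list_add_tail(&d->domains_list, &rl->domains);       /* track in the list */
        return d;
}

int main(void)
{
        struct rl rl;

        rl_list_init(&rl.domains);
        rl.domain = rl_domain_alloc(&rl, "(default)");   /* one default domain, as in setup_sw */

        /* Cleanup mirrors nvgpu_runlist_cleanup_sw(): drain the list, then
         * reset the non-owning active pointer. */
        while (!rl_list_empty(&rl.domains)) {
                struct rl_domain *d = rl_entry(rl.domains.next,
                                               struct rl_domain, domains_list);
                rl_list_del(&d->domains_list);
                printf("freeing domain %s\n", d->name);
                free(d);
        }
        rl.domain = NULL;
        return 0;
}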
@@ -660,6 +660,9 @@ static void free_rl_mem(struct gk20a *g, struct nvgpu_runlist_mem *mem)
static void nvgpu_runlist_domain_free(struct gk20a *g,
                struct nvgpu_runlist_domain *domain)
{
        /* added in nvgpu_runlist_domain_alloc() */
        nvgpu_list_del(&domain->domains_list);

        free_rl_mem(g, domain->mem);
        domain->mem = NULL;
        free_rl_mem(g, domain->mem_hw);
@@ -687,10 +690,16 @@ void nvgpu_runlist_cleanup_sw(struct gk20a *g)
        for (i = 0; i < f->num_runlists; i++) {
                runlist = &f->active_runlists[i];

                if (runlist->domain != NULL) {
                        nvgpu_runlist_domain_free(g, runlist->domain);
                        runlist->domain = NULL;
                while (!nvgpu_list_empty(&runlist->domains)) {
                        struct nvgpu_runlist_domain *domain;

                        domain = nvgpu_list_first_entry(&runlist->domains,
                                        nvgpu_runlist_domain,
                                        domains_list);
                        nvgpu_runlist_domain_free(g, domain);
                }
                /* this isn't an owning pointer, just reset */
                runlist->domain = NULL;

                nvgpu_mutex_destroy(&runlist->runlist_lock);
                f->runlists[runlist->id] = NULL;
@@ -844,15 +853,20 @@ static struct nvgpu_runlist_mem *init_rl_mem(struct gk20a *g, u32 runlist_size)
        return mem;
}

static struct nvgpu_runlist_domain *nvgpu_init_rl_domain(struct gk20a *g, u32 runlist_size)
static struct nvgpu_runlist_domain *nvgpu_runlist_domain_alloc(struct gk20a *g,
                struct nvgpu_runlist *runlist, const char *name)
{
        struct nvgpu_runlist_domain *domain = nvgpu_kzalloc(g, sizeof(*domain));
        struct nvgpu_fifo *f = &g->fifo;
        size_t runlist_size = (size_t)f->runlist_entry_size *
                        (size_t)f->num_runlist_entries;

        if (domain == NULL) {
                return NULL;
        }

        (void)strncpy(domain->name, name, sizeof(domain->name) - 1U);

        domain->mem = init_rl_mem(g, runlist_size);
        if (domain->mem == NULL) {
                goto free_domain;
@@ -877,6 +891,9 @@ static struct nvgpu_runlist_domain *nvgpu_init_rl_domain(struct gk20a *g, u32 ru
                goto free_active_channels;
        }

        /* deleted in nvgpu_runlist_domain_free() */
        nvgpu_list_add_tail(&domain->domains_list, &runlist->domains);

        return domain;
free_active_channels:
        nvgpu_kfree(g, domain->active_channels);
@@ -889,14 +906,29 @@ free_domain:
        return NULL;
}

static int nvgpu_init_active_runlist_mapping(struct gk20a *g)
struct nvgpu_runlist_domain *nvgpu_rl_domain_get(struct gk20a *g, u32 runlist_id,
                const char *name)
{
        struct nvgpu_fifo *f = &g->fifo;
        struct nvgpu_runlist *runlist = f->runlists[runlist_id];
        struct nvgpu_runlist_domain *domain;

        nvgpu_list_for_each_entry(domain, &runlist->domains, nvgpu_runlist_domain,
                        domains_list) {
                if (strcmp(domain->name, name) == 0) {
                        return domain;
                }
        }

        return NULL;
}

static void nvgpu_init_active_runlist_mapping(struct gk20a *g)
{
        struct nvgpu_runlist *runlist;
        struct nvgpu_fifo *f = &g->fifo;
        unsigned int runlist_id;
        size_t runlist_size;
        u32 i;
        int err = 0;

        rl_dbg(g, "Building active runlist map.");

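The new nvgpu_rl_domain_get() resolves a domain by name with a linear strcmp() walk over runlist->domains. A hedged sketch of a hypothetical caller fetching the default domain that nvgpu_runlist_setup_sw() creates; the wrapper function and its error message are illustrative and not part of this commit.

/* Hypothetical caller: look up the default domain built in
 * nvgpu_runlist_setup_sw(). Illustrative only; not part of this change. */
static struct nvgpu_runlist_domain *get_default_domain(struct gk20a *g,
                u32 runlist_id)
{
        struct nvgpu_runlist_domain *domain =
                nvgpu_rl_domain_get(g, runlist_id, "(default)");

        if (domain == NULL) {
                nvgpu_err(g, "no default domain on runlist %u", runlist_id);
        }
        return domain;
}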
@@ -907,6 +939,8 @@ static int nvgpu_init_active_runlist_mapping(struct gk20a *g)
         */
        i = 0U;
        for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
                struct nvgpu_runlist *runlist;

                if (!nvgpu_engine_is_valid_runlist_id(g, runlist_id)) {
                        /* skip inactive runlist */
                        rl_dbg(g, " Skipping invalid runlist: %d", runlist_id);
@@ -926,26 +960,15 @@ static int nvgpu_init_active_runlist_mapping(struct gk20a *g)
                rl_dbg(g, " RL entries: %d", f->num_runlist_entries);
                rl_dbg(g, " RL size %zu", runlist_size);

                runlist->domain = nvgpu_init_rl_domain(g, runlist_size);
                if (runlist->domain == NULL) {
                        nvgpu_err(g, "memory allocation failed");
                        err = -ENOMEM;
                        goto clean_up_runlist;
                }

                nvgpu_init_list_node(&runlist->domains);
                nvgpu_mutex_init(&runlist->runlist_lock);
        }

        return 0;

clean_up_runlist:
        return err;
}

int nvgpu_runlist_setup_sw(struct gk20a *g)
{
        struct nvgpu_fifo *f = &g->fifo;
        u32 num_runlists = 0U;
        u32 num_runlists = 0U, i;
        unsigned int runlist_id;
        int err = 0;

@@ -984,11 +1007,20 @@ int nvgpu_runlist_setup_sw(struct gk20a *g)
        rl_dbg(g, " RL entry size: %u bytes", f->runlist_entry_size);
        rl_dbg(g, " Max RL entries: %u", f->num_runlist_entries);

        err = nvgpu_init_active_runlist_mapping(g);
        if (err != 0) {
                goto clean_up_runlist;
        nvgpu_init_active_runlist_mapping(g);

        for (i = 0; i < g->fifo.num_runlists; i++) {
                struct nvgpu_runlist *runlist = &f->active_runlists[i];

                runlist->domain = nvgpu_runlist_domain_alloc(g, runlist, "(default)");
                if (runlist->domain == NULL) {
                        nvgpu_err(g, "memory allocation failed");
                        err = -ENOMEM;
                        goto clean_up_runlist;
                }
        }

        g->ops.runlist.init_enginfo(g, f);
        return 0;

@@ -93,7 +93,42 @@ struct nvgpu_runlist_mem {
        u32 count;
};

/*
 * Data interface to be owned by another SW unit. The heart of the domain
 * scheduler can be running outside nvgpu and as such cannot own these.
 * However, nvgpu needs to do some bookkeeping for the domain scheduler anyway;
 * this will be owned by that layer, and will be only presented to the runlist
 * HW for submitting only. The contents will be filled by another SW unit
 * responsible for only that.
 *
 * For now, we live in a transitional limbo where the domain scheduler does not
 * exist yet in its final form but managing separate runlist domains will help
 * bring it to existence.
 *
 * Intent of final interfaces ("a -> b": a uses b):
 *
 * nvgpu domain scheduler -> runlist domain
 * channels -> runlist domain
 * TSGs -> runlist domain
 * nvgpu domain scheduler -> core scheduler
 * core scheduler -> runlist HW
 * fault reset/recovery -> core scheduler
 * fault reset/recovery -> runlist HW
 *
 * Memory ownership of a runlist domain will be in the nvgpu domain scheduler.
 */
struct nvgpu_runlist_domain {
        /**
         * Placeholder for metadata that will come in further patches.
         */
        char name[32];
        /**
         * All created domains are tracked in a list.
         *
         * The list head is nvgpu_runlist::domains
         */
        struct nvgpu_list_node domains_list;

        /** Bitmap of active channels in the runlist domain. One bit per chid. */
        unsigned long *active_channels;
        /** Bitmap of active TSGs in the runlist domain. One bit per tsgid. */
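The active_channels and active_tsgs bitmaps use one bit per channel ID or TSG ID. Below is a standalone sketch of that sizing and bit manipulation in plain C; nvgpu has its own bitmap helpers, so the names and the channel count here are illustrative only.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Number of unsigned longs needed for one bit per ID. */
static size_t bitmap_longs(size_t num_ids)
{
        return (num_ids + BITS_PER_LONG - 1) / BITS_PER_LONG;
}

static void bitmap_set(unsigned long *map, size_t id)
{
        map[id / BITS_PER_LONG] |= 1UL << (id % BITS_PER_LONG);
}

static int bitmap_test(const unsigned long *map, size_t id)
{
        return (int)((map[id / BITS_PER_LONG] >> (id % BITS_PER_LONG)) & 1UL);
}

int main(void)
{
        size_t num_channels = 512;      /* illustrative channel count */
        unsigned long *active_channels =
                calloc(bitmap_longs(num_channels), sizeof(unsigned long));

        if (active_channels == NULL) {
                return 1;
        }
        bitmap_set(active_channels, 42);        /* mark chid 42 active in this domain */
        printf("chid 42 active: %d\n", bitmap_test(active_channels, 42));
        free(active_channels);
        return 0;
}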
@@ -110,8 +145,18 @@ struct nvgpu_runlist {
        /** The HW has some designated RL IDs that are bound to engines. */
        u32 id;

        /* The default domain is the only one that currently exists. */
        /* The currently active scheduling domain. */
        struct nvgpu_runlist_domain *domain;
        /*
         * All scheduling domains of this RL, see nvgpu_runlist_domain::domain_node.
         *
         * Design note: the runlist hardware unit should not own the actual
         * domain memory; this arrangement is temporary to aid in transition
         * for a domain scheduler where a scheduling domain will own
         * domain-related runlist data (nvgpu_runlist_domain). See the
         * documentation of nvgpu_runlist_domain.
         */
        struct nvgpu_list_node domains;

        /** Bitmask of PBDMAs supported for this runlist. */
        u32 pbdma_bitmask;
@@ -138,6 +183,16 @@ struct nvgpu_runlist {
        /** @endcond DOXYGEN_SHOULD_SKIP_THIS */
};

struct nvgpu_runlist_domain *nvgpu_rl_domain_get(struct gk20a *g, u32 runlist_id,
                const char *name);

static inline struct nvgpu_runlist_domain *
nvgpu_runlist_domain_from_domains_list(struct nvgpu_list_node *node)
{
        return (struct nvgpu_runlist_domain *)
                ((uintptr_t)node - offsetof(struct nvgpu_runlist_domain, domains_list));
}

/**
 * @brief Rebuild runlist
 *
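nvgpu_runlist_domain_from_domains_list() is the usual container-of conversion: it steps back from a pointer to the embedded domains_list node to the enclosing nvgpu_runlist_domain, which is what list traversal such as nvgpu_list_for_each_entry() relies on. A standalone sketch of the same arithmetic with simplified, illustrative types:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct node { struct node *prev, *next; };

struct domain {
        char name[32];
        struct node domains_list;       /* embedded list node */
};

/* Same arithmetic as nvgpu_runlist_domain_from_domains_list(): subtract the
 * node's offset within the struct to recover the containing object. */
static struct domain *domain_from_node(struct node *n)
{
        return (struct domain *)((uintptr_t)n - offsetof(struct domain, domains_list));
}

int main(void)
{
        struct domain d;

        assert(domain_from_node(&d.domains_list) == &d);
        return 0;
}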