gpu: nvgpu: improve nvs uapi

- Make the domain scheduler timeslice type nanoseconds to future-proof
  the interface
- Return -ENOSYS from ioctls if the nvs code is not initialized
- Return the number of domains also when user supplied array is present
- Use domain id instead of name for TSG binding
- Improve documentation in the uapi headers
- Verify that reserved fields are zeroed
- Extend some internal logging
- Release the sched mutex on alloc error
- Add file mode checks in the nvs ioctls. The create and remove ioctls
  require writable file permissions, while the query does not; this
  allows filesystem-based access control of domain management on the
  single dev node.

Jira NVGPU-6788

Change-Id: I668eb5972a0ed1073e84a4ae30e3069bf0b59e16
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2639017
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Konsta Hölttä
2021-12-08 16:01:09 +02:00
committed by mobile promotions
parent b92e8530fc
commit 55afe1ff4c
11 changed files with 234 additions and 107 deletions

View File

@@ -572,6 +572,8 @@ static void runlist_select_locked(struct gk20a *g, struct nvgpu_runlist *runlist
gk20a_busy_noresume(g); gk20a_busy_noresume(g);
if (nvgpu_is_powered_off(g)) { if (nvgpu_is_powered_off(g)) {
rl_dbg(g, "Runlist[%u]: power is off, skip submit",
runlist->id);
gk20a_idle_nosuspend(g); gk20a_idle_nosuspend(g);
return; return;
} }
@@ -580,7 +582,7 @@ static void runlist_select_locked(struct gk20a *g, struct nvgpu_runlist *runlist
gk20a_idle_nosuspend(g); gk20a_idle_nosuspend(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "failed to hold power for runlist switch"); nvgpu_err(g, "failed to hold power for runlist submit");
/* /*
* probably shutting down though, so don't bother propagating * probably shutting down though, so don't bother propagating
* the error. Power is already on when the domain scheduler is * the error. Power is already on when the domain scheduler is

View File

@@ -155,7 +155,7 @@ int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
} }
#ifdef CONFIG_NVS_PRESENT #ifdef CONFIG_NVS_PRESENT
int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, const char *domain_name) int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, u64 domain_id)
{ {
struct nvgpu_runlist_domain *rl_domain; struct nvgpu_runlist_domain *rl_domain;
struct nvgpu_nvs_domain *nvs_domain; struct nvgpu_nvs_domain *nvs_domain;
@@ -166,8 +166,9 @@ int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, const char *domain_name)
return -EINVAL; return -EINVAL;
} }
nvs_domain = nvgpu_nvs_domain_get(g, domain_name); nvs_domain = nvgpu_nvs_domain_by_id(g, domain_id);
if (nvs_domain == NULL) { if (nvs_domain == NULL) {
nvgpu_err(g, "nvs domain not found (%llu)", domain_id);
return -ENOENT; return -ENOENT;
} }
@@ -175,8 +176,9 @@ int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, const char *domain_name)
* The domain ptr will get updated with the right id once the runlist * The domain ptr will get updated with the right id once the runlist
* gets specified based on the first channel. * gets specified based on the first channel.
*/ */
rl_domain = nvgpu_rl_domain_get(g, 0, domain_name); rl_domain = nvgpu_rl_domain_get(g, 0, nvs_domain->parent->name);
if (rl_domain == NULL) { if (rl_domain == NULL) {
nvgpu_err(g, "rl domain not found (%s)", nvs_domain->parent->name);
/* /*
* This shouldn't happen because the nvs domain guarantees RL domains. * This shouldn't happen because the nvs domain guarantees RL domains.
* *
@@ -858,7 +860,7 @@ int nvgpu_tsg_open_common(struct gk20a *g, struct nvgpu_tsg *tsg, pid_t pid)
* gets specified based on the first channel. * gets specified based on the first channel.
*/ */
tsg->rl_domain = nvgpu_rl_domain_get(g, 0, "(default)"); tsg->rl_domain = nvgpu_rl_domain_get(g, 0, "(default)");
tsg->nvs_domain = nvgpu_nvs_domain_get(g, "(default)"); tsg->nvs_domain = nvgpu_nvs_domain_by_name(g, "(default)");
#ifdef CONFIG_NVGPU_DEBUGGER #ifdef CONFIG_NVGPU_DEBUGGER
tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE; tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE;
#endif #endif

View File

@@ -88,12 +88,12 @@ static void nvgpu_nvs_worker_wakeup_process_item(
/* placeholder; never called yet */ /* placeholder; never called yet */
} }
static u32 nvgpu_nvs_tick(struct gk20a *g) static u64 nvgpu_nvs_tick(struct gk20a *g)
{ {
struct nvgpu_nvs_scheduler *sched = g->scheduler; struct nvgpu_nvs_scheduler *sched = g->scheduler;
struct nvgpu_nvs_domain *domain; struct nvgpu_nvs_domain *domain;
struct nvs_domain *nvs_domain; struct nvs_domain *nvs_domain;
u32 timeslice; u64 timeslice;
nvs_dbg(g, "nvs tick"); nvs_dbg(g, "nvs tick");
@@ -104,14 +104,14 @@ static u32 nvgpu_nvs_tick(struct gk20a *g)
if (domain == NULL) { if (domain == NULL) {
/* nothing to schedule, TODO wait for an event instead */ /* nothing to schedule, TODO wait for an event instead */
nvgpu_mutex_release(&g->sched_mutex); nvgpu_mutex_release(&g->sched_mutex);
return 100000; return 100 * NSEC_PER_MSEC;
} }
nvs_domain = domain->parent->next; nvs_domain = domain->parent->next;
if (nvs_domain == NULL) { if (nvs_domain == NULL) {
nvs_domain = g->scheduler->sched->domain_list->domains; nvs_domain = g->scheduler->sched->domain_list->domains;
} }
timeslice = nvs_domain->timeslice_us; timeslice = nvs_domain->timeslice_ns;
nvgpu_runlist_tick(g); nvgpu_runlist_tick(g);
sched->active_domain = nvs_domain->priv; sched->active_domain = nvs_domain->priv;
@@ -128,10 +128,11 @@ static void nvgpu_nvs_worker_wakeup_post_process(struct nvgpu_worker *worker)
nvgpu_nvs_worker_from_worker(worker); nvgpu_nvs_worker_from_worker(worker);
if (nvgpu_timeout_peek_expired(&nvs_worker->timeout)) { if (nvgpu_timeout_peek_expired(&nvs_worker->timeout)) {
u32 next_timeout_us = nvgpu_nvs_tick(g); u32 next_timeout_ns = nvgpu_nvs_tick(g);
if (next_timeout_us != 0U) { if (next_timeout_ns != 0U) {
nvs_worker->current_timeout = (next_timeout_us + 999U) / 1000U; nvs_worker->current_timeout =
(next_timeout_ns + NSEC_PER_MSEC - 1) / NSEC_PER_MSEC;
} }
nvgpu_timeout_init_cpu_timer(g, &nvs_worker->timeout, nvgpu_timeout_init_cpu_timer(g, &nvs_worker->timeout,
@@ -175,7 +176,10 @@ int nvgpu_nvs_init(struct gk20a *g)
} }
if (nvgpu_rl_domain_get(g, 0, "(default)") == NULL) { if (nvgpu_rl_domain_get(g, 0, "(default)") == NULL) {
int err = nvgpu_nvs_add_domain(g, "(default)", 1000*1000, 10*1000, &domain); int err = nvgpu_nvs_add_domain(g, "(default)",
100U * NSEC_PER_MSEC,
0U,
&domain);
if (err != 0) { if (err != 0) {
return err; return err;
} }
@@ -284,8 +288,8 @@ static u64 nvgpu_nvs_new_id(struct gk20a *g)
return nvgpu_atomic64_inc_return(&g->scheduler->id_counter); return nvgpu_atomic64_inc_return(&g->scheduler->id_counter);
} }
int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u32 timeslice, int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u64 timeslice,
u32 preempt_grace, struct nvgpu_nvs_domain **pdomain) u64 preempt_grace, struct nvgpu_nvs_domain **pdomain)
{ {
int err = 0; int err = 0;
struct nvs_domain *nvs_dom; struct nvs_domain *nvs_dom;
@@ -313,6 +317,7 @@ int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u32 timeslice,
timeslice, preempt_grace, nvgpu_dom); timeslice, preempt_grace, nvgpu_dom);
if (nvs_dom == NULL) { if (nvs_dom == NULL) {
nvs_dbg(g, "failed to create nvs domain for %s", name);
nvgpu_kfree(g, nvgpu_dom); nvgpu_kfree(g, nvgpu_dom);
err = -ENOMEM; err = -ENOMEM;
goto unlock; goto unlock;
@@ -320,9 +325,10 @@ int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u32 timeslice,
err = nvgpu_rl_domain_alloc(g, name); err = nvgpu_rl_domain_alloc(g, name);
if (err != 0) { if (err != 0) {
nvs_dbg(g, "failed to alloc rl domain for %s", name);
nvs_domain_destroy(g->scheduler->sched, nvs_dom); nvs_domain_destroy(g->scheduler->sched, nvs_dom);
nvgpu_kfree(g, nvgpu_dom); nvgpu_kfree(g, nvgpu_dom);
return err; goto unlock;
} }
@@ -339,14 +345,15 @@ unlock:
} }
struct nvgpu_nvs_domain * struct nvgpu_nvs_domain *
nvgpu_nvs_get_dom_by_id(struct gk20a *g, struct nvs_sched *sched, u64 dom_id) nvgpu_nvs_domain_by_id_locked(struct gk20a *g, u64 domain_id)
{ {
struct nvgpu_nvs_scheduler *sched = g->scheduler;
struct nvs_domain *nvs_dom; struct nvs_domain *nvs_dom;
nvs_domain_for_each(sched, nvs_dom) { nvs_domain_for_each(sched->sched, nvs_dom) {
struct nvgpu_nvs_domain *nvgpu_dom = nvs_dom->priv; struct nvgpu_nvs_domain *nvgpu_dom = nvs_dom->priv;
if (nvgpu_dom->id == dom_id) { if (nvgpu_dom->id == domain_id) {
return nvgpu_dom; return nvgpu_dom;
} }
} }
@@ -355,7 +362,26 @@ nvgpu_nvs_get_dom_by_id(struct gk20a *g, struct nvs_sched *sched, u64 dom_id)
} }
struct nvgpu_nvs_domain * struct nvgpu_nvs_domain *
nvgpu_nvs_domain_get(struct gk20a *g, const char *name) nvgpu_nvs_domain_by_id(struct gk20a *g, u64 domain_id)
{
struct nvgpu_nvs_domain *dom = NULL;
nvgpu_mutex_acquire(&g->sched_mutex);
dom = nvgpu_nvs_domain_by_id_locked(g, domain_id);
if (dom == NULL) {
goto unlock;
}
dom->ref++;
unlock:
nvgpu_mutex_release(&g->sched_mutex);
return dom;
}
struct nvgpu_nvs_domain *
nvgpu_nvs_domain_by_name(struct gk20a *g, const char *name)
{ {
struct nvs_domain *nvs_dom; struct nvs_domain *nvs_dom;
struct nvgpu_nvs_domain *dom = NULL; struct nvgpu_nvs_domain *dom = NULL;
@@ -395,7 +421,7 @@ int nvgpu_nvs_del_domain(struct gk20a *g, u64 dom_id)
nvs_dbg(g, "Attempting to remove domain: %llu", dom_id); nvs_dbg(g, "Attempting to remove domain: %llu", dom_id);
nvgpu_dom = nvgpu_nvs_get_dom_by_id(g, s->sched, dom_id); nvgpu_dom = nvgpu_nvs_domain_by_id_locked(g, dom_id);
if (nvgpu_dom == NULL) { if (nvgpu_dom == NULL) {
nvs_dbg(g, "domain %llu does not exist!", dom_id); nvs_dbg(g, "domain %llu does not exist!", dom_id);
err = -ENOENT; err = -ENOENT;
@@ -473,7 +499,7 @@ void nvgpu_nvs_print_domain(struct gk20a *g, struct nvgpu_nvs_domain *domain)
struct nvs_domain *nvs_dom = domain->parent; struct nvs_domain *nvs_dom = domain->parent;
nvs_dbg(g, "Domain %s", nvs_dom->name); nvs_dbg(g, "Domain %s", nvs_dom->name);
nvs_dbg(g, " timeslice: %u us", nvs_dom->timeslice_us); nvs_dbg(g, " timeslice: %llu ns", nvs_dom->timeslice_ns);
nvs_dbg(g, " preempt grace: %u us", nvs_dom->preempt_grace_us); nvs_dbg(g, " preempt grace: %llu ns", nvs_dom->preempt_grace_ns);
nvs_dbg(g, " domain ID: %llu", domain->id); nvs_dbg(g, " domain ID: %llu", domain->id);
} }

View File

@@ -88,14 +88,14 @@ int nvgpu_nvs_suspend(struct gk20a *g);
void nvgpu_nvs_get_log(struct gk20a *g, s64 *timestamp, const char **msg); void nvgpu_nvs_get_log(struct gk20a *g, s64 *timestamp, const char **msg);
u32 nvgpu_nvs_domain_count(struct gk20a *g); u32 nvgpu_nvs_domain_count(struct gk20a *g);
int nvgpu_nvs_del_domain(struct gk20a *g, u64 dom_id); int nvgpu_nvs_del_domain(struct gk20a *g, u64 dom_id);
int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u32 timeslice, int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u64 timeslice,
u32 preempt_grace, struct nvgpu_nvs_domain **pdomain); u64 preempt_grace, struct nvgpu_nvs_domain **pdomain);
struct nvgpu_nvs_domain *
nvgpu_nvs_get_dom_by_id(struct gk20a *g, struct nvs_sched *sched, u64 dom_id);
void nvgpu_nvs_print_domain(struct gk20a *g, struct nvgpu_nvs_domain *domain); void nvgpu_nvs_print_domain(struct gk20a *g, struct nvgpu_nvs_domain *domain);
struct nvgpu_nvs_domain * struct nvgpu_nvs_domain *
nvgpu_nvs_domain_get(struct gk20a *g, const char *name); nvgpu_nvs_domain_by_id(struct gk20a *g, u64 domain_id);
struct nvgpu_nvs_domain *
nvgpu_nvs_domain_by_name(struct gk20a *g, const char *name);
void nvgpu_nvs_domain_put(struct gk20a *g, struct nvgpu_nvs_domain *dom); void nvgpu_nvs_domain_put(struct gk20a *g, struct nvgpu_nvs_domain *dom);
/* /*
* Debug wrapper for NVS code. * Debug wrapper for NVS code.
@@ -119,7 +119,7 @@ static inline int nvgpu_nvs_suspend(struct gk20a *g)
} }
static inline struct nvgpu_nvs_domain * static inline struct nvgpu_nvs_domain *
nvgpu_nvs_domain_get(struct gk20a *g, const char *name) nvgpu_nvs_domain_by_name(struct gk20a *g, const char *name)
{ {
return NULL; return NULL;
} }

View File

@@ -381,7 +381,20 @@ int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg,
struct nvgpu_channel *ch); struct nvgpu_channel *ch);
#ifdef CONFIG_NVS_PRESENT #ifdef CONFIG_NVS_PRESENT
int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, const char *domain_name); /**
* @brief Bind a TSG to a domain.
*
* @param tsg [in] Pointer to TSG struct.
* @param domain_id [in] Domain identifier.
*
* Make this TSG participate in the given domain, such that it can only be
* seen by runlist HW when the domain has been scheduled in.
*
* The TSG must have no channels at this point.
*
* @return 0 for successful bind, < 0 for failure.
*/
int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, u64 domain_id);
#endif #endif
/** /**

View File

@@ -55,10 +55,30 @@ static int nvgpu_nvs_ioctl_create_domain(
struct nvgpu_nvs_domain *domain = NULL; struct nvgpu_nvs_domain *domain = NULL;
int err; int err;
if (dom_args->reserved1 != 0) {
return -EINVAL;
}
if (dom_args->domain_params.reserved1 != 0) {
return -EINVAL;
}
if (dom_args->domain_params.reserved2 != 0) {
return -EINVAL;
}
if (dom_args->domain_params.dom_id != 0) {
return -EINVAL;
}
if (g->scheduler == NULL) {
return -ENOSYS;
}
err = nvgpu_nvs_add_domain(g, err = nvgpu_nvs_add_domain(g,
dom_args->domain_params.name, dom_args->domain_params.name,
dom_args->domain_params.timeslice_us, dom_args->domain_params.timeslice_ns,
dom_args->domain_params.preempt_grace_us, dom_args->domain_params.preempt_grace_ns,
&domain); &domain);
if (err != 0) { if (err != 0) {
return err; return err;
@@ -71,9 +91,18 @@ static int nvgpu_nvs_ioctl_create_domain(
return 0; return 0;
} }
static int nvgpu_nvs_ioctl_remove_domain(struct gk20a *g, u32 dom_id) static int nvgpu_nvs_ioctl_remove_domain(struct gk20a *g,
struct nvgpu_nvs_ioctl_remove_domain *args)
{ {
return nvgpu_nvs_del_domain(g, dom_id); if (args->reserved1 != 0) {
return -EINVAL;
}
if (g->scheduler == NULL) {
return -ENOSYS;
}
return nvgpu_nvs_del_domain(g, args->dom_id);
} }
static int nvgpu_nvs_ioctl_query_domains( static int nvgpu_nvs_ioctl_query_domains(
@@ -84,45 +113,61 @@ static int nvgpu_nvs_ioctl_query_domains(
struct nvgpu_nvs_domain *nvgpu_dom; struct nvgpu_nvs_domain *nvgpu_dom;
struct nvs_domain *nvs_dom; struct nvs_domain *nvs_dom;
u32 index; u32 index;
struct nvgpu_nvs_ioctl_domain *args_domains = (void __user *)(uintptr_t)args->domains; u32 user_capacity = args->nr;
struct nvgpu_nvs_ioctl_domain *args_domains =
(void __user *)(uintptr_t)args->domains;
/* First call variant: return number of domains. */ if (args->reserved0 != 0) {
if (args_domains == NULL) { return -EINVAL;
args->nr = nvgpu_nvs_domain_count(g);
if (copy_to_user(user_arg, args, sizeof(*args))) {
return -EFAULT;
}
nvs_dbg(g, "Nr domains: %u", args->nr);
return 0;
} }
/* if (args->reserved1 != 0) {
* Second call variant: populate the passed array with domain info. return -EINVAL;
*/ }
index = 0;
nvs_domain_for_each(g->scheduler->sched, nvs_dom) {
struct nvgpu_nvs_ioctl_domain dom;
nvgpu_dom = nvs_dom->priv; if (g->scheduler == NULL) {
return -ENOSYS;
}
nvs_dbg(g, "Copying dom #%u [%s] (%llu)", /* First call variant: return number of domains. */
index, nvs_dom->name, nvgpu_dom->id); args->nr = nvgpu_nvs_domain_count(g);
if (copy_to_user(user_arg, args, sizeof(*args))) {
return -EFAULT;
}
nvs_dbg(g, "Nr domains: %u", args->nr);
(void)memset(&dom, 0, sizeof(dom)); if (args_domains != NULL) {
/*
* Second call variant: populate the passed array with domain info.
*/
index = 0;
nvs_domain_for_each(g->scheduler->sched, nvs_dom) {
struct nvgpu_nvs_ioctl_domain dom;
if (index == user_capacity) {
break;
}
strncpy(dom.name, nvs_dom->name, sizeof(dom.name) - 1); nvgpu_dom = nvs_dom->priv;
dom.timeslice_us = nvs_dom->timeslice_us;
dom.preempt_grace_us = nvs_dom->preempt_grace_us;
dom.subscheduler = nvgpu_dom->subscheduler;
dom.dom_id = nvgpu_dom->id;
if (copy_to_user(&args_domains[index], nvs_dbg(g, "Copying dom #%u [%s] (%llu)",
&dom, sizeof(dom))) { index, nvs_dom->name, nvgpu_dom->id);
nvs_dbg(g, "Fault during copy of domain to userspace.");
return -EFAULT; (void)memset(&dom, 0, sizeof(dom));
strncpy(dom.name, nvs_dom->name, sizeof(dom.name) - 1);
dom.timeslice_ns = nvs_dom->timeslice_ns;
dom.preempt_grace_ns = nvs_dom->preempt_grace_ns;
dom.subscheduler = nvgpu_dom->subscheduler;
dom.dom_id = nvgpu_dom->id;
if (copy_to_user(&args_domains[index],
&dom, sizeof(dom))) {
nvs_dbg(g, "Fault during copy of domain to userspace.");
return -EFAULT;
}
index += 1;
} }
index += 1;
} }
return 0; return 0;
@@ -130,9 +175,10 @@ static int nvgpu_nvs_ioctl_query_domains(
long nvgpu_nvs_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) long nvgpu_nvs_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{ {
u8 buf[NVGPU_NVS_IOCTL_MAX_ARG_SIZE] = { 0 };
bool writable = filp->f_mode & FMODE_WRITE;
struct gk20a *g = filp->private_data; struct gk20a *g = filp->private_data;
int err = 0; int err = 0;
u8 buf[NVGPU_NVS_IOCTL_MAX_ARG_SIZE] = { 0 };
nvs_dbg(g, "IOC_TYPE: %c", _IOC_TYPE(cmd)); nvs_dbg(g, "IOC_TYPE: %c", _IOC_TYPE(cmd));
nvs_dbg(g, "IOC_NR: %u", _IOC_NR(cmd)); nvs_dbg(g, "IOC_NR: %u", _IOC_NR(cmd));
@@ -159,17 +205,21 @@ long nvgpu_nvs_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct nvgpu_nvs_ioctl_create_domain *args = struct nvgpu_nvs_ioctl_create_domain *args =
(struct nvgpu_nvs_ioctl_create_domain *)buf; (struct nvgpu_nvs_ioctl_create_domain *)buf;
if (!writable) {
err = -EPERM;
goto done;
}
err = nvgpu_nvs_ioctl_create_domain(g, args); err = nvgpu_nvs_ioctl_create_domain(g, args);
if (err) if (err)
goto done; goto done;
/* /*
* Issue a remove domain IOCTL in case of fault when copying back to * Remove the domain in case of fault when copying back to
* userspace. * userspace to keep this ioctl atomic.
*/ */
if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd))) { if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd))) {
nvgpu_nvs_ioctl_remove_domain(g, nvgpu_nvs_del_domain(g, args->domain_params.dom_id);
args->domain_params.dom_id);
err = -EFAULT; err = -EFAULT;
goto done; goto done;
} }
@@ -177,19 +227,26 @@ long nvgpu_nvs_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break; break;
} }
case NVGPU_NVS_IOCTL_QUERY_DOMAINS: case NVGPU_NVS_IOCTL_QUERY_DOMAINS:
{
struct nvgpu_nvs_ioctl_query_domains *args =
(struct nvgpu_nvs_ioctl_query_domains *)buf;
err = nvgpu_nvs_ioctl_query_domains(g, err = nvgpu_nvs_ioctl_query_domains(g,
(void __user *)arg, (void __user *)arg,
(void *)buf); args);
if (err)
goto done;
break; break;
}
case NVGPU_NVS_IOCTL_REMOVE_DOMAIN: case NVGPU_NVS_IOCTL_REMOVE_DOMAIN:
{ {
struct nvgpu_nvs_ioctl_remove_domain *args = struct nvgpu_nvs_ioctl_remove_domain *args =
(struct nvgpu_nvs_ioctl_remove_domain *)buf; (struct nvgpu_nvs_ioctl_remove_domain *)buf;
err = nvgpu_nvs_ioctl_remove_domain(g, args->dom_id); if (!writable) {
err = -EPERM;
goto done;
}
err = nvgpu_nvs_ioctl_remove_domain(g, args);
break; break;
} }
default: default:

View File

@@ -181,7 +181,23 @@ static int nvgpu_tsg_bind_scheduling_domain(struct nvgpu_tsg *tsg,
struct nvgpu_tsg_bind_scheduling_domain_args *args) struct nvgpu_tsg_bind_scheduling_domain_args *args)
{ {
return nvgpu_tsg_bind_domain(tsg, args->domain_name); if (args->reserved[0] != 0) {
return -EINVAL;
}
if (args->reserved[1] != 0) {
return -EINVAL;
}
if (args->reserved[2] != 0) {
return -EINVAL;
}
if (tsg->g->scheduler == NULL) {
return -ENOSYS;
}
return nvgpu_tsg_bind_domain(tsg, args->domain_id);
} }
#endif #endif

View File

@@ -23,9 +23,9 @@
*/ */
struct nvgpu_nvs_ioctl_domain { struct nvgpu_nvs_ioctl_domain {
/* /*
* Human readable name for this domain. * Human readable null-terminated name for this domain.
*/ */
char name[32]; char name[32];
/* /*
* Scheduling parameters: specify how long this domain should be scheduled * Scheduling parameters: specify how long this domain should be scheduled
@@ -33,15 +33,15 @@ struct nvgpu_nvs_ioctl_domain {
* preempting. A value of zero is treated as an infinite timeslice or an * preempting. A value of zero is treated as an infinite timeslice or an
* infinite grace period, respectively. * infinite grace period, respectively.
*/ */
__u32 timeslice_us; __u64 timeslice_ns;
__u32 preempt_grace_us; __u64 preempt_grace_ns;
/* /*
* Pick which subscheduler to use. These will be implemented by the kernel * Pick which subscheduler to use. These will be implemented by the kernel
* as needed. There'll always be at least one, which is the host HW built in * as needed. There'll always be at least one, which is the host HW built in
* round-robin scheduler. * round-robin scheduler.
*/ */
__u32 subscheduler; __u32 subscheduler;
/* /*
* GPU host hardware round-robin. * GPU host hardware round-robin.
@@ -50,19 +50,21 @@ struct nvgpu_nvs_ioctl_domain {
/* /*
* Populated by the IOCTL when created: unique identifier. User space * Populated by the IOCTL when created: unique identifier. User space
* should never set this variable. * must set this to 0.
*/ */
__u64 dom_id; __u64 dom_id;
__u64 reserved1; /* Must be 0. */
__u64 reserved2; __u64 reserved1;
/* Must be 0. */
__u64 reserved2;
}; };
/** /**
* NVGPU_NVS_IOCTL_CREATE_DOMAIN * NVGPU_NVS_IOCTL_CREATE_DOMAIN
* *
* Create a domain - essentially a group of GPU contexts. Applications * Create a domain - essentially a group of GPU contexts. Applications
* cacan be bound into this domain on request for each TSG. * can be bound into this domain on request for each TSG.
* *
* The domain ID is returned in dom_id; this id is _not_ secure. The * The domain ID is returned in dom_id; this id is _not_ secure. The
* nvsched device needs to have restricted permissions such that only a * nvsched device needs to have restricted permissions such that only a
@@ -81,16 +83,18 @@ struct nvgpu_nvs_ioctl_create_domain {
*/ */
struct nvgpu_nvs_ioctl_domain domain_params; struct nvgpu_nvs_ioctl_domain domain_params;
__u64 reserved1; /* Must be 0. */
__u64 reserved1;
}; };
struct nvgpu_nvs_ioctl_remove_domain { struct nvgpu_nvs_ioctl_remove_domain {
/* /*
* In: a domain_id to remove. * In: a domain_id to remove.
*/ */
__u64 dom_id; __u64 dom_id;
__u64 reserved1; /* Must be 0. */
__u64 reserved1;
}; };
/** /**
@@ -99,26 +103,32 @@ struct nvgpu_nvs_ioctl_remove_domain {
* Query the current list of domains in the scheduler. This is a two * Query the current list of domains in the scheduler. This is a two
* part IOCTL. * part IOCTL.
* *
* If domains is NULL, then this IOCTL will populate nr with the number * If domains is 0, then this IOCTL will populate nr with the number
* of present domains. * of present domains.
* *
* If domains is not NULL, then this IOCTL will treat domains as an * If domains is nonzero, then this IOCTL will treat domains as a pointer to an
* array with nr elements and write up to nr domains into that array. * array of nvgpu_nvs_ioctl_domain and will write up to nr domains into that
* array. The nr field will be updated with the number of present domains,
* which may be more than the number of entries written.
*/ */
struct nvgpu_nvs_ioctl_query_domains { struct nvgpu_nvs_ioctl_query_domains {
/* /*
* In/Out: If NULL, leave untouched. If not NULL, then write * In/Out: If 0, leave untouched. If nonzero, then write up to nr
* up to nr domains into the domain elements pointed to by * elements of nvgpu_nvs_ioctl_domain into where domains points to.
* domains.
*/ */
__u64 domains; __u64 domains;
/* /*
* In/Out: If domains is NULL, then populate with the number * - In: the capacity of the domains array if domais is not 0.
* of domains present. Otherwise nr specifies the capacity of * - Out: populate with the number of domains present.
* the domains array pointed to by domains.
*/ */
__u32 nr; __u32 nr;
/* Must be 0. */
__u32 reserved0;
/* Must be 0. */
__u64 reserved1;
}; };
#define NVGPU_NVS_IOCTL_CREATE_DOMAIN \ #define NVGPU_NVS_IOCTL_CREATE_DOMAIN \

View File

@@ -41,9 +41,10 @@ struct nvgpu_tsg_bind_channel_ex_args {
}; };
struct nvgpu_tsg_bind_scheduling_domain_args { struct nvgpu_tsg_bind_scheduling_domain_args {
/* in: name of the domain this tsg will be bound to */ /* in: id of the domain this tsg will be bound to */
__u8 domain_name[16]; __u64 domain_id;
__u8 reserved[16]; /* Must be set to 0 */
__u64 reserved[3];
}; };
/* /*

View File

@@ -48,8 +48,8 @@ struct nvs_domain {
* preempting. A value of zero is treated as an infinite timeslice or an * preempting. A value of zero is treated as an infinite timeslice or an
* infinite grace period. * infinite grace period.
*/ */
u32 timeslice_us; u64 timeslice_ns;
u32 preempt_grace_us; u64 preempt_grace_ns;
/* /*
* Priv pointer for downstream use. * Priv pointer for downstream use.
@@ -66,7 +66,7 @@ struct nvs_domain {
(domain_ptr) = (domain_ptr)->next) (domain_ptr) = (domain_ptr)->next)
struct nvs_domain *nvs_domain_create(struct nvs_sched *sched, struct nvs_domain *nvs_domain_create(struct nvs_sched *sched,
const char *name, u32 timeslice, u32 preempt_grace, const char *name, u64 timeslice, u64 preempt_grace,
void *priv); void *priv);
void nvs_domain_destroy(struct nvs_sched *sched, struct nvs_domain *dom); void nvs_domain_destroy(struct nvs_sched *sched, struct nvs_domain *dom);
void nvs_domain_clear_all(struct nvs_sched *sched); void nvs_domain_clear_all(struct nvs_sched *sched);

View File

@@ -16,7 +16,7 @@
* Create and add a new domain to the end of the domain list. * Create and add a new domain to the end of the domain list.
*/ */
struct nvs_domain *nvs_domain_create(struct nvs_sched *sched, struct nvs_domain *nvs_domain_create(struct nvs_sched *sched,
const char *name, u32 timeslice, u32 preempt_grace, const char *name, u64 timeslice, u64 preempt_grace,
void *priv) void *priv)
{ {
struct nvs_domain_list *dlist = sched->domain_list; struct nvs_domain_list *dlist = sched->domain_list;
@@ -31,8 +31,8 @@ struct nvs_domain *nvs_domain_create(struct nvs_sched *sched,
nvs_memset(dom, 0, sizeof(*dom)); nvs_memset(dom, 0, sizeof(*dom));
strncpy(dom->name, name, sizeof(dom->name) - 1); strncpy(dom->name, name, sizeof(dom->name) - 1);
dom->timeslice_us = timeslice; dom->timeslice_ns = timeslice;
dom->preempt_grace_us = preempt_grace; dom->preempt_grace_ns = preempt_grace;
dom->priv = priv; dom->priv = priv;
nvs_log_event(sched, NVS_EV_CREATE_DOMAIN, 0U); nvs_log_event(sched, NVS_EV_CREATE_DOMAIN, 0U);