From 55afe1ff4c9926b5374b247b5af1e56aaca42feb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Konsta=20H=C3=B6ltt=C3=A4?= Date: Wed, 8 Dec 2021 16:01:09 +0200 Subject: [PATCH] gpu: nvgpu: improve nvs uapi MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Make the domain scheduler timeslice type nanoseconds to future proof the interface - Return -ENOSYS from ioctls if the nvs code is not initialized - Return the number of domains also when user supplied array is present - Use domain id instead of name for TSG binding - Improve documentation in the uapi headers - Verify that reserved fields are zeroed - Extend some internal logging - Release the sched mutex on alloc error - Add file mode checks in the nvs ioctls. The create and remove ioctls require writable file permissions, while the query does not; this allows filesystem based access control on domain management on the single dev node. Jira NVGPU-6788 Change-Id: I668eb5972a0ed1073e84a4ae30e3069bf0b59e16 Signed-off-by: Konsta Hölttä Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2639017 Reviewed-by: svc-mobile-coverity Reviewed-by: svc-mobile-cert Reviewed-by: Alex Waterman Reviewed-by: mobile promotions Tested-by: mobile promotions GVS: Gerrit_Virtual_Submit --- drivers/gpu/nvgpu/common/fifo/runlist.c | 4 +- drivers/gpu/nvgpu/common/fifo/tsg.c | 10 +- drivers/gpu/nvgpu/common/nvs/nvs_sched.c | 62 +++++++--- drivers/gpu/nvgpu/include/nvgpu/nvs.h | 12 +- drivers/gpu/nvgpu/include/nvgpu/tsg.h | 15 ++- drivers/gpu/nvgpu/os/linux/ioctl_nvs.c | 145 ++++++++++++++++------- drivers/gpu/nvgpu/os/linux/ioctl_tsg.c | 18 ++- include/uapi/linux/nvgpu-nvs.h | 56 +++++---- include/uapi/linux/nvgpu.h | 7 +- nvsched/include/nvs/domain.h | 6 +- nvsched/src/domain.c | 6 +- 11 files changed, 234 insertions(+), 107 deletions(-) diff --git a/drivers/gpu/nvgpu/common/fifo/runlist.c b/drivers/gpu/nvgpu/common/fifo/runlist.c index ae17bfd52..43fadf5e4 100644 --- 
a/drivers/gpu/nvgpu/common/fifo/runlist.c +++ b/drivers/gpu/nvgpu/common/fifo/runlist.c @@ -572,6 +572,8 @@ static void runlist_select_locked(struct gk20a *g, struct nvgpu_runlist *runlist gk20a_busy_noresume(g); if (nvgpu_is_powered_off(g)) { + rl_dbg(g, "Runlist[%u]: power is off, skip submit", + runlist->id); gk20a_idle_nosuspend(g); return; } @@ -580,7 +582,7 @@ static void runlist_select_locked(struct gk20a *g, struct nvgpu_runlist *runlist gk20a_idle_nosuspend(g); if (err != 0) { - nvgpu_err(g, "failed to hold power for runlist switch"); + nvgpu_err(g, "failed to hold power for runlist submit"); /* * probably shutting down though, so don't bother propagating * the error. Power is already on when the domain scheduler is diff --git a/drivers/gpu/nvgpu/common/fifo/tsg.c b/drivers/gpu/nvgpu/common/fifo/tsg.c index aeea43749..62c8d0531 100644 --- a/drivers/gpu/nvgpu/common/fifo/tsg.c +++ b/drivers/gpu/nvgpu/common/fifo/tsg.c @@ -155,7 +155,7 @@ int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch) } #ifdef CONFIG_NVS_PRESENT -int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, const char *domain_name) +int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, u64 domain_id) { struct nvgpu_runlist_domain *rl_domain; struct nvgpu_nvs_domain *nvs_domain; @@ -166,8 +166,9 @@ int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, const char *domain_name) return -EINVAL; } - nvs_domain = nvgpu_nvs_domain_get(g, domain_name); + nvs_domain = nvgpu_nvs_domain_by_id(g, domain_id); if (nvs_domain == NULL) { + nvgpu_err(g, "nvs domain not found (%llu)", domain_id); return -ENOENT; } @@ -175,8 +176,9 @@ int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, const char *domain_name) * The domain ptr will get updated with the right id once the runlist * gets specified based on the first channel. 
*/ - rl_domain = nvgpu_rl_domain_get(g, 0, domain_name); + rl_domain = nvgpu_rl_domain_get(g, 0, nvs_domain->parent->name); if (rl_domain == NULL) { + nvgpu_err(g, "rl domain not found (%s)", nvs_domain->parent->name); /* * This shouldn't happen because the nvs domain guarantees RL domains. * @@ -858,7 +860,7 @@ int nvgpu_tsg_open_common(struct gk20a *g, struct nvgpu_tsg *tsg, pid_t pid) * gets specified based on the first channel. */ tsg->rl_domain = nvgpu_rl_domain_get(g, 0, "(default)"); - tsg->nvs_domain = nvgpu_nvs_domain_get(g, "(default)"); + tsg->nvs_domain = nvgpu_nvs_domain_by_name(g, "(default)"); #ifdef CONFIG_NVGPU_DEBUGGER tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE; #endif diff --git a/drivers/gpu/nvgpu/common/nvs/nvs_sched.c b/drivers/gpu/nvgpu/common/nvs/nvs_sched.c index 19c92bdcc..f393f19aa 100644 --- a/drivers/gpu/nvgpu/common/nvs/nvs_sched.c +++ b/drivers/gpu/nvgpu/common/nvs/nvs_sched.c @@ -88,12 +88,12 @@ static void nvgpu_nvs_worker_wakeup_process_item( /* placeholder; never called yet */ } -static u32 nvgpu_nvs_tick(struct gk20a *g) +static u64 nvgpu_nvs_tick(struct gk20a *g) { struct nvgpu_nvs_scheduler *sched = g->scheduler; struct nvgpu_nvs_domain *domain; struct nvs_domain *nvs_domain; - u32 timeslice; + u64 timeslice; nvs_dbg(g, "nvs tick"); @@ -104,14 +104,14 @@ static u32 nvgpu_nvs_tick(struct gk20a *g) if (domain == NULL) { /* nothing to schedule, TODO wait for an event instead */ nvgpu_mutex_release(&g->sched_mutex); - return 100000; + return 100 * NSEC_PER_MSEC; } nvs_domain = domain->parent->next; if (nvs_domain == NULL) { nvs_domain = g->scheduler->sched->domain_list->domains; } - timeslice = nvs_domain->timeslice_us; + timeslice = nvs_domain->timeslice_ns; nvgpu_runlist_tick(g); sched->active_domain = nvs_domain->priv; @@ -128,10 +128,11 @@ static void nvgpu_nvs_worker_wakeup_post_process(struct nvgpu_worker *worker) nvgpu_nvs_worker_from_worker(worker); if (nvgpu_timeout_peek_expired(&nvs_worker->timeout)) 
{ - u32 next_timeout_us = nvgpu_nvs_tick(g); + u32 next_timeout_ns = nvgpu_nvs_tick(g); - if (next_timeout_us != 0U) { - nvs_worker->current_timeout = (next_timeout_us + 999U) / 1000U; + if (next_timeout_ns != 0U) { + nvs_worker->current_timeout = + (next_timeout_ns + NSEC_PER_MSEC - 1) / NSEC_PER_MSEC; } nvgpu_timeout_init_cpu_timer(g, &nvs_worker->timeout, @@ -175,7 +176,10 @@ int nvgpu_nvs_init(struct gk20a *g) } if (nvgpu_rl_domain_get(g, 0, "(default)") == NULL) { - int err = nvgpu_nvs_add_domain(g, "(default)", 1000*1000, 10*1000, &domain); + int err = nvgpu_nvs_add_domain(g, "(default)", + 100U * NSEC_PER_MSEC, + 0U, + &domain); if (err != 0) { return err; } @@ -284,8 +288,8 @@ static u64 nvgpu_nvs_new_id(struct gk20a *g) return nvgpu_atomic64_inc_return(&g->scheduler->id_counter); } -int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u32 timeslice, - u32 preempt_grace, struct nvgpu_nvs_domain **pdomain) +int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u64 timeslice, + u64 preempt_grace, struct nvgpu_nvs_domain **pdomain) { int err = 0; struct nvs_domain *nvs_dom; @@ -313,6 +317,7 @@ int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u32 timeslice, timeslice, preempt_grace, nvgpu_dom); if (nvs_dom == NULL) { + nvs_dbg(g, "failed to create nvs domain for %s", name); nvgpu_kfree(g, nvgpu_dom); err = -ENOMEM; goto unlock; @@ -320,9 +325,10 @@ int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u32 timeslice, err = nvgpu_rl_domain_alloc(g, name); if (err != 0) { + nvs_dbg(g, "failed to alloc rl domain for %s", name); nvs_domain_destroy(g->scheduler->sched, nvs_dom); nvgpu_kfree(g, nvgpu_dom); - return err; + goto unlock; } @@ -339,14 +345,15 @@ unlock: } struct nvgpu_nvs_domain * -nvgpu_nvs_get_dom_by_id(struct gk20a *g, struct nvs_sched *sched, u64 dom_id) +nvgpu_nvs_domain_by_id_locked(struct gk20a *g, u64 domain_id) { + struct nvgpu_nvs_scheduler *sched = g->scheduler; struct nvs_domain *nvs_dom; - 
nvs_domain_for_each(sched, nvs_dom) { + nvs_domain_for_each(sched->sched, nvs_dom) { struct nvgpu_nvs_domain *nvgpu_dom = nvs_dom->priv; - if (nvgpu_dom->id == dom_id) { + if (nvgpu_dom->id == domain_id) { return nvgpu_dom; } } @@ -355,7 +362,26 @@ nvgpu_nvs_get_dom_by_id(struct gk20a *g, struct nvs_sched *sched, u64 dom_id) } struct nvgpu_nvs_domain * -nvgpu_nvs_domain_get(struct gk20a *g, const char *name) +nvgpu_nvs_domain_by_id(struct gk20a *g, u64 domain_id) +{ + struct nvgpu_nvs_domain *dom = NULL; + + nvgpu_mutex_acquire(&g->sched_mutex); + + dom = nvgpu_nvs_domain_by_id_locked(g, domain_id); + if (dom == NULL) { + goto unlock; + } + + dom->ref++; + +unlock: + nvgpu_mutex_release(&g->sched_mutex); + return dom; +} + +struct nvgpu_nvs_domain * +nvgpu_nvs_domain_by_name(struct gk20a *g, const char *name) { struct nvs_domain *nvs_dom; struct nvgpu_nvs_domain *dom = NULL; @@ -395,7 +421,7 @@ int nvgpu_nvs_del_domain(struct gk20a *g, u64 dom_id) nvs_dbg(g, "Attempting to remove domain: %llu", dom_id); - nvgpu_dom = nvgpu_nvs_get_dom_by_id(g, s->sched, dom_id); + nvgpu_dom = nvgpu_nvs_domain_by_id_locked(g, dom_id); if (nvgpu_dom == NULL) { nvs_dbg(g, "domain %llu does not exist!", dom_id); err = -ENOENT; @@ -473,7 +499,7 @@ void nvgpu_nvs_print_domain(struct gk20a *g, struct nvgpu_nvs_domain *domain) struct nvs_domain *nvs_dom = domain->parent; nvs_dbg(g, "Domain %s", nvs_dom->name); - nvs_dbg(g, " timeslice: %u us", nvs_dom->timeslice_us); - nvs_dbg(g, " preempt grace: %u us", nvs_dom->preempt_grace_us); + nvs_dbg(g, " timeslice: %llu ns", nvs_dom->timeslice_ns); + nvs_dbg(g, " preempt grace: %llu ns", nvs_dom->preempt_grace_ns); nvs_dbg(g, " domain ID: %llu", domain->id); } diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvs.h b/drivers/gpu/nvgpu/include/nvgpu/nvs.h index ddc147052..df51d46c3 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/nvs.h +++ b/drivers/gpu/nvgpu/include/nvgpu/nvs.h @@ -88,14 +88,14 @@ int nvgpu_nvs_suspend(struct gk20a *g); void 
nvgpu_nvs_get_log(struct gk20a *g, s64 *timestamp, const char **msg); u32 nvgpu_nvs_domain_count(struct gk20a *g); int nvgpu_nvs_del_domain(struct gk20a *g, u64 dom_id); -int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u32 timeslice, - u32 preempt_grace, struct nvgpu_nvs_domain **pdomain); -struct nvgpu_nvs_domain * -nvgpu_nvs_get_dom_by_id(struct gk20a *g, struct nvs_sched *sched, u64 dom_id); +int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u64 timeslice, + u64 preempt_grace, struct nvgpu_nvs_domain **pdomain); void nvgpu_nvs_print_domain(struct gk20a *g, struct nvgpu_nvs_domain *domain); struct nvgpu_nvs_domain * -nvgpu_nvs_domain_get(struct gk20a *g, const char *name); +nvgpu_nvs_domain_by_id(struct gk20a *g, u64 domain_id); +struct nvgpu_nvs_domain * +nvgpu_nvs_domain_by_name(struct gk20a *g, const char *name); void nvgpu_nvs_domain_put(struct gk20a *g, struct nvgpu_nvs_domain *dom); /* * Debug wrapper for NVS code. @@ -119,7 +119,7 @@ static inline int nvgpu_nvs_suspend(struct gk20a *g) } static inline struct nvgpu_nvs_domain * -nvgpu_nvs_domain_get(struct gk20a *g, const char *name) +nvgpu_nvs_domain_by_name(struct gk20a *g, const char *name) { return NULL; } diff --git a/drivers/gpu/nvgpu/include/nvgpu/tsg.h b/drivers/gpu/nvgpu/include/nvgpu/tsg.h index b17a14884..c9fc591bd 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/tsg.h +++ b/drivers/gpu/nvgpu/include/nvgpu/tsg.h @@ -381,7 +381,20 @@ int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch); #ifdef CONFIG_NVS_PRESENT -int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, const char *domain_name); +/** + * @brief Bind a TSG to a domain. + * + * @param tsg [in] Pointer to TSG struct. + * @param domain_id [in] Domain identifier. + * + * Make this TSG participate in the given domain, such that it can only be + * seen by runlist HW when the domain has been scheduled in. + * + * The TSG must have no channels at this point. 
+ * + * @return 0 for successful bind, < 0 for failure. + */ +int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, u64 domain_id); #endif /** diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_nvs.c b/drivers/gpu/nvgpu/os/linux/ioctl_nvs.c index 56e871219..67ce5f1c2 100644 --- a/drivers/gpu/nvgpu/os/linux/ioctl_nvs.c +++ b/drivers/gpu/nvgpu/os/linux/ioctl_nvs.c @@ -55,10 +55,30 @@ static int nvgpu_nvs_ioctl_create_domain( struct nvgpu_nvs_domain *domain = NULL; int err; + if (dom_args->reserved1 != 0) { + return -EINVAL; + } + + if (dom_args->domain_params.reserved1 != 0) { + return -EINVAL; + } + + if (dom_args->domain_params.reserved2 != 0) { + return -EINVAL; + } + + if (dom_args->domain_params.dom_id != 0) { + return -EINVAL; + } + + if (g->scheduler == NULL) { + return -ENOSYS; + } + err = nvgpu_nvs_add_domain(g, dom_args->domain_params.name, - dom_args->domain_params.timeslice_us, - dom_args->domain_params.preempt_grace_us, + dom_args->domain_params.timeslice_ns, + dom_args->domain_params.preempt_grace_ns, &domain); if (err != 0) { return err; @@ -71,9 +91,18 @@ static int nvgpu_nvs_ioctl_create_domain( return 0; } -static int nvgpu_nvs_ioctl_remove_domain(struct gk20a *g, u32 dom_id) +static int nvgpu_nvs_ioctl_remove_domain(struct gk20a *g, + struct nvgpu_nvs_ioctl_remove_domain *args) { - return nvgpu_nvs_del_domain(g, dom_id); + if (args->reserved1 != 0) { + return -EINVAL; + } + + if (g->scheduler == NULL) { + return -ENOSYS; + } + + return nvgpu_nvs_del_domain(g, args->dom_id); } static int nvgpu_nvs_ioctl_query_domains( @@ -84,45 +113,61 @@ static int nvgpu_nvs_ioctl_query_domains( struct nvgpu_nvs_domain *nvgpu_dom; struct nvs_domain *nvs_dom; u32 index; - struct nvgpu_nvs_ioctl_domain *args_domains = (void __user *)(uintptr_t)args->domains; + u32 user_capacity = args->nr; + struct nvgpu_nvs_ioctl_domain *args_domains = + (void __user *)(uintptr_t)args->domains; - /* First call variant: return number of domains. 
*/ - if (args_domains == NULL) { - args->nr = nvgpu_nvs_domain_count(g); - if (copy_to_user(user_arg, args, sizeof(*args))) { - return -EFAULT; - } - nvs_dbg(g, "Nr domains: %u", args->nr); - return 0; + if (args->reserved0 != 0) { + return -EINVAL; } - /* - * Second call variant: populate the passed array with domain info. - */ - index = 0; - nvs_domain_for_each(g->scheduler->sched, nvs_dom) { - struct nvgpu_nvs_ioctl_domain dom; + if (args->reserved1 != 0) { + return -EINVAL; + } - nvgpu_dom = nvs_dom->priv; + if (g->scheduler == NULL) { + return -ENOSYS; + } - nvs_dbg(g, "Copying dom #%u [%s] (%llu)", - index, nvs_dom->name, nvgpu_dom->id); + /* First call variant: return number of domains. */ + args->nr = nvgpu_nvs_domain_count(g); + if (copy_to_user(user_arg, args, sizeof(*args))) { + return -EFAULT; + } + nvs_dbg(g, "Nr domains: %u", args->nr); - (void)memset(&dom, 0, sizeof(dom)); + if (args_domains != NULL) { + /* + * Second call variant: populate the passed array with domain info. 
+ */ + index = 0; + nvs_domain_for_each(g->scheduler->sched, nvs_dom) { + struct nvgpu_nvs_ioctl_domain dom; + if (index == user_capacity) { + break; + } - strncpy(dom.name, nvs_dom->name, sizeof(dom.name) - 1); - dom.timeslice_us = nvs_dom->timeslice_us; - dom.preempt_grace_us = nvs_dom->preempt_grace_us; - dom.subscheduler = nvgpu_dom->subscheduler; - dom.dom_id = nvgpu_dom->id; + nvgpu_dom = nvs_dom->priv; - if (copy_to_user(&args_domains[index], - &dom, sizeof(dom))) { - nvs_dbg(g, "Fault during copy of domain to userspace."); - return -EFAULT; + nvs_dbg(g, "Copying dom #%u [%s] (%llu)", + index, nvs_dom->name, nvgpu_dom->id); + + (void)memset(&dom, 0, sizeof(dom)); + + strncpy(dom.name, nvs_dom->name, sizeof(dom.name) - 1); + dom.timeslice_ns = nvs_dom->timeslice_ns; + dom.preempt_grace_ns = nvs_dom->preempt_grace_ns; + dom.subscheduler = nvgpu_dom->subscheduler; + dom.dom_id = nvgpu_dom->id; + + if (copy_to_user(&args_domains[index], + &dom, sizeof(dom))) { + nvs_dbg(g, "Fault during copy of domain to userspace."); + return -EFAULT; + } + + index += 1; } - - index += 1; } return 0; @@ -130,9 +175,10 @@ static int nvgpu_nvs_ioctl_query_domains( long nvgpu_nvs_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { + u8 buf[NVGPU_NVS_IOCTL_MAX_ARG_SIZE] = { 0 }; + bool writable = filp->f_mode & FMODE_WRITE; struct gk20a *g = filp->private_data; int err = 0; - u8 buf[NVGPU_NVS_IOCTL_MAX_ARG_SIZE] = { 0 }; nvs_dbg(g, "IOC_TYPE: %c", _IOC_TYPE(cmd)); nvs_dbg(g, "IOC_NR: %u", _IOC_NR(cmd)); @@ -159,17 +205,21 @@ long nvgpu_nvs_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) struct nvgpu_nvs_ioctl_create_domain *args = (struct nvgpu_nvs_ioctl_create_domain *)buf; + if (!writable) { + err = -EPERM; + goto done; + } + err = nvgpu_nvs_ioctl_create_domain(g, args); if (err) goto done; /* - * Issue a remove domain IOCTL in case of fault when copying back to - * userspace. 
+ * Remove the domain in case of fault when copying back to + * userspace to keep this ioctl atomic. */ if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd))) { - nvgpu_nvs_ioctl_remove_domain(g, - args->domain_params.dom_id); + nvgpu_nvs_del_domain(g, args->domain_params.dom_id); err = -EFAULT; goto done; } @@ -177,19 +227,26 @@ long nvgpu_nvs_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } case NVGPU_NVS_IOCTL_QUERY_DOMAINS: + { + struct nvgpu_nvs_ioctl_query_domains *args = + (struct nvgpu_nvs_ioctl_query_domains *)buf; + err = nvgpu_nvs_ioctl_query_domains(g, (void __user *)arg, - (void *)buf); - if (err) - goto done; - + args); break; + } case NVGPU_NVS_IOCTL_REMOVE_DOMAIN: { struct nvgpu_nvs_ioctl_remove_domain *args = (struct nvgpu_nvs_ioctl_remove_domain *)buf; - err = nvgpu_nvs_ioctl_remove_domain(g, args->dom_id); + if (!writable) { + err = -EPERM; + goto done; + } + + err = nvgpu_nvs_ioctl_remove_domain(g, args); break; } default: diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c b/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c index 75cb40df1..ec69c643a 100644 --- a/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c +++ b/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c @@ -181,7 +181,23 @@ static int nvgpu_tsg_bind_scheduling_domain(struct nvgpu_tsg *tsg, struct nvgpu_tsg_bind_scheduling_domain_args *args) { - return nvgpu_tsg_bind_domain(tsg, args->domain_name); + if (args->reserved[0] != 0) { + return -EINVAL; + } + + if (args->reserved[1] != 0) { + return -EINVAL; + } + + if (args->reserved[2] != 0) { + return -EINVAL; + } + + if (tsg->g->scheduler == NULL) { + return -ENOSYS; + } + + return nvgpu_tsg_bind_domain(tsg, args->domain_id); } #endif diff --git a/include/uapi/linux/nvgpu-nvs.h b/include/uapi/linux/nvgpu-nvs.h index 9a0e0096b..7f76ce999 100644 --- a/include/uapi/linux/nvgpu-nvs.h +++ b/include/uapi/linux/nvgpu-nvs.h @@ -23,9 +23,9 @@ */ struct nvgpu_nvs_ioctl_domain { /* - * Human readable name for this domain. 
+ * Human readable null-terminated name for this domain. */ - char name[32]; + char name[32]; /* * Scheduling parameters: specify how long this domain should be scheduled @@ -33,15 +33,15 @@ struct nvgpu_nvs_ioctl_domain { * preempting. A value of zero is treated as an infinite timeslice or an * infinite grace period, respectively. */ - __u32 timeslice_us; - __u32 preempt_grace_us; + __u64 timeslice_ns; + __u64 preempt_grace_ns; /* * Pick which subscheduler to use. These will be implemented by the kernel * as needed. There'll always be at least one, which is the host HW built in * round-robin scheduler. */ - __u32 subscheduler; + __u32 subscheduler; /* * GPU host hardware round-robin. @@ -50,19 +50,21 @@ struct nvgpu_nvs_ioctl_domain { /* * Populated by the IOCTL when created: unique identifier. User space - * should never set this variable. + * must set this to 0. */ - __u64 dom_id; + __u64 dom_id; - __u64 reserved1; - __u64 reserved2; + /* Must be 0. */ + __u64 reserved1; + /* Must be 0. */ + __u64 reserved2; }; /** * NVGPU_NVS_IOCTL_CREATE_DOMAIN * * Create a domain - essentially a group of GPU contexts. Applications - * cacan be bound into this domain on request for each TSG. + * can be bound into this domain on request for each TSG. * * The domain ID is returned in dom_id; this id is _not_ secure. The * nvsched device needs to have restricted permissions such that only a @@ -81,16 +83,18 @@ struct nvgpu_nvs_ioctl_create_domain { */ struct nvgpu_nvs_ioctl_domain domain_params; - __u64 reserved1; + /* Must be 0. */ + __u64 reserved1; }; struct nvgpu_nvs_ioctl_remove_domain { /* * In: a domain_id to remove. */ - __u64 dom_id; + __u64 dom_id; - __u64 reserved1; + /* Must be 0. */ + __u64 reserved1; }; /** @@ -99,26 +103,32 @@ struct nvgpu_nvs_ioctl_remove_domain { * Query the current list of domains in the scheduler. This is a two * part IOCTL. 
* - * If domains is NULL, then this IOCTL will populate nr with the number + * If domains is 0, then this IOCTL will populate nr with the number * of present domains. * - * If domains is not NULL, then this IOCTL will treat domains as an - * array with nr elements and write up to nr domains into that array. + * If domains is nonzero, then this IOCTL will treat domains as a pointer to an + * array of nvgpu_nvs_ioctl_domain and will write up to nr domains into that + * array. The nr field will be updated with the number of present domains, + * which may be more than the number of entries written. */ struct nvgpu_nvs_ioctl_query_domains { /* - * In/Out: If NULL, leave untouched. If not NULL, then write - * up to nr domains into the domain elements pointed to by - * domains. + * In/Out: If 0, leave untouched. If nonzero, then write up to nr + * elements of nvgpu_nvs_ioctl_domain into where domains points to. */ __u64 domains; /* - * In/Out: If domains is NULL, then populate with the number - * of domains present. Otherwise nr specifies the capacity of - * the domains array pointed to by domains. + * - In: the capacity of the domains array if domains is not 0. + * - Out: populate with the number of domains present. */ - __u32 nr; + __u32 nr; + + /* Must be 0. */ + __u32 reserved0; + + /* Must be 0. 
*/ + __u64 reserved1; }; #define NVGPU_NVS_IOCTL_CREATE_DOMAIN \ diff --git a/include/uapi/linux/nvgpu.h b/include/uapi/linux/nvgpu.h index c171df1fc..0d06b739f 100644 --- a/include/uapi/linux/nvgpu.h +++ b/include/uapi/linux/nvgpu.h @@ -41,9 +41,10 @@ struct nvgpu_tsg_bind_channel_ex_args { }; struct nvgpu_tsg_bind_scheduling_domain_args { - /* in: name of the domain this tsg will be bound to */ - __u8 domain_name[16]; - __u8 reserved[16]; + /* in: id of the domain this tsg will be bound to */ + __u64 domain_id; + /* Must be set to 0 */ + __u64 reserved[3]; }; /* diff --git a/nvsched/include/nvs/domain.h b/nvsched/include/nvs/domain.h index 2814e21a0..637caa573 100644 --- a/nvsched/include/nvs/domain.h +++ b/nvsched/include/nvs/domain.h @@ -48,8 +48,8 @@ struct nvs_domain { * preempting. A value of zero is treated as an infinite timeslice or an * infinite grace period. */ - u32 timeslice_us; - u32 preempt_grace_us; + u64 timeslice_ns; + u64 preempt_grace_ns; /* * Priv pointer for downstream use. @@ -66,7 +66,7 @@ struct nvs_domain { (domain_ptr) = (domain_ptr)->next) struct nvs_domain *nvs_domain_create(struct nvs_sched *sched, - const char *name, u32 timeslice, u32 preempt_grace, + const char *name, u64 timeslice, u64 preempt_grace, void *priv); void nvs_domain_destroy(struct nvs_sched *sched, struct nvs_domain *dom); void nvs_domain_clear_all(struct nvs_sched *sched); diff --git a/nvsched/src/domain.c b/nvsched/src/domain.c index b95468689..92e98cee8 100644 --- a/nvsched/src/domain.c +++ b/nvsched/src/domain.c @@ -16,7 +16,7 @@ * Create and add a new domain to the end of the domain list. 
*/ struct nvs_domain *nvs_domain_create(struct nvs_sched *sched, - const char *name, u32 timeslice, u32 preempt_grace, + const char *name, u64 timeslice, u64 preempt_grace, void *priv) { struct nvs_domain_list *dlist = sched->domain_list; @@ -31,8 +31,8 @@ struct nvs_domain *nvs_domain_create(struct nvs_sched *sched, nvs_memset(dom, 0, sizeof(*dom)); strncpy(dom->name, name, sizeof(dom->name) - 1); - dom->timeslice_us = timeslice; - dom->preempt_grace_us = preempt_grace; + dom->timeslice_ns = timeslice; + dom->preempt_grace_ns = preempt_grace; dom->priv = priv; nvs_log_event(sched, NVS_EV_CREATE_DOMAIN, 0U);