gpu: nvgpu: scheduler management uapi

Add ioctls for creating, removing, and querying scheduling domains, and
interface with the "nvsched" entity that will become the core scheduler.
Include the scheduler in the Linux build.

The core scheduler code will ultimately hold data on and control what
gets scheduled, but this intermediate layer in nvgpu-rm needs a bit of
bookkeeping to manage the userspace interface.

To keep changes isolated, this does not touch the internal runlist
domains yet. The core scheduler logic will eventually control the
runlist domains.
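
For illustration, a rough userspace sketch of the new interface follows
(not part of this change; the device node path and the uapi header
install path are assumptions and depend on how the "nvsched" node is
exposed on a given platform):

/* Hypothetical usage sketch of the new domain-management ioctls. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvgpu-nvs.h> /* assumed install path of the uapi header added here */

static int demo_create_domain(const char *nvsched_path)
{
	struct nvgpu_nvs_ioctl_create_domain create = { 0 };
	struct nvgpu_nvs_ioctl_remove_domain remove = { 0 };
	int fd = open(nvsched_path, O_RDWR); /* e.g. the per-GPU "nvsched" node */
	int err;

	if (fd < 0)
		return -1;

	strncpy(create.domain_params.name, "demo",
		sizeof(create.domain_params.name) - 1);
	create.domain_params.timeslice_us = 1000U;
	create.domain_params.preempt_grace_us = 0U; /* 0 = infinite grace */
	create.domain_params.subscheduler =
		NVGPU_SCHED_IOCTL_SUBSCHEDULER_HOST_HW_RR;

	/* dom_id is filled in by the kernel on success. */
	err = ioctl(fd, NVGPU_NVS_IOCTL_CREATE_DOMAIN, &create);
	if (err == 0) {
		remove.dom_id = create.domain_params.dom_id;
		err = ioctl(fd, NVGPU_NVS_IOCTL_REMOVE_DOMAIN, &remove);
	}

	close(fd);
	return err;
}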

Jira NVGPU-6788

Change-Id: I7b4064edb6205acbac2d8c593dad019d517243ce
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2463625
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Konsta Hölttä
2021-11-25 10:33:48 +02:00
committed by mobile promotions
parent bfd7fc8649
commit 1d14a4412f
21 changed files with 851 additions and 1 deletion

View File

@@ -208,6 +208,14 @@ sched:
owner: Thomas F
sources: [ include/nvgpu/sched.h ]
nvsched:
safe: no
owner: Alex W
sources: [ common/nvs/nvs_sched.c,
include/external-nvs/impl.h,
include/external-nvs/types.h,
include/nvgpu/nvs.h ]
semaphore:
safe: no
owner: Alex W

View File

@@ -200,6 +200,9 @@ sched:
os/linux/sched.c,
os/linux/sched.h ]
nvsched:
sources: [ os/linux/ioctl_nvs.c, os/linux/ioctl_nvs.h ]
sim:
sources: [ os/linux/sim.c, os/linux/sim_pci.c ]

View File

@@ -954,3 +954,15 @@ endif
nvgpu-y += \
os/linux/platform_ga10b_tegra.o
ifeq ($(CONFIG_NVS_PRESENT),y)
include $(srctree.nvgpu)/nvsched/Makefile.sources
nvgpu-y += $(patsubst %.c,../../../nvsched/%.o,$(NVS_SOURCES))
nvgpu-y += \
os/linux/ioctl_nvs.o \
common/nvs/nvs_sched.o
ccflags-y += \
$(patsubst %,-I$(srctree.nvgpu)/nvsched/%,$(NVS_INCLUDE)) \
-I$(srctree.nvgpu)/drivers/gpu/nvgpu/include/external-nvs \
-DNVS_USE_IMPL_TYPES
endif

View File

@@ -1,6 +1,9 @@
# Turn off all other configs, if CONFIG_GK20A is not set
CONFIG_GK20A := m
# Enable the nv_sched build.
CONFIG_NVS_PRESENT := y
# Enable GK20A PMU features.
CONFIG_GK20A_PMU := y
@@ -252,3 +255,6 @@ endif
ifeq ($(CONFIG_NVGPU_GSP_STRESS_TEST),y)
ccflags-y += -DCONFIG_NVGPU_GSP_STRESS_TEST
endif
ifeq ($(CONFIG_NVS_PRESENT),y)
ccflags-y += -DCONFIG_NVS_PRESENT
endif

View File

@@ -153,6 +153,7 @@ int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
return err;
}
#ifdef CONFIG_NVS_PRESENT
int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, const char *domain_name)
{
struct nvgpu_runlist_domain *domain;
@@ -176,6 +177,7 @@ int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, const char *domain_name)
return 0;
}
#endif
static bool nvgpu_tsg_is_multi_channel(struct nvgpu_tsg *tsg)
{

View File

@@ -52,6 +52,9 @@
#include <nvgpu/cic_mon.h>
#include <nvgpu/cic_rm.h>
#include <nvgpu/fbp.h>
#ifdef CONFIG_NVS_PRESENT
#include <nvgpu/nvs.h>
#endif
#ifdef CONFIG_NVGPU_LS_PMU
#include <nvgpu/pmu/pmu_pstate.h>
@@ -920,6 +923,9 @@ int nvgpu_finalize_poweron(struct gk20a *g)
#if defined(CONFIG_NVGPU_GSP_SCHEDULER) || defined(CONFIG_NVGPU_GSP_STRESS_TEST)
/* Init gsp ops */
NVGPU_INIT_TABLE_ENTRY(&nvgpu_gsp_sw_init, NO_FLAG),
#endif
#ifdef CONFIG_NVS_PRESENT
NVGPU_INIT_TABLE_ENTRY(nvgpu_nvs_init, NO_FLAG),
#endif
};
size_t i;

View File

@@ -0,0 +1,220 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvs/log.h>
#include <nvs/sched.h>
#include <nvgpu/nvs.h>
#include <nvgpu/kmem.h>
#include <nvgpu/gk20a.h>
static struct nvs_sched_ops nvgpu_nvs_ops = {
.preempt = NULL,
.recover = NULL,
};
/**
* Init call to prepare the scheduler mutex. We won't actually allocate a
* scheduler until someone opens the scheduler node.
*/
int nvgpu_nvs_init(struct gk20a *g)
{
nvgpu_mutex_init(&g->sched_mutex);
return 0;
}
int nvgpu_nvs_open(struct gk20a *g)
{
int err = 0;
nvs_dbg(g, "Opening NVS node.");
nvgpu_mutex_acquire(&g->sched_mutex);
/*
* If there's already a scheduler present, we are done; no need for
* further action.
*/
if (g->scheduler != NULL) {
goto unlock;
}
g->scheduler = nvgpu_kzalloc(g, sizeof(*g->scheduler));
if (g->scheduler == NULL) {
err = -ENOMEM;
goto unlock;
}
/* separately allocated to keep the definition hidden from other files */
g->scheduler->sched = nvgpu_kzalloc(g, sizeof(*g->scheduler->sched));
if (g->scheduler->sched == NULL) {
err = -ENOMEM;
goto unlock;
}
nvs_dbg(g, " Creating scheduler.");
err = nvs_sched_create(g->scheduler->sched, &nvgpu_nvs_ops, g);
unlock:
nvgpu_mutex_release(&g->sched_mutex);
if (err) {
nvs_dbg(g, " Failed! Error code: %d", err);
if (g->scheduler) {
nvgpu_kfree(g, g->scheduler->sched);
nvgpu_kfree(g, g->scheduler);
g->scheduler = NULL;
}
}
return err;
}
/*
* A trivial allocator for now.
*/
static u64 nvgpu_nvs_new_id(struct gk20a *g)
{
return nvgpu_atomic64_inc_return(&g->scheduler->id_counter);
}
int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u32 timeslice,
u32 preempt_grace, struct nvgpu_nvs_domain **pdomain)
{
int err = 0;
struct nvs_domain *nvs_dom;
struct nvgpu_nvs_domain *nvgpu_dom;
nvs_dbg(g, "Adding new domain: %s", name);
nvgpu_mutex_acquire(&g->sched_mutex);
if (nvs_domain_by_name(g->scheduler->sched, name) != NULL) {
err = -EEXIST;
goto unlock;
}
nvgpu_dom = nvgpu_kzalloc(g, sizeof(*nvgpu_dom));
if (nvgpu_dom == NULL) {
err = -ENOMEM;
goto unlock;
}
nvgpu_dom->id = nvgpu_nvs_new_id(g);
nvs_dom = nvs_domain_create(g->scheduler->sched, name,
timeslice, preempt_grace, nvgpu_dom);
if (nvs_dom == NULL) {
nvgpu_kfree(g, nvgpu_dom);
err = -ENOMEM;
goto unlock;
}
nvgpu_dom->parent = nvs_dom;
*pdomain = nvgpu_dom;
unlock:
nvgpu_mutex_release(&g->sched_mutex);
return err;
}
struct nvgpu_nvs_domain *
nvgpu_nvs_get_dom_by_id(struct gk20a *g, struct nvs_sched *sched, u64 dom_id)
{
struct nvs_domain *nvs_dom;
nvs_domain_for_each(sched, nvs_dom) {
struct nvgpu_nvs_domain *nvgpu_dom = nvs_dom->priv;
if (nvgpu_dom->id == dom_id) {
return nvgpu_dom;
}
}
return NULL;
}
int nvgpu_nvs_del_domain(struct gk20a *g, u64 dom_id)
{
struct nvgpu_nvs_domain *nvgpu_dom;
struct nvs_domain *nvs_dom;
int err = 0;
nvgpu_mutex_acquire(&g->sched_mutex);
nvs_dbg(g, "Attempting to remove domain: %llu", dom_id);
nvgpu_dom = nvgpu_nvs_get_dom_by_id(g, g->scheduler->sched, dom_id);
if (nvgpu_dom == NULL) {
nvs_dbg(g, "domain %llu does not exist!", dom_id);
err = -EINVAL;
goto unlock;
}
nvs_dom = nvgpu_dom->parent;
nvs_domain_destroy(g->scheduler->sched, nvs_dom);
nvgpu_kfree(g, nvgpu_dom);
unlock:
nvgpu_mutex_release(&g->sched_mutex);
return err;
}
u32 nvgpu_nvs_domain_count(struct gk20a *g)
{
u32 count;
nvgpu_mutex_acquire(&g->sched_mutex);
count = nvs_domain_count(g->scheduler->sched);
nvgpu_mutex_release(&g->sched_mutex);
return count;
}
void nvgpu_nvs_get_log(struct gk20a *g, s64 *timestamp, const char **msg)
{
struct nvs_log_event ev;
nvs_log_get(g->scheduler->sched, &ev);
if (ev.event == NVS_EV_NO_EVENT) {
*timestamp = 0;
*msg = NULL;
return;
}
*msg = nvs_log_event_string(ev.event);
*timestamp = ev.timestamp;
}
void nvgpu_nvs_print_domain(struct gk20a *g, struct nvgpu_nvs_domain *domain)
{
struct nvs_domain *nvs_dom = domain->parent;
nvs_dbg(g, "Domain %s", nvs_dom->name);
nvs_dbg(g, " timeslice: %u us", nvs_dom->timeslice_us);
nvs_dbg(g, " preempt grace: %u us", nvs_dom->preempt_grace_us);
nvs_dbg(g, " domain ID: %llu", domain->id);
}

View File

@@ -0,0 +1,48 @@
/*
* Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NVS_IMPL_H
#define NVGPU_NVS_IMPL_H
#include <nvgpu/kmem.h>
#include <nvgpu/string.h>
#include <nvgpu/timers.h>
#include <nvgpu/log.h>
#define nvs_malloc(sched, size) \
nvgpu_kmalloc((struct gk20a *)(sched)->priv, (size))
#define nvs_free(sched, ptr) \
nvgpu_kfree((struct gk20a *)(sched)->priv, (ptr))
#define nvs_memset(ptr, value, length) \
memset((ptr), (value), (length))
#define nvs_timestamp() \
nvgpu_current_time_ns()
#define nvs_log(sched, fmt, args...) \
nvgpu_log((struct gk20a *)(sched)->priv, \
gpu_dbg_nvs_internal, (fmt), ##args)
#endif
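
A hypothetical illustration of how core nvsched code could exercise this
glue layer; only the macros above come from this header, and the
nvs_example_scratch() helper is made up for the sketch. It assumes
struct nvs_sched from <nvs/sched.h> with its priv pointer set to the
owning struct gk20a, as the macros already require.

/* Hypothetical example only; not part of this change. */
static int nvs_example_scratch(struct nvs_sched *sched, size_t len)
{
	void *buf = nvs_malloc(sched, len);

	if (buf == NULL)
		return -1;

	nvs_memset(buf, 0, len);
	nvs_log(sched, "scratch buffer of %zu bytes at t=%lld ns",
		len, (long long)nvs_timestamp());
	nvs_free(sched, buf);
	return 0;
}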

View File

@@ -0,0 +1,23 @@
/*
* Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/types.h>

View File

@@ -854,6 +854,11 @@ struct gk20a {
/** Flag to check if debugger and profiler support is enabled. */
u32 support_gpu_tools;
#ifdef CONFIG_NVS_PRESENT
struct nvgpu_nvs_scheduler *scheduler;
struct nvgpu_mutex sched_mutex;
#endif
};
/**

View File

@@ -82,5 +82,9 @@ enum nvgpu_log_type {
#define gpu_dbg_hwpm BIT(42) /* GPU HWPM. */
#define gpu_dbg_verbose BIT(43) /* More verbose logs. */
#define gpu_dbg_ce BIT(44) /* Copy Engine debugging */
#ifdef CONFIG_NVS_PRESENT
#define gpu_dbg_nvs BIT(45) /* NvGPU's NVS logging. */
#define gpu_dbg_nvs_internal BIT(46) /* Internal NVS logging. */
#endif
#endif

View File

@@ -0,0 +1,79 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_NVS_H
#define NVGPU_NVS_H
#include <nvs/domain.h>
#include <nvgpu/atomic.h>
#include <nvgpu/lock.h>
/*
* Max size we'll parse from an NVS log entry.
*/
#define NVS_LOG_BUF_SIZE 128
struct gk20a;
/*
* NvGPU KMD domain implementation details for nvsched.
*/
struct nvgpu_nvs_domain {
u64 id;
/*
* Subscheduler ID to define the scheduling within a domain. These will
* be implemented by the kernel as needed. There'll always be at least
* one, which is the host HW built in round-robin scheduler.
*/
u32 subscheduler;
/*
* Convenience pointer for linking back to the parent object.
*/
struct nvs_domain *parent;
};
struct nvgpu_nvs_scheduler {
struct nvs_sched *sched;
nvgpu_atomic64_t id_counter;
};
int nvgpu_nvs_init(struct gk20a *g);
int nvgpu_nvs_open(struct gk20a *g);
void nvgpu_nvs_get_log(struct gk20a *g, s64 *timestamp, const char **msg);
u32 nvgpu_nvs_domain_count(struct gk20a *g);
int nvgpu_nvs_del_domain(struct gk20a *g, u64 dom_id);
int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u32 timeslice,
u32 preempt_grace, struct nvgpu_nvs_domain **pdomain);
struct nvgpu_nvs_domain *
nvgpu_nvs_get_dom_by_id(struct gk20a *g, struct nvs_sched *sched, u64 dom_id);
void nvgpu_nvs_print_domain(struct gk20a *g, struct nvgpu_nvs_domain *domain);
/*
* Debug wrapper for NVS code.
*/
#define nvs_dbg(g, fmt, arg...) \
nvgpu_log(g, gpu_dbg_nvs, fmt, ##arg)
#endif

View File

@@ -373,7 +373,9 @@ void nvgpu_tsg_disable(struct nvgpu_tsg *tsg);
int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg,
struct nvgpu_channel *ch);
#ifdef CONFIG_NVS_PRESENT
int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, const char *domain_name);
#endif
/**
* @brief Get pointer to #nvgpu_tsg for the tsgid.

View File

@@ -32,6 +32,7 @@
#include "ioctl_dbg.h"
#include "ioctl_prof.h"
#include "power_ops.h"
#include "ioctl_nvs.h"
#include "ioctl.h"
#include "module.h"
#include "os_linux.h"
@@ -161,6 +162,17 @@ static const struct file_operations gk20a_sched_ops = {
.read = gk20a_sched_dev_read,
};
static const struct file_operations nvgpu_nvs_ops = {
.owner = THIS_MODULE,
.release = nvgpu_nvs_dev_release,
.open = nvgpu_nvs_dev_open,
#ifdef CONFIG_COMPAT
.compat_ioctl = nvgpu_nvs_dev_ioctl,
#endif
.unlocked_ioctl = nvgpu_nvs_dev_ioctl,
.read = nvgpu_nvs_dev_read,
};
struct nvgpu_dev_node {
/* Device node name */
char name[20];
@@ -185,6 +197,7 @@ static const struct nvgpu_dev_node dev_node_list[] = {
{"prof-ctx", &gk20a_prof_ctx_ops, false, true },
{"prof-dev", &gk20a_prof_dev_ops, false, true },
{"sched", &gk20a_sched_ops, false, false },
{"nvsched", &nvgpu_nvs_ops, false, false },
{"tsg", &gk20a_tsg_ops, false, false },
};

View File

@@ -17,6 +17,7 @@
#include <nvgpu/types.h>
#include <nvgpu/list.h>
#include <nvgpu/mig.h>
struct device;
struct class;

View File

@@ -0,0 +1,244 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <uapi/linux/nvgpu-nvs.h>
#include <nvgpu/nvs.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/nvgpu_init.h>
#include <nvs/sched.h>
#include <nvs/domain.h>
#include "ioctl.h"
int nvgpu_nvs_dev_open(struct inode *inode, struct file *filp)
{
struct nvgpu_cdev *cdev;
struct gk20a *g;
int err;
cdev = container_of(inode->i_cdev, struct nvgpu_cdev, cdev);
g = nvgpu_get_gk20a_from_cdev(cdev);
err = nvgpu_nvs_open(g);
if (err != 0) {
return err;
}
filp->private_data = g;
return 0;
}
int nvgpu_nvs_dev_release(struct inode *inode, struct file *filp)
{
/*
* Since the scheduler persists through a close() call, there's nothing
* to do on device close (for now).
*/
return 0;
}
static int nvgpu_nvs_ioctl_create_domain(
struct gk20a *g,
struct nvgpu_nvs_ioctl_create_domain *dom_args)
{
struct nvgpu_nvs_domain *domain = NULL;
int err;
err = nvgpu_nvs_add_domain(g,
dom_args->domain_params.name,
dom_args->domain_params.timeslice_us,
dom_args->domain_params.preempt_grace_us,
&domain);
if (err != 0) {
return err;
}
domain->subscheduler = dom_args->domain_params.subscheduler;
dom_args->domain_params.dom_id = domain->id;
return 0;
}
static int nvgpu_nvs_ioctl_remove_domain(struct gk20a *g, u64 dom_id)
{
return nvgpu_nvs_del_domain(g, dom_id);
}
static int nvgpu_nvs_ioctl_query_domains(
struct gk20a *g,
void __user *user_arg,
struct nvgpu_nvs_ioctl_query_domains *args)
{
struct nvgpu_nvs_domain *nvgpu_dom;
struct nvs_domain *nvs_dom;
u32 index;
struct nvgpu_nvs_ioctl_domain *args_domains = (void __user *)(uintptr_t)args->domains;
/* First call variant: return number of domains. */
if (args_domains == NULL) {
args->nr = nvgpu_nvs_domain_count(g);
if (copy_to_user(user_arg, args, sizeof(*args))) {
return -EFAULT;
}
nvs_dbg(g, "Nr domains: %u", args->nr);
return 0;
}
/*
* Second call variant: populate the passed array with domain info.
*/
index = 0;
nvs_domain_for_each(g->scheduler->sched, nvs_dom) {
struct nvgpu_nvs_ioctl_domain dom;
nvgpu_dom = nvs_dom->priv;
nvs_dbg(g, "Copying dom #%u [%s] (%llu)",
index, nvs_dom->name, nvgpu_dom->id);
(void)memset(&dom, 0, sizeof(dom));
strncpy(dom.name, nvs_dom->name, sizeof(dom.name) - 1);
dom.timeslice_us = nvs_dom->timeslice_us;
dom.preempt_grace_us = nvs_dom->preempt_grace_us;
dom.subscheduler = nvgpu_dom->subscheduler;
dom.dom_id = nvgpu_dom->id;
if (copy_to_user(&args_domains[index],
&dom, sizeof(dom))) {
nvs_dbg(g, "Fault during copy of domain to userspace.");
return -EFAULT;
}
index += 1;
}
return 0;
}
long nvgpu_nvs_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct gk20a *g = filp->private_data;
int err = 0;
u8 buf[NVGPU_NVS_IOCTL_MAX_ARG_SIZE] = { 0 };
nvs_dbg(g, "IOC_TYPE: %c", _IOC_TYPE(cmd));
nvs_dbg(g, "IOC_NR: %u", _IOC_NR(cmd));
nvs_dbg(g, "IOC_SIZE: %u", _IOC_SIZE(cmd));
if ((_IOC_TYPE(cmd) != NVGPU_NVS_IOCTL_MAGIC) ||
(_IOC_NR(cmd) == 0) ||
(_IOC_NR(cmd) > NVGPU_NVS_IOCTL_LAST) ||
(_IOC_SIZE(cmd) > NVGPU_NVS_IOCTL_MAX_ARG_SIZE)) {
nvs_dbg(g, "-> BAD!!");
return -EINVAL;
}
if (_IOC_DIR(cmd) & _IOC_WRITE) {
if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
return -EFAULT;
}
gk20a_busy(g);
switch (cmd) {
case NVGPU_NVS_IOCTL_CREATE_DOMAIN:
{
struct nvgpu_nvs_ioctl_create_domain *args =
(struct nvgpu_nvs_ioctl_create_domain *)buf;
err = nvgpu_nvs_ioctl_create_domain(g, args);
if (err)
goto done;
/*
* Tear down the just-created domain if copying the result back to
* userspace faults.
*/
if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd))) {
nvgpu_nvs_ioctl_remove_domain(g,
args->domain_params.dom_id);
err = -EFAULT;
goto done;
}
break;
}
case NVGPU_NVS_IOCTL_QUERY_DOMAINS:
err = nvgpu_nvs_ioctl_query_domains(g,
(void __user *)arg,
(void *)buf);
if (err)
goto done;
break;
case NVGPU_NVS_IOCTL_REMOVE_DOMAIN:
{
struct nvgpu_nvs_ioctl_remove_domain *args =
(struct nvgpu_nvs_ioctl_remove_domain *)buf;
err = nvgpu_nvs_ioctl_remove_domain(g, args->dom_id);
break;
}
default:
err = -ENOTTY;
goto done;
}
done:
gk20a_idle(g);
return err;
}
ssize_t nvgpu_nvs_dev_read(struct file *filp, char __user *buf,
size_t size, loff_t *off)
{
struct gk20a *g = filp->private_data;
char log_buf[NVS_LOG_BUF_SIZE];
const char *log_msg;
s64 timestamp;
int bytes;
/*
* We need at least NVS_LOG_BUF_SIZE to parse text into from the binary
* log format.
*
* TODO: If size is large enough, return multiple entries in one go.
*/
if (size < NVS_LOG_BUF_SIZE) {
nvgpu_err(g, "Read buffer size too small: %zu", size);
return -EINVAL;
}
nvgpu_nvs_get_log(g, &timestamp, &log_msg);
if (log_msg == NULL) {
return 0;
}
bytes = snprintf(log_buf, NVS_LOG_BUF_SIZE, "[%16lld] %s\n",
timestamp, log_msg);
if (copy_to_user(buf, log_buf, bytes)) {
return -EFAULT;
}
return bytes;
}
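
For reference, a hypothetical userspace consumer of this read()
interface might look like the sketch below; the device path argument is
an assumption, and the 128-byte buffer mirrors the kernel-side
NVS_LOG_BUF_SIZE per entry.

/* Hypothetical log reader; not part of this change. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void demo_dump_nvs_log(const char *nvsched_path)
{
	char entry[128]; /* >= kernel NVS_LOG_BUF_SIZE */
	ssize_t n;
	int fd = open(nvsched_path, O_RDONLY);

	if (fd < 0)
		return;

	/*
	 * Each successful read() returns one formatted "[timestamp] event"
	 * line; a return of 0 means the log is currently drained.
	 */
	while ((n = read(fd, entry, sizeof(entry))) > 0)
		(void)fwrite(entry, 1, (size_t)n, stdout);

	close(fd);
}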

View File

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef NVGPU_OS_LINUX_IOCTL_NVS_H
#define NVGPU_OS_LINUX_IOCTL_NVS_H
#include <nvgpu/types.h>
struct inode;
struct file;
int nvgpu_nvs_dev_open(struct inode *inode, struct file *filp);
int nvgpu_nvs_dev_release(struct inode *inode, struct file *filp);
long nvgpu_nvs_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
ssize_t nvgpu_nvs_dev_read(struct file *filp, char __user *buf,
size_t size, loff_t *off);
#endif

View File

@@ -176,12 +176,14 @@ out:
return err;
}
#ifdef CONFIG_NVS_PRESENT
static int nvgpu_tsg_bind_scheduling_domain(struct nvgpu_tsg *tsg,
struct nvgpu_tsg_bind_scheduling_domain_args *args)
{
return nvgpu_tsg_bind_domain(tsg, args->domain_name);
}
#endif
#ifdef CONFIG_NVGPU_CHANNEL_TSG_CONTROL
static int gk20a_tsg_get_event_data_from_id(struct nvgpu_tsg *tsg,
@@ -821,6 +823,7 @@ long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
break;
}
#ifdef CONFIG_NVS_PRESENT
case NVGPU_TSG_IOCTL_BIND_SCHEDULING_DOMAIN:
{
err = gk20a_busy(g);
@@ -834,6 +837,7 @@ long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
gk20a_idle(g);
break;
}
#endif
case NVGPU_IOCTL_TSG_ENABLE:
{

View File

@@ -0,0 +1,140 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef _UAPI__LINUX_NVGPU_NVS_H
#define _UAPI__LINUX_NVGPU_NVS_H
#include "nvgpu-uapi-common.h"
#define NVGPU_NVS_IOCTL_MAGIC 'N'
/**
* Domain parameters to pass to the kernel.
*/
struct nvgpu_nvs_ioctl_domain {
/*
* Human readable name for this domain.
*/
char name[32];
/*
* Scheduling parameters: specify how long this domain should be scheduled
* for and what the grace period the scheduler should give this domain when
* preempting. A value of zero is treated as an infinite timeslice or an
* infinite grace period, respectively.
*/
__u32 timeslice_us;
__u32 preempt_grace_us;
/*
* Pick which subscheduler to use. These will be implemented by the kernel
* as needed. There'll always be at least one, which is the host HW built in
* round-robin scheduler.
*/
__u32 subscheduler;
/*
* GPU host hardware round-robin.
*/
#define NVGPU_SCHED_IOCTL_SUBSCHEDULER_HOST_HW_RR 0x0
/*
* Populated by the IOCTL when created: unique identifier. User space
* should never set this variable.
*/
__u64 dom_id;
__u64 reserved1;
__u64 reserved2;
};
/**
* NVGPU_NVS_IOCTL_CREATE_DOMAIN
*
* Create a domain - essentially a group of GPU contexts. Applications
* can be bound into this domain on request for each TSG.
*
* The domain ID is returned in dom_id; this id is _not_ secure. The
* nvsched device needs to have restricted permissions such that only a
* single user, or group of users, has permissions to modify the
* scheduler.
*
* It's fine to allow read-only access to the device node for other
* users; this lets other users query scheduling information that may be
* of interest to them.
*/
struct nvgpu_nvs_ioctl_create_domain {
/*
* In/out: domain parameters that userspace configures.
*
* The domain ID is returned here.
*/
struct nvgpu_nvs_ioctl_domain domain_params;
__u64 reserved1;
};
struct nvgpu_nvs_ioctl_remove_domain {
/*
* In: a domain_id to remove.
*/
__u64 dom_id;
__u64 reserved1;
};
/**
* NVGPU_NVS_IOCTL_QUERY_DOMAINS
*
* Query the current list of domains in the scheduler. This is a two
* part IOCTL.
*
* If domains is NULL, then this IOCTL will populate nr with the number
* of present domains.
*
* If domains is not NULL, then this IOCTL will treat domains as an
* array with nr elements and write up to nr domains into that array.
*/
struct nvgpu_nvs_ioctl_query_domains {
/*
* In/Out: If NULL, leave untouched. If not NULL, then write
* up to nr domains into the domain elements pointed to by
* domains.
*/
__u64 domains;
/*
* In/Out: If domains is NULL, then populate with the number
* of domains present. Otherwise nr specifies the capacity of
* the domains array pointed to by domains.
*/
__u32 nr;
};
#define NVGPU_NVS_IOCTL_CREATE_DOMAIN \
_IOWR(NVGPU_NVS_IOCTL_MAGIC, 1, \
struct nvgpu_nvs_ioctl_create_domain)
#define NVGPU_NVS_IOCTL_REMOVE_DOMAIN \
_IOW(NVGPU_NVS_IOCTL_MAGIC, 2, \
struct nvgpu_nvs_ioctl_remove_domain)
#define NVGPU_NVS_IOCTL_QUERY_DOMAINS \
_IOWR(NVGPU_NVS_IOCTL_MAGIC, 3, \
struct nvgpu_nvs_ioctl_query_domains)
#define NVGPU_NVS_IOCTL_LAST \
_IOC_NR(NVGPU_NVS_IOCTL_QUERY_DOMAINS)
#define NVGPU_NVS_IOCTL_MAX_ARG_SIZE \
sizeof(struct nvgpu_nvs_ioctl_create_domain)
#endif
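
To make the two-call NVGPU_NVS_IOCTL_QUERY_DOMAINS contract above
concrete, a hedged userspace sketch follows; the fd is assumed to be an
open nvsched node, the header include path is an assumption, and error
handling is abbreviated.

/* Hypothetical illustration of the two-call query pattern. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/nvgpu-nvs.h> /* assumed install path of this header */

static int demo_query_domains(int fd)
{
	struct nvgpu_nvs_ioctl_query_domains query = { 0 };
	struct nvgpu_nvs_ioctl_domain *doms;
	__u32 i;

	/* Pass 1: domains == NULL, so the kernel fills in query.nr. */
	if (ioctl(fd, NVGPU_NVS_IOCTL_QUERY_DOMAINS, &query) != 0)
		return -1;

	if (query.nr == 0)
		return 0;

	doms = calloc(query.nr, sizeof(*doms));
	if (doms == NULL)
		return -1;

	/* Pass 2: hand over the array; up to query.nr entries are written. */
	query.domains = (__u64)(uintptr_t)doms;
	if (ioctl(fd, NVGPU_NVS_IOCTL_QUERY_DOMAINS, &query) != 0) {
		free(doms);
		return -1;
	}

	for (i = 0; i < query.nr; i++)
		printf("domain %llu: %s (%u us timeslice)\n",
		       (unsigned long long)doms[i].dom_id,
		       doms[i].name, doms[i].timeslice_us);

	free(doms);
	return 0;
}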

View File

@@ -20,6 +20,8 @@
#include "nvgpu-event.h"
#include "nvgpu-as.h"
#include "nvgpu-nvs.h"
/*
* /dev/nvhost-tsg-gpu device
*

View File

@@ -46,7 +46,7 @@ int nvs_sched_create(struct nvs_sched *sched,
void nvs_sched_close(struct nvs_sched *sched)
{
nvs_domain_clear(sched); nvs_domain_clear_all(sched);
nvs_free(sched, sched->domain_list);
nvs_log_destroy(sched);