mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-25 11:04:51 +03:00
gpu: nvgpu: add support for nvs control_fifo
gpu: nvgpu: add support for nvs control_fifo

Add a device node for managing nvs control fifo buffers for scheduling domains. The current design consists of a master structure, struct nvgpu_nvs_domain_sched_ctrl, for management of users as well as control queues. Initially, all users are added as non-exclusive users. Subsequent changes will add IOCTLs to manage opening of Send/Receive and Event buffers, querying characteristics, etc. In those subsequent changes, a user that tries to open a Send/Receive queue will first try to reserve itself as an exclusive user, and only if that succeeds can it proceed with creation of both Send/Receive queues. Exclusive users are reset to non-exclusive users just before they close their device node handle. Jira NVGPU-8128 Change-Id: I15a83f70cd49c685510a9fd5ea4476ebb3544378 Signed-off-by: Debarshi Dutta <ddutta@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2691404 Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com> Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com> GVS: Gerrit_Virtual_Submit
This commit is contained in:
committed by
mobile promotions
parent
cce488d87e
commit
62c03dfaef
@@ -162,6 +162,12 @@ static const struct file_operations gk20a_sched_ops = {
|
||||
.read = gk20a_sched_dev_read,
|
||||
};
|
||||
|
||||
/*
 * File operations for the "nvsched_ctrl_fifo" device node.
 * Only open/release are wired up here; per the commit message,
 * Send/Receive/Event buffer IOCTLs are added by subsequent changes.
 */
const struct file_operations nvgpu_nvs_ctrl_fifo_ops = {
	.owner = THIS_MODULE,
	.release = nvgpu_nvs_ctrl_fifo_ops_release,
	.open = nvgpu_nvs_ctrl_fifo_ops_open,
};
|
||||
|
||||
static const struct file_operations nvgpu_nvs_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.release = nvgpu_nvs_dev_release,
|
||||
@@ -198,6 +204,7 @@ static const struct nvgpu_dev_node dev_node_list[] = {
|
||||
{"prof-dev", &gk20a_prof_dev_ops, false, true },
|
||||
{"sched", &gk20a_sched_ops, false, false },
|
||||
{"nvsched", &nvgpu_nvs_ops, false, false },
|
||||
{"nvsched_ctrl_fifo", &nvgpu_nvs_ctrl_fifo_ops, false, false },
|
||||
{"tsg", &gk20a_tsg_ops, false, false },
|
||||
};
|
||||
|
||||
|
||||
@@ -307,6 +307,8 @@ static struct nvgpu_flags_mapping flags_mapping[] = {
|
||||
NVGPU_SUPPORT_BUFFER_METADATA},
|
||||
{NVGPU_GPU_FLAGS_SUPPORT_NVS,
|
||||
NVGPU_SUPPORT_NVS},
|
||||
{NVGPU_GPU_FLAGS_SUPPORT_NVS_SCHED_CTRL_FIFO,
|
||||
NVGPU_SUPPORT_NVS_CTRL_FIFO},
|
||||
};
|
||||
|
||||
static u64 nvgpu_ctrl_ioctl_gpu_characteristics_flags(struct gk20a *g)
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
#include <nvgpu/nvs.h>
|
||||
#include <nvgpu/gk20a.h>
|
||||
#include <nvgpu/nvgpu_init.h>
|
||||
#include <nvgpu/os_sched.h>
|
||||
#include "os_linux.h"
|
||||
|
||||
#include <nvs/sched.h>
|
||||
@@ -595,3 +596,82 @@ ssize_t nvgpu_nvs_dev_read(struct file *filp, char __user *buf,
|
||||
|
||||
return bytes;
|
||||
}
|
||||
|
||||
/*
 * Per-open-file state for a control-fifo user: binds the common
 * (OS-independent) user bookkeeping to the Linux character device
 * it was opened through.
 */
struct nvgpu_nvs_domain_ctrl_fifo_user_linux {
	struct nvs_domain_ctrl_fifo_user user; /* common control-fifo user state */
	struct nvgpu_cdev *cdev;               /* cdev this handle was opened on */
};
|
||||
|
||||
int nvgpu_nvs_ctrl_fifo_ops_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
struct nvgpu_cdev *cdev;
|
||||
struct gk20a *g;
|
||||
int pid;
|
||||
struct nvgpu_nvs_domain_ctrl_fifo_user_linux *linux_user;
|
||||
bool writable = filp->f_mode & FMODE_WRITE;
|
||||
|
||||
cdev = container_of(inode->i_cdev, struct nvgpu_cdev, cdev);
|
||||
g = nvgpu_get_gk20a_from_cdev(cdev);
|
||||
|
||||
if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_NVS_CTRL_FIFO)) {
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
pid = nvgpu_current_pid(g);
|
||||
if (nvgpu_nvs_ctrl_fifo_user_exists(g->sched_ctrl_fifo, pid, writable)) {
|
||||
nvgpu_err(g, "User already exists");
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
linux_user = nvgpu_kzalloc(g, sizeof(*linux_user));
|
||||
if (linux_user == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
linux_user->cdev = cdev;
|
||||
linux_user->user.pid = pid;
|
||||
if (writable)
|
||||
linux_user->user.has_write_access = true;
|
||||
|
||||
nvgpu_nvs_ctrl_fifo_add_user(g->sched_ctrl_fifo, &linux_user->user);
|
||||
|
||||
filp->private_data = linux_user;
|
||||
nvgpu_get(g);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int nvgpu_nvs_ctrl_fifo_ops_release(struct inode *inode, struct file *filp)
|
||||
{
|
||||
struct nvgpu_cdev *cdev;
|
||||
struct gk20a *g;
|
||||
struct nvgpu_nvs_domain_ctrl_fifo_user_linux *linux_user = NULL;
|
||||
int err = 0;
|
||||
|
||||
cdev = container_of(inode->i_cdev, struct nvgpu_cdev, cdev);
|
||||
g = nvgpu_get_gk20a_from_cdev(cdev);
|
||||
|
||||
linux_user = filp->private_data;
|
||||
if (linux_user == NULL) {
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (nvgpu_nvs_ctrl_fifo_user_is_active(&linux_user->user)) {
|
||||
err = -EBUSY;
|
||||
}
|
||||
|
||||
if (nvgpu_nvs_ctrl_fifo_is_exclusive_user(g->sched_ctrl_fifo, &linux_user->user)) {
|
||||
nvgpu_nvs_ctrl_fifo_reset_exclusive_user(g->sched_ctrl_fifo, &linux_user->user);
|
||||
}
|
||||
|
||||
nvgpu_nvs_ctrl_fifo_remove_user(g->sched_ctrl_fifo, &linux_user->user);
|
||||
|
||||
filp->private_data = NULL;
|
||||
|
||||
nvgpu_kfree(g, linux_user);
|
||||
nvgpu_put(g);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
extern const struct file_operations nvgpu_nvs_ctrl_fifo_ops;
|
||||
|
||||
@@ -26,4 +26,7 @@ ssize_t nvgpu_nvs_dev_read(struct file *filp, char __user *buf,
|
||||
size_t size, loff_t *off);
|
||||
struct nvgpu_nvs_domain *nvgpu_nvs_domain_get_from_file(int fd);
|
||||
|
||||
int nvgpu_nvs_ctrl_fifo_ops_open(struct inode *inode, struct file *filp);
|
||||
int nvgpu_nvs_ctrl_fifo_ops_release(struct inode *inode, struct file *filp);
|
||||
|
||||
#endif
|
||||
|
||||
Reference in New Issue
Block a user