gpu: nvgpu: add support for nvs control_fifo

Add a device node for management of nvs control fifo buffers for
scheduling domains. The current design consists of a master structure,
struct nvgpu_nvs_domain_sched_ctrl, that manages users as well as
control queues. Initially, all users are added as non-exclusive users.
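
As a rough sketch (the field names here are hypothetical, not the
actual definition), the master structure can be pictured as:

/* Hypothetical layout of the master control structure. */
struct nvgpu_nvs_domain_sched_ctrl {
	struct nvgpu_list_node users;       /* all non-exclusive users */
	struct nvs_domain_ctrl_fifo_user *exclusive_user; /* at most one */
	/* Send/Receive/Event control queue state is tracked here too. */
};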

Subsequent changes will add support for IOCTLs to manage opening of
the Send/Receive and Event buffers, querying characteristics, etc.

In subsequent changes, a user that tries to open a Send/Receive queue
will first try to reserve itself as an exclusive user, and only if that
succeeds can it proceed with creating both Send/Receive queues.

Exclusive users will be reset to non-exclusive users just before they
close their device node handle.
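
In rough pseudocode (the reserve helper named here is hypothetical;
the reset helper is introduced by this change):

/* Later queue-open path, sketched under the above assumptions. */
if (!nvgpu_nvs_ctrl_fifo_reserve_exclusive_user(sched_ctrl, user)) {
	return -EBUSY; /* another user already holds the reservation */
}
/* Only now may both Send/Receive queues be created. */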

Jira NVGPU-8128

Change-Id: I15a83f70cd49c685510a9fd5ea4476ebb3544378
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2691404
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit
12 changed files with 365 additions and 2 deletions

@@ -21,6 +21,7 @@
#include <nvgpu/nvs.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/nvgpu_init.h>
#include <nvgpu/os_sched.h>
#include "os_linux.h"
#include <nvs/sched.h>
@@ -595,3 +596,82 @@ ssize_t nvgpu_nvs_dev_read(struct file *filp, char __user *buf,
return bytes;
}

/* Per-fd wrapper tying a control fifo user to its character device. */
struct nvgpu_nvs_domain_ctrl_fifo_user_linux {
	struct nvs_domain_ctrl_fifo_user user;
	struct nvgpu_cdev *cdev;
};

int nvgpu_nvs_ctrl_fifo_ops_open(struct inode *inode, struct file *filp)
{
	struct nvgpu_cdev *cdev;
	struct gk20a *g;
	int pid;
	struct nvgpu_nvs_domain_ctrl_fifo_user_linux *linux_user;
	bool writable = filp->f_mode & FMODE_WRITE;

	cdev = container_of(inode->i_cdev, struct nvgpu_cdev, cdev);
	g = nvgpu_get_gk20a_from_cdev(cdev);

	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_NVS_CTRL_FIFO)) {
		return -EOPNOTSUPP;
	}

	/* A given PID may hold at most one user handle per access mode. */
	pid = nvgpu_current_pid(g);
	if (nvgpu_nvs_ctrl_fifo_user_exists(g->sched_ctrl_fifo, pid, writable)) {
		nvgpu_err(g, "User already exists");
		return -EEXIST;
	}

	linux_user = nvgpu_kzalloc(g, sizeof(*linux_user));
	if (linux_user == NULL) {
		return -ENOMEM;
	}

	linux_user->cdev = cdev;
	linux_user->user.pid = pid;
	linux_user->user.has_write_access = writable;

	/* All users start out as non-exclusive. */
	nvgpu_nvs_ctrl_fifo_add_user(g->sched_ctrl_fifo, &linux_user->user);

	filp->private_data = linux_user;
	nvgpu_get(g);

	return 0;
}

int nvgpu_nvs_ctrl_fifo_ops_release(struct inode *inode, struct file *filp)
{
	struct nvgpu_cdev *cdev;
	struct gk20a *g;
	struct nvgpu_nvs_domain_ctrl_fifo_user_linux *linux_user = NULL;
	int err = 0;

	cdev = container_of(inode->i_cdev, struct nvgpu_cdev, cdev);
	g = nvgpu_get_gk20a_from_cdev(cdev);

	linux_user = filp->private_data;
	if (linux_user == NULL) {
		return -ENODEV;
	}

	/* Report users that still hold open queues, but tear down anyway. */
	if (nvgpu_nvs_ctrl_fifo_user_is_active(&linux_user->user)) {
		err = -EBUSY;
	}

	/* Exclusive users are reset to non-exclusive before removal. */
	if (nvgpu_nvs_ctrl_fifo_is_exclusive_user(g->sched_ctrl_fifo, &linux_user->user)) {
		nvgpu_nvs_ctrl_fifo_reset_exclusive_user(g->sched_ctrl_fifo, &linux_user->user);
	}

	nvgpu_nvs_ctrl_fifo_remove_user(g->sched_ctrl_fifo, &linux_user->user);

	filp->private_data = NULL;
	nvgpu_kfree(g, linux_user);
	nvgpu_put(g);

	return err;
}
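
These handlers plug into the fops table declared just below; a minimal
sketch of its likely wiring, assuming the standard struct
file_operations layout (the actual definition lives with the cdev
registration code, and read/ioctl entries land in later changes):

/* Sketch only, not the real table. */
const struct file_operations nvgpu_nvs_ctrl_fifo_ops = {
	.owner = THIS_MODULE,
	.open = nvgpu_nvs_ctrl_fifo_ops_open,
	.release = nvgpu_nvs_ctrl_fifo_ops_release,
};
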
extern const struct file_operations nvgpu_nvs_ctrl_fifo_ops;
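
For illustration, user space would exercise the open/release pair
roughly as follows; the device node path here is hypothetical and
depends on how the cdev is registered:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* O_RDWR sets FMODE_WRITE, so has_write_access becomes true. */
	int fd = open("/dev/nvgpu/igpu0/nvsched_ctrl_fifo", O_RDWR);

	if (fd < 0) {
		return 1;
	}

	/* Queue-management IOCTLs arrive in subsequent changes. */

	close(fd); /* invokes nvgpu_nvs_ctrl_fifo_ops_release() */
	return 0;
}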