Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
Synced 2025-12-22 17:36:20 +03:00
gpu: nvgpu: add support for nvs control_fifo
Add a device node for managing NVS control-fifo buffers for scheduling domains. The current design centers on a master structure, struct nvgpu_nvs_domain_ctrl_fifo, which tracks users as well as control queues. Initially, all users are added as non-exclusive users.

Subsequent changes will add IOCTLs to manage opening of the Send/Receive and Event buffers, querying characteristics, and so on. A user that attempts to open a Send/Receive queue will first try to reserve itself as the exclusive user, and only if that succeeds can it proceed to create both Send and Receive queues. Exclusive users are reset to non-exclusive users just before they close their device node handle.

Jira NVGPU-8128

Change-Id: I15a83f70cd49c685510a9fd5ea4476ebb3544378
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2691404
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit
Committed by: mobile promotions
Parent: cce488d87e
Commit: 62c03dfaef
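The reservation protocol described in the commit message is not exercised by any IOCTL in this change; the sketch below illustrates how a future Send/Receive open path could use the API added here. This is not part of the commit: example_create_send_recv_queues() is a hypothetical placeholder for the queue-creation IOCTLs that subsequent changes will add, and only the nvgpu_nvs_ctrl_fifo_* calls exist in this change.

/*
 * Hypothetical sketch, not part of this commit: intended exclusive-user
 * flow for a future Send/Receive queue open path.
 */
static int example_open_send_recv(struct gk20a *g,
		struct nvs_domain_ctrl_fifo_user *user)
{
	int err;

	/*
	 * Every user starts out non-exclusive; a Send/Receive opener must
	 * first win the single exclusive slot. This fails with -EPERM if
	 * the node was opened read-only, or -EBUSY if another user already
	 * holds the slot.
	 */
	err = nvgpu_nvs_ctrl_fifo_reserve_exclusive_user(
			g->sched_ctrl_fifo, user);
	if (err != 0) {
		return err;
	}

	err = example_create_send_recv_queues(g, user); /* hypothetical */
	if (err != 0) {
		/* Drop back to the non-exclusive list on failure. */
		nvgpu_nvs_ctrl_fifo_reset_exclusive_user(
				g->sched_ctrl_fifo, user);
	}

	return err;
}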
@@ -214,6 +214,7 @@ nvsched:
   safe: no
   owner: Alex W
   sources: [ common/nvs/nvs_sched.c,
+             common/nvs/nvs_sched_ctrl.c,
              include/external-nvs/impl.h,
              include/external-nvs/types.h,
              include/nvgpu/nvs.h ]
@@ -995,7 +995,8 @@ include $(srctree.nvgpu)/nvsched/Makefile.sources
 nvgpu-y += $(patsubst %.c,../../../nvsched/%.o,$(NVS_SOURCES))
 nvgpu-y += \
 	os/linux/ioctl_nvs.o \
-	common/nvs/nvs_sched.o
+	common/nvs/nvs_sched.o \
+	common/nvs/nvs_sched_ctrl.o
 ccflags-y += \
 	$(patsubst %,-I$(srctree.nvgpu)/nvsched/%,$(NVS_INCLUDE)) \
 	-I$(srctree.nvgpu)/drivers/gpu/nvgpu/include/external-nvs \
@@ -453,6 +453,9 @@ void nvgpu_nvs_remove_support(struct gk20a *g)
 	nvgpu_kfree(g, sched->sched);
 	nvgpu_kfree(g, sched);
 	g->scheduler = NULL;
+
+	nvgpu_nvs_ctrl_fifo_destroy(g);
+
 	nvgpu_mutex_destroy(&g->sched_mutex);
 }
 
@@ -475,6 +478,14 @@ int nvgpu_nvs_open(struct gk20a *g)
 		goto unlock;
 	}
 
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_NVS_CTRL_FIFO)) {
+		g->sched_ctrl_fifo = nvgpu_nvs_ctrl_fifo_create(g);
+		if (g->sched_ctrl_fifo == NULL) {
+			err = -ENOMEM;
+			goto unlock;
+		}
+	}
+
 	/* separately allocated to keep the definition hidden from other files */
 	g->scheduler->sched = nvgpu_kzalloc(g, sizeof(*g->scheduler->sched));
 	if (g->scheduler->sched == NULL) {

@@ -510,6 +521,8 @@ unlock:
 			nvgpu_kfree(g, g->scheduler);
 			g->scheduler = NULL;
 		}
+		if (g->sched_ctrl_fifo)
+			nvgpu_nvs_ctrl_fifo_destroy(g);
 	}
 
 	nvgpu_mutex_release(&g->sched_mutex);
drivers/gpu/nvgpu/common/nvs/nvs_sched_ctrl.c (new file, 205 lines)
@@ -0,0 +1,205 @@
/*
 * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvs/log.h>
#include <nvgpu/nvs.h>
#include <nvgpu/lock.h>
#include <nvgpu/kmem.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/list.h>
struct nvgpu_nvs_domain_ctrl_fifo_users {
	/* Flag to reserve an exclusive user */
	bool reserved_exclusive_rw_user;
	/* Stores the single read/write user */
	struct nvgpu_list_node exclusive_user;
	/* Stores multiple read-only event subscribers, e.g. a debugger */
	struct nvgpu_list_node list_non_exclusive_user;
	/* Number of currently active users */
	u32 usage_counter;

	struct nvgpu_spinlock user_lock;
};

struct nvgpu_nvs_domain_ctrl_fifo {
	/*
	 * Pointer to the global struct gk20a.
	 */
	struct gk20a *g;

	struct nvgpu_nvs_domain_ctrl_fifo_users users;
};
void nvgpu_nvs_ctrl_fifo_reset_exclusive_user(
	struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl, struct nvs_domain_ctrl_fifo_user *user)
{
	nvgpu_spinlock_acquire(&sched_ctrl->users.user_lock);
	nvgpu_list_del(&user->sched_ctrl_list);
	nvgpu_list_add_tail(&user->sched_ctrl_list, &sched_ctrl->users.list_non_exclusive_user);
	nvgpu_spinlock_release(&sched_ctrl->users.user_lock);
}

int nvgpu_nvs_ctrl_fifo_reserve_exclusive_user(
	struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl, struct nvs_domain_ctrl_fifo_user *user)
{
	int ret = 0;

	if (!user->has_write_access) {
		return -EPERM;
	}

	nvgpu_spinlock_acquire(&sched_ctrl->users.user_lock);

	if (nvgpu_list_empty(&sched_ctrl->users.exclusive_user)) {
		nvgpu_list_del(&user->sched_ctrl_list);
		nvgpu_list_add_tail(&user->sched_ctrl_list, &sched_ctrl->users.exclusive_user);
	} else {
		ret = -EBUSY;
	}

	nvgpu_spinlock_release(&sched_ctrl->users.user_lock);

	return ret;
}
bool nvgpu_nvs_ctrl_fifo_user_exists(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
	int pid, bool rw)
{
	bool user_exists = false;
	struct nvs_domain_ctrl_fifo_user *user;

	nvgpu_spinlock_acquire(&sched_ctrl->users.user_lock);

	nvgpu_list_for_each_entry(user, &sched_ctrl->users.list_non_exclusive_user,
			nvs_domain_ctrl_fifo_user, sched_ctrl_list) {
		if (user->pid == pid) {
			user_exists = true;
			break;
		}
	}

	if (!user_exists) {
		if (!nvgpu_list_empty(&sched_ctrl->users.exclusive_user)) {
			user = nvgpu_list_first_entry(&sched_ctrl->users.exclusive_user,
					nvs_domain_ctrl_fifo_user, sched_ctrl_list);
			if (user->pid == pid) {
				user_exists = true;
			}
		}
	}

	nvgpu_spinlock_release(&sched_ctrl->users.user_lock);

	return user_exists;
}
bool nvgpu_nvs_ctrl_fifo_is_exclusive_user(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
	struct nvs_domain_ctrl_fifo_user *user)
{
	bool result = false;
	struct nvs_domain_ctrl_fifo_user *exclusive_user = NULL;

	nvgpu_spinlock_acquire(&sched_ctrl->users.user_lock);

	if (!nvgpu_list_empty(&sched_ctrl->users.exclusive_user)) {
		exclusive_user = nvgpu_list_first_entry(&sched_ctrl->users.exclusive_user,
				nvs_domain_ctrl_fifo_user, sched_ctrl_list);

		if (exclusive_user == user) {
			result = true;
		}
	}

	nvgpu_spinlock_release(&sched_ctrl->users.user_lock);

	return result;
}
void nvgpu_nvs_ctrl_fifo_add_user(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
	struct nvs_domain_ctrl_fifo_user *user)
{
	nvgpu_spinlock_acquire(&sched_ctrl->users.user_lock);

	nvgpu_list_add(&user->sched_ctrl_list, &sched_ctrl->users.list_non_exclusive_user);

	sched_ctrl->users.usage_counter++;

	nvgpu_spinlock_release(&sched_ctrl->users.user_lock);
}

bool nvgpu_nvs_ctrl_fifo_user_is_active(struct nvs_domain_ctrl_fifo_user *user)
{
	return user->active_used_queues != 0;
}

void nvgpu_nvs_ctrl_fifo_remove_user(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
	struct nvs_domain_ctrl_fifo_user *user)
{
	nvgpu_spinlock_acquire(&sched_ctrl->users.user_lock);

	nvgpu_list_del(&user->sched_ctrl_list);

	sched_ctrl->users.usage_counter--;

	nvgpu_spinlock_release(&sched_ctrl->users.user_lock);
}
struct nvgpu_nvs_domain_ctrl_fifo *nvgpu_nvs_ctrl_fifo_create(struct gk20a *g)
{
	struct nvgpu_nvs_domain_ctrl_fifo *sched = nvgpu_kzalloc(g, sizeof(*sched));

	if (sched == NULL) {
		return NULL;
	}

	nvgpu_spinlock_init(&sched->users.user_lock);
	nvgpu_init_list_node(&sched->users.exclusive_user);
	nvgpu_init_list_node(&sched->users.list_non_exclusive_user);

	return sched;
}

bool nvgpu_nvs_ctrl_fifo_is_busy(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl)
{
	bool ret = false;

	nvgpu_spinlock_acquire(&sched_ctrl->users.user_lock);
	ret = (sched_ctrl->users.usage_counter != 0);
	nvgpu_spinlock_release(&sched_ctrl->users.user_lock);

	return ret;
}
void nvgpu_nvs_ctrl_fifo_destroy(struct gk20a *g)
{
	struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl = g->sched_ctrl_fifo;

	if (sched_ctrl == NULL) {
		return;
	}

	nvgpu_assert(!nvgpu_nvs_ctrl_fifo_is_busy(sched_ctrl));

	nvgpu_kfree(g, sched_ctrl);
	g->sched_ctrl_fifo = NULL;
}
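The file above only defines the user-tracking machinery; nothing in this change drives a full cycle yet. Below is a minimal sketch of the intended lifecycle, assuming a valid struct gk20a *g. It is not part of the commit, and the function name is illustrative.

/*
 * Hypothetical sketch, not part of this commit: create/track/destroy
 * cycle using only the functions defined in nvs_sched_ctrl.c.
 */
static void example_user_lifecycle(struct gk20a *g)
{
	struct nvs_domain_ctrl_fifo_user user = {
		.has_write_access = true,
		.pid = 123,	/* normally taken from the opening process */
	};

	g->sched_ctrl_fifo = nvgpu_nvs_ctrl_fifo_create(g);
	if (g->sched_ctrl_fifo == NULL) {
		return;
	}

	/* New users always land on the non-exclusive list. */
	nvgpu_nvs_ctrl_fifo_add_user(g->sched_ctrl_fifo, &user);

	/* usage_counter is now 1, so the fifo reports busy. */
	(void)nvgpu_nvs_ctrl_fifo_is_busy(g->sched_ctrl_fifo);

	nvgpu_nvs_ctrl_fifo_remove_user(g->sched_ctrl_fifo, &user);

	/* Asserts that no users remain, then frees the structure. */
	nvgpu_nvs_ctrl_fifo_destroy(g);
}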
@@ -221,6 +221,7 @@ struct gk20a;
 	DEFINE_FLAG(NVGPU_SUPPORT_ROP_IN_GPC, "ROP is part of GPC"), \
 	DEFINE_FLAG(NVGPU_SUPPORT_BUFFER_METADATA, "Buffer metadata support"), \
 	DEFINE_FLAG(NVGPU_SUPPORT_NVS, "Domain scheduler support"), \
+	DEFINE_FLAG(NVGPU_SUPPORT_NVS_CTRL_FIFO, "Domain scheduler control support"), \
 	DEFINE_FLAG(NVGPU_SUPPORT_TEGRA_RAW, \
 		"TEGRA_RAW format support"), \
 	DEFINE_FLAG(NVGPU_SUPPORT_EMULATE_MODE, \
@@ -900,6 +900,7 @@ struct gk20a {
 	u32 support_gpu_tools;
 
 #ifdef CONFIG_NVS_PRESENT
+	struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl_fifo;
 	struct nvgpu_nvs_scheduler *scheduler;
 	struct nvgpu_mutex sched_mutex;
@@ -41,6 +41,36 @@ struct gk20a;
 struct nvgpu_nvs_domain_ioctl;
 struct nvgpu_runlist;
 struct nvgpu_runlist_domain;
 struct nvgpu_nvs_ctrl_queue;
+struct nvgpu_nvs_domain_ctrl_fifo;
+
+/* Structure to store user info common to all schedulers */
+struct nvs_domain_ctrl_fifo_user {
+	/*
+	 * Flag to determine whether the user has write access.
+	 * A user having write access can update Request/Response buffers.
+	 */
+	bool has_write_access;
+	/*
+	 * PID of the user. Used to prevent a given user from opening
+	 * multiple instances of the control-fifo device node.
+	 */
+	int pid;
+	/* Mask of actively used queues */
+	u32 active_used_queues;
+	/*
+	 * List node used for keeping references to the user in
+	 * the master struct nvgpu_nvs_domain_ctrl_fifo
+	 */
+	struct nvgpu_list_node sched_ctrl_list;
+};
+
+static inline struct nvs_domain_ctrl_fifo_user *
+nvs_domain_ctrl_fifo_user_from_sched_ctrl_list(struct nvgpu_list_node *node)
+{
+	return (struct nvs_domain_ctrl_fifo_user *)
+		((uintptr_t)node - offsetof(struct nvs_domain_ctrl_fifo_user, sched_ctrl_list));
+}
+
 /*
  * NvGPU KMD domain implementation details for nvsched.
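The offsetof-based accessor in the hunk above is the standard intrusive-list container_of pattern: the list node is embedded in the user struct, and subtracting the member's offset from the node's address recovers the containing struct. A standalone illustration in plain C (not from the commit; the names here are invented for the example):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct node {
	struct node *next;
};

struct user {
	int pid;
	struct node link;	/* embedded list node, like sched_ctrl_list */
};

/* Same technique as nvs_domain_ctrl_fifo_user_from_sched_ctrl_list(). */
static struct user *user_from_link(struct node *n)
{
	return (struct user *)((uintptr_t)n - offsetof(struct user, link));
}

int main(void)
{
	struct user u = { .pid = 42 };

	/* Recovers &u from the address of its embedded node: prints 42. */
	printf("%d\n", user_from_link(&u.link)->pid);
	return 0;
}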
@@ -120,6 +150,23 @@ const char *nvgpu_nvs_domain_get_name(struct nvgpu_nvs_domain *dom);
 #define nvs_dbg(g, fmt, arg...) \
 	nvgpu_log(g, gpu_dbg_nvs, fmt, ##arg)
 
+struct nvgpu_nvs_domain_ctrl_fifo *nvgpu_nvs_ctrl_fifo_create(struct gk20a *g);
+bool nvgpu_nvs_ctrl_fifo_user_exists(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
+	int pid, bool rw);
+bool nvgpu_nvs_ctrl_fifo_user_is_active(struct nvs_domain_ctrl_fifo_user *user);
+void nvgpu_nvs_ctrl_fifo_add_user(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
+	struct nvs_domain_ctrl_fifo_user *user);
+bool nvgpu_nvs_ctrl_fifo_is_exclusive_user(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
+	struct nvs_domain_ctrl_fifo_user *user);
+void nvgpu_nvs_ctrl_fifo_reset_exclusive_user(
+	struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl, struct nvs_domain_ctrl_fifo_user *user);
+int nvgpu_nvs_ctrl_fifo_reserve_exclusive_user(
+	struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl, struct nvs_domain_ctrl_fifo_user *user);
+void nvgpu_nvs_ctrl_fifo_remove_user(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
+	struct nvs_domain_ctrl_fifo_user *user);
+bool nvgpu_nvs_ctrl_fifo_is_busy(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl);
+void nvgpu_nvs_ctrl_fifo_destroy(struct gk20a *g);
+
 #else
 static inline int nvgpu_nvs_init(struct gk20a *g)
 {
@@ -162,6 +162,12 @@ static const struct file_operations gk20a_sched_ops = {
 	.read = gk20a_sched_dev_read,
 };
 
+const struct file_operations nvgpu_nvs_ctrl_fifo_ops = {
+	.owner = THIS_MODULE,
+	.release = nvgpu_nvs_ctrl_fifo_ops_release,
+	.open = nvgpu_nvs_ctrl_fifo_ops_open,
+};
+
 static const struct file_operations nvgpu_nvs_ops = {
 	.owner = THIS_MODULE,
 	.release = nvgpu_nvs_dev_release,

@@ -198,6 +204,7 @@ static const struct nvgpu_dev_node dev_node_list[] = {
 	{"prof-dev", &gk20a_prof_dev_ops, false, true },
 	{"sched", &gk20a_sched_ops, false, false },
 	{"nvsched", &nvgpu_nvs_ops, false, false },
+	{"nvsched_ctrl_fifo", &nvgpu_nvs_ctrl_fifo_ops, false, false },
 	{"tsg", &gk20a_tsg_ops, false, false },
 };
@@ -307,6 +307,8 @@ static struct nvgpu_flags_mapping flags_mapping[] = {
 		NVGPU_SUPPORT_BUFFER_METADATA},
 	{NVGPU_GPU_FLAGS_SUPPORT_NVS,
 		NVGPU_SUPPORT_NVS},
+	{NVGPU_GPU_FLAGS_SUPPORT_NVS_SCHED_CTRL_FIFO,
+		NVGPU_SUPPORT_NVS_CTRL_FIFO},
 };
 
 static u64 nvgpu_ctrl_ioctl_gpu_characteristics_flags(struct gk20a *g)
@@ -21,6 +21,7 @@
 #include <nvgpu/nvs.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/nvgpu_init.h>
+#include <nvgpu/os_sched.h>
 #include "os_linux.h"
 
 #include <nvs/sched.h>

@@ -595,3 +596,82 @@ ssize_t nvgpu_nvs_dev_read(struct file *filp, char __user *buf,
 
 	return bytes;
 }
+
+struct nvgpu_nvs_domain_ctrl_fifo_user_linux {
+	struct nvs_domain_ctrl_fifo_user user;
+	struct nvgpu_cdev *cdev;
+};
+
+int nvgpu_nvs_ctrl_fifo_ops_open(struct inode *inode, struct file *filp)
+{
+	struct nvgpu_cdev *cdev;
+	struct gk20a *g;
+	int pid;
+	struct nvgpu_nvs_domain_ctrl_fifo_user_linux *linux_user;
+	bool writable = filp->f_mode & FMODE_WRITE;
+
+	cdev = container_of(inode->i_cdev, struct nvgpu_cdev, cdev);
+	g = nvgpu_get_gk20a_from_cdev(cdev);
+
+	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_NVS_CTRL_FIFO)) {
+		return -EOPNOTSUPP;
+	}
+
+	pid = nvgpu_current_pid(g);
+	if (nvgpu_nvs_ctrl_fifo_user_exists(g->sched_ctrl_fifo, pid, writable)) {
+		nvgpu_err(g, "User already exists");
+		return -EEXIST;
+	}
+
+	linux_user = nvgpu_kzalloc(g, sizeof(*linux_user));
+	if (linux_user == NULL) {
+		return -ENOMEM;
+	}
+
+	linux_user->cdev = cdev;
+	linux_user->user.pid = pid;
+	if (writable)
+		linux_user->user.has_write_access = true;
+
+	nvgpu_nvs_ctrl_fifo_add_user(g->sched_ctrl_fifo, &linux_user->user);
+
+	filp->private_data = linux_user;
+	nvgpu_get(g);
+
+	return 0;
+}
+
+int nvgpu_nvs_ctrl_fifo_ops_release(struct inode *inode, struct file *filp)
+{
+	struct nvgpu_cdev *cdev;
+	struct gk20a *g;
+	struct nvgpu_nvs_domain_ctrl_fifo_user_linux *linux_user = NULL;
+	int err = 0;
+
+	cdev = container_of(inode->i_cdev, struct nvgpu_cdev, cdev);
+	g = nvgpu_get_gk20a_from_cdev(cdev);
+
+	linux_user = filp->private_data;
+	if (linux_user == NULL) {
+		return -ENODEV;
+	}
+
+	if (nvgpu_nvs_ctrl_fifo_user_is_active(&linux_user->user)) {
+		err = -EBUSY;
+	}
+
+	if (nvgpu_nvs_ctrl_fifo_is_exclusive_user(g->sched_ctrl_fifo, &linux_user->user)) {
+		nvgpu_nvs_ctrl_fifo_reset_exclusive_user(g->sched_ctrl_fifo, &linux_user->user);
+	}
+
+	nvgpu_nvs_ctrl_fifo_remove_user(g->sched_ctrl_fifo, &linux_user->user);
+
+	filp->private_data = NULL;
+
+	nvgpu_kfree(g, linux_user);
+	nvgpu_put(g);
+
+	return err;
+}
+extern const struct file_operations nvgpu_nvs_ctrl_fifo_ops;
@@ -26,4 +26,7 @@ ssize_t nvgpu_nvs_dev_read(struct file *filp, char __user *buf,
 		size_t size, loff_t *off);
 struct nvgpu_nvs_domain *nvgpu_nvs_domain_get_from_file(int fd);
 
+int nvgpu_nvs_ctrl_fifo_ops_open(struct inode *inode, struct file *filp);
+int nvgpu_nvs_ctrl_fifo_ops_release(struct inode *inode, struct file *filp);
+
 #endif
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,

@@ -201,6 +201,8 @@ struct nvgpu_gpu_zbc_query_table_args {
 #define NVGPU_GPU_FLAGS_SUPPORT_VAB (1ULL << 52)
 /* The NVS scheduler interface is usable */
 #define NVGPU_GPU_FLAGS_SUPPORT_NVS (1ULL << 53)
+/* The NVS control fifo interface is usable */
+#define NVGPU_GPU_FLAGS_SUPPORT_NVS_SCHED_CTRL_FIFO (1ULL << 54)
 /* SM LRF ECC is enabled */
 #define NVGPU_GPU_FLAGS_ECC_ENABLED_SM_LRF (1ULL << 60)
 /* SM SHM ECC is enabled */
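Together with the flags_mapping entry above, the new characteristics bit lets userspace probe for the interface before opening the node. A hypothetical sketch: the flag name is from this change, but get_gpu_characteristics_flags() is an invented stand-in for the usual NVGPU_GPU_IOCTL_GET_CHARACTERISTICS query path.

/* Hypothetical userspace probe; the query helper is illustrative only. */
uint64_t flags = get_gpu_characteristics_flags(ctrl_fd);

if (flags & NVGPU_GPU_FLAGS_SUPPORT_NVS_SCHED_CTRL_FIFO) {
	/* The nvsched_ctrl_fifo device node is usable. */
}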