gpu: nvgpu: add Doxygen documentation for Control-Fifo

Add Doxygen for Control-FIFO APIs. Add null checks where
necessary.

Jira NVGPU-8619

Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Change-Id: I75f92108c73a521e45299b8870e106916954e7a8
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2805551
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Prateek Sethi <prsethi@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
Tested-by: Prateek Sethi <prsethi@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Authored by Debarshi Dutta on 2022-11-08 16:12:46 +05:30; committed by mobile promotions
parent 9143860355
commit 2d38294912
3 changed files with 557 additions and 47 deletions

View File

@@ -30,6 +30,22 @@
#include <nvgpu/runlist.h>
#include <nvgpu/nvgpu_init.h>
/**
* @brief A structure for managing the list of all control-fifo users
*
* Globally manages the set of users, i.e. exclusive and non-exclusive users.
* Initially, all users upon creation are characterised as non-exclusive.
*
* Privileged users with Write permissions can request exclusive
* access. A user can be converted from non-exclusive to exclusive
* only if it meets the above criteria and there exists no other exclusive
* user. Only one exclusive user can exist at a time. An exclusive user
* can be reset back into a non-exclusive user.
*
* Maintains a list of non-exclusive users and at most one exclusive user.
*
* A spinlock is used for fast access to and modification of the users.
*/
struct nvgpu_nvs_domain_ctrl_fifo_users {
/* Flag to reserve exclusive user */
bool reserved_exclusive_rw_user;
@@ -43,60 +59,92 @@ struct nvgpu_nvs_domain_ctrl_fifo_users {
struct nvgpu_spinlock user_lock;
};
/**
* @brief A structure for managing the queues for control-fifo.
*
* This structure contains the Send/Receive and Event Queues
* used for manual mode scheduling.
*
* A coarse-grained lock is also defined for access control of the queues.
*/
struct nvgpu_nvs_domain_ctrl_fifo_queues {
/**
* Send indicates a buffer having data(PUT) written by a userspace client
* and queried by the scheduler(GET).
*/
struct nvgpu_nvs_ctrl_queue send;
/**
* Receive indicates a buffer having data(PUT) written by scheduler
* and queried by the userspace client(GET).
*/
struct nvgpu_nvs_ctrl_queue receive;
/**
* Event indicates a buffer that is subscribed to by userspace clients to
* receive events. This buffer is Read-Only for the users and only scheduler can
* write to it.
*/
struct nvgpu_nvs_ctrl_queue event;
#ifdef CONFIG_KMD_SCHEDULING_WORKER_THREAD
/*
* This contains the APIs required for reading the send queue.
*/
struct nvs_control_fifo_receiver *send_queue_receiver;
/*
* This contains the APIs required for writing the receive queue.
*/
struct nvs_control_fifo_sender *receiver_queue_sender;
#endif
/**
* Global mutex for coarse grained access control
* of all Queues for all UMD interfaces. e.g. IOCTL/devctls
* and mmap calls.
*/
struct nvgpu_mutex queue_lock;
};
/**
* @brief A master structure for Control-Fifo
*
* A global structure for Control-Fifo, one per GPU instance.
* All the Users and Queues are centrally managed from here.
*
* In addition, any capabilities of the scheduler are
* also managed here.
*/
struct nvgpu_nvs_domain_ctrl_fifo {
/**
* Instance of global struct gk20a;
*/
struct gk20a *g;
/**
* A placeholder for holding users of control-fifo.
*/
struct nvgpu_nvs_domain_ctrl_fifo_users users;
/**
* A placeholder for storing the queues of control-fifo.
*/
struct nvgpu_nvs_domain_ctrl_fifo_queues queues;
/**
* Store capabilities of control-fifo.
*/
struct nvs_domain_ctrl_fifo_capabilities capabilities;
};
void nvgpu_nvs_ctrl_fifo_reset_exclusive_user(
struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl, struct nvs_domain_ctrl_fifo_user *user)
{
if (sched_ctrl == NULL || user == NULL) {
return;
}
nvgpu_spinlock_acquire(&sched_ctrl->users.user_lock);
nvgpu_list_del(&user->sched_ctrl_list);
nvgpu_list_add_tail(&user->sched_ctrl_list, &sched_ctrl->users.list_non_exclusive_user);
@@ -108,6 +156,10 @@ int nvgpu_nvs_ctrl_fifo_reserve_exclusive_user(
{
int ret = 0;
if (sched_ctrl == NULL || user == NULL) {
return -ENODEV;
}
if (!user->has_write_access) {
return -EPERM;
}
@@ -132,6 +184,10 @@ bool nvgpu_nvs_ctrl_fifo_user_exists(struct nvgpu_nvs_domain_ctrl_fifo *sched_ct
bool user_exists = false;
struct nvs_domain_ctrl_fifo_user *user;
if (sched_ctrl == NULL) {
return false;
}
(void)rw;
nvgpu_spinlock_acquire(&sched_ctrl->users.user_lock);
@@ -162,9 +218,12 @@ bool nvgpu_nvs_ctrl_fifo_is_exclusive_user(struct nvgpu_nvs_domain_ctrl_fifo *sc
struct nvs_domain_ctrl_fifo_user *user)
{
bool result = false;
struct nvs_domain_ctrl_fifo_user *exclusive_user = NULL;
if (sched_ctrl == NULL || user == NULL) {
return false;
}
nvgpu_spinlock_acquire(&sched_ctrl->users.user_lock);
if (!nvgpu_list_empty(&sched_ctrl->users.exclusive_user)) {
@@ -184,6 +243,11 @@ bool nvgpu_nvs_ctrl_fifo_is_exclusive_user(struct nvgpu_nvs_domain_ctrl_fifo *sc
void nvgpu_nvs_ctrl_fifo_add_user(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
struct nvs_domain_ctrl_fifo_user *user)
{
if (sched_ctrl == NULL || user == NULL) {
return;
}
nvgpu_spinlock_acquire(&sched_ctrl->users.user_lock);
nvgpu_list_add(&user->sched_ctrl_list, &sched_ctrl->users.list_non_exclusive_user);
@@ -195,12 +259,21 @@ void nvgpu_nvs_ctrl_fifo_add_user(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
bool nvgpu_nvs_ctrl_fifo_user_is_active(struct nvs_domain_ctrl_fifo_user *user)
{
if (user == NULL) {
return false;
}
return user->active_used_queues != 0;
}
void nvgpu_nvs_ctrl_fifo_remove_user(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
struct nvs_domain_ctrl_fifo_user *user)
{
if (sched_ctrl == NULL || user == NULL) {
return;
}
nvgpu_spinlock_acquire(&sched_ctrl->users.user_lock);
nvgpu_list_del(&user->sched_ctrl_list);
@@ -328,6 +401,10 @@ bool nvgpu_nvs_ctrl_fifo_is_busy(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl)
{
bool ret = 0;
if (sched_ctrl == NULL) {
return false;
}
nvgpu_spinlock_acquire(&sched_ctrl->users.user_lock);
ret = (sched_ctrl->users.usage_counter != 0);
nvgpu_spinlock_release(&sched_ctrl->users.user_lock);
@@ -388,12 +465,19 @@ struct nvgpu_nvs_ctrl_queue *nvgpu_nvs_ctrl_fifo_get_queue(
struct nvs_domain_ctrl_fifo_capabilities *nvgpu_nvs_ctrl_fifo_get_capabilities(
struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl)
{
if (sched_ctrl == NULL) {
return NULL;
}
return &sched_ctrl->capabilities;
}
bool nvgpu_nvs_buffer_is_valid(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *buf)
{
(void)g;
if (buf == NULL) {
return false;
}
return buf->valid;
}
@@ -402,13 +486,24 @@ int nvgpu_nvs_buffer_alloc(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
size_t bytes, u8 mask, struct nvgpu_nvs_ctrl_queue *buf)
{
int err;
struct gk20a *g;
struct vm_gk20a *system_vm;
#ifdef CONFIG_KMD_SCHEDULING_WORKER_THREAD
struct nvs_control_fifo_receiver *send_queue_receiver;
struct nvs_control_fifo_sender *receiver_queue_sender;
#endif
if (sched_ctrl == NULL) {
return -ENODEV;
}
if (buf == NULL) {
return -EINVAL;
}
g = sched_ctrl->g;
system_vm = g->mm.pmu.vm;
(void)memset(buf, 0, sizeof(*buf));
buf->g = g;
@@ -450,14 +545,29 @@ fail:
void nvgpu_nvs_buffer_free(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
struct nvgpu_nvs_ctrl_queue *buf)
{
struct gk20a *g;
struct vm_gk20a *system_vm;
u8 mask;
#ifdef CONFIG_KMD_SCHEDULING_WORKER_THREAD
struct nvs_control_fifo_receiver *send_queue_receiver;
struct nvs_control_fifo_sender *receiver_queue_sender;
#endif
if (sched_ctrl == NULL) {
return;
}
if (buf == NULL) {
return;
}
g = sched_ctrl->g;
system_vm = g->mm.pmu.vm;
mask = buf->mask;
#ifdef CONFIG_KMD_SCHEDULING_WORKER_THREAD
send_queue_receiver = nvgpu_nvs_domain_ctrl_fifo_get_receiver(g);
receiver_queue_sender = nvgpu_nvs_domain_ctrl_fifo_get_sender(g);
if (mask == NVGPU_NVS_CTRL_FIFO_QUEUE_EXCLUSIVE_CLIENT_WRITE) {
nvgpu_nvs_domain_ctrl_fifo_set_receiver(g, NULL);
@@ -490,24 +600,40 @@ void nvgpu_nvs_ctrl_fifo_unlock_queues(struct gk20a *g)
bool nvgpu_nvs_ctrl_fifo_queue_has_subscribed_users(struct nvgpu_nvs_ctrl_queue *queue)
{
if (queue == NULL) {
return false;
}
return queue->ref != 0;
}
void nvgpu_nvs_ctrl_fifo_user_subscribe_queue(struct nvs_domain_ctrl_fifo_user *user,
struct nvgpu_nvs_ctrl_queue *queue)
{
if (queue == NULL || user == NULL) {
return;
}
user->active_used_queues |= queue->mask;
queue->ref++;
}
void nvgpu_nvs_ctrl_fifo_user_unsubscribe_queue(struct nvs_domain_ctrl_fifo_user *user,
struct nvgpu_nvs_ctrl_queue *queue)
{
if (queue == NULL || user == NULL) {
return;
}
user->active_used_queues &= ~((u32)queue->mask);
queue->ref--;
}
bool nvgpu_nvs_ctrl_fifo_user_is_subscribed_to_queue(struct nvs_domain_ctrl_fifo_user *user,
struct nvgpu_nvs_ctrl_queue *queue)
{
if (queue == NULL || user == NULL) {
return false;
}
return (user->active_used_queues & queue->mask);
}
@@ -518,23 +644,23 @@ void nvgpu_nvs_ctrl_fifo_erase_all_queues(struct gk20a *g)
nvgpu_nvs_ctrl_fifo_lock_queues(g);
if (nvgpu_nvs_buffer_is_valid(g, &sched_ctrl->queues.send)) {
nvgpu_nvs_ctrl_fifo_erase_queue_locked(g, &sched_ctrl->queues.send);
}
if (nvgpu_nvs_buffer_is_valid(g, &sched_ctrl->queues.receive)) {
nvgpu_nvs_ctrl_fifo_erase_queue_locked(g, &sched_ctrl->queues.receive);
}
if (nvgpu_nvs_buffer_is_valid(g, &sched_ctrl->queues.event)) {
nvgpu_nvs_ctrl_fifo_erase_queue_locked(g, &sched_ctrl->queues.event);
}
nvgpu_nvs_ctrl_fifo_unlock_queues(g);
}
void nvgpu_nvs_ctrl_fifo_erase_queue_locked(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *queue)
{
if (queue != NULL && queue->free != NULL) {
queue->free(g, queue);
}
}

View File

@@ -51,27 +51,55 @@ struct nvgpu_nvs_ctrl_queue;
struct nvgpu_nvs_domain_ctrl_fifo;
struct nvgpu_nvs_domain;
/**
* @brief Manage Control-Fifo Capabilities
*
* This is a placeholder for storing different capabilities of the control-fifo.
*
* The current implementation stores the backend implementation of the scheduler,
* i.e. NVGPU_NVS_DOMAIN_SCHED_KMD or NVGPU_NVS_DOMAIN_SCHED_GSP.
*/
struct nvs_domain_ctrl_fifo_capabilities {
/* Store type of scheduler backend */
uint8_t scheduler_implementation_hw;
};
/**
* @brief A structure to characterise users of the control-fifo device node.
*
* Users refer to Usermode processes/threads that open the control-fifo device
* node. Users of the control-fifo device nodes mainly fall into the following
* two categories.
*
* Exclusive R/W user: A user who has exclusive access to the control-fifo
* scheduling queues namely Send, Receive and Event Queues. Exclusive users
* can open/close a combination of the above queues for scheduling purposes.
*
* An exclusive user controls the scheduling control-flow by writing commands into
* the shared queues. In a typical flow, a user writes
* the Domain ID into the 'Send' queue and waits for the scheduler to schedule the
* domain and then write the response into the 'Receive' queue.
*
* Non-Exclusive R/O user: A user who acts as an observer and has access to
* the Event queue only.
*
* Other fields store the necessary metadata for each user.
*/
struct nvs_domain_ctrl_fifo_user {
/**
* Flag to determine whether the user has write access.
* Only users having write access can be marked as an exclusive user.
*/
bool has_write_access;
/**
* PID of the user. Used to prevent a given user from opening
* multiple instances of control-fifo device node.
*/
int pid;
/** Mask of actively used queues */
u32 active_used_queues;
/**
* Used to hold the scheduler capabilities.
*/
struct nvs_domain_ctrl_fifo_capabilities capabilities;
@@ -90,7 +118,12 @@ nvs_domain_ctrl_fifo_user_from_sched_ctrl_list(struct nvgpu_list_node *node)
};
/**
* @brief A wrapper for nvs_domain. This is the front facing object for NVGPU-KMD's
* domain management code. This manages the lifecycle of one instance of
* struct nvgpu_runlist_domain per engine and maintains its links with each engine's
* struct nvgpu_runlist.
*
* This structure also maintains a link with nvs_domain via a parent node.
*/
struct nvgpu_nvs_domain {
u64 id;
@@ -133,6 +166,10 @@ struct nvgpu_nvs_domain {
#define NVS_WORKER_STATE_PAUSED 3
#define NVS_WORKER_STATE_SHOULD_RESUME 4
/**
* @brief A central structure to manage the CPU based
* worker thread for KMD scheduling.
*/
struct nvgpu_nvs_worker {
nvgpu_atomic_t nvs_sched_state;
struct nvgpu_cond wq_request;
@@ -141,20 +178,66 @@ struct nvgpu_nvs_worker {
u32 current_timeout;
};
/**
* @brief A central scheduler structure, one for each GPU instance.
* This structure contains instances corresponding to the worker, the top
* level scheduler object, the active domain and the shadow_domain.
*
* This object stores a global 64-bit id_counter per GPU instance for
* allocating IDs to domains incrementally.
*/
struct nvgpu_nvs_scheduler {
/**
* @brief Instance of the top level scheduler object.
*
*/
struct nvs_sched *sched;
/**
* @brief 64-bit atomic counter for managing domain IDs for userspace.
* Upon a userspace request, a new domain is created and a 64-bit integer
* is allocated as the domain ID.
*/
nvgpu_atomic64_t id_counter;
struct nvgpu_nvs_worker worker;
struct nvgpu_nvs_domain *active_domain;
/**
* @brief An instance of shadow domain object maintained by NVGPU.
*
*/
struct nvgpu_nvs_domain *shadow_domain;
};
/**
* @brief Describes the type of queue.
*
* NVGPU_NVS_NUM_CONTROL indicates Send/Receive queue.
* These queues are meant for Control-Fifo operations such
* as Handshaking, Scheduling, Control-Flow etc.
*
* NVGPU_NVS_NUM_EVENT indicates Event queue. Event queues
* are meant for generating events such as Recovery, Preemption
* etc.
*/
enum nvgpu_nvs_ctrl_queue_num {
NVGPU_NVS_NUM_CONTROL = 0,
NVGPU_NVS_NUM_EVENT,
NVGPU_NVS_INVALID,
};
/**
* @brief Describes the direction of a queue.
*
* The direction is described in terms of CLIENT
* and SCHEDULER.
*
* NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER indicates that client is a producer
* and scheduler is a consumer e.g. Send Queue
*
* NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT indicates that client is a consumer
* and scheduler is a producer e.g. Receive Queue/Event Queue
*/
enum nvgpu_nvs_ctrl_queue_direction {
NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER = 0,
NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT,
@@ -438,7 +521,25 @@ const char *nvgpu_nvs_domain_get_name(struct nvgpu_nvs_domain *dom);
#define nvs_dbg(g, fmt, arg...) \
nvgpu_log(g, gpu_dbg_nvs, fmt, ##arg)
/**
* @brief Acquire a lock for access-control of Control-Fifo queues.
*
* Doesn't support recursive calls.
*
* @param g [in] The GPU super structure. Function does not perform any
* validation of the parameter.
*/
void nvgpu_nvs_ctrl_fifo_lock_queues(struct gk20a *g);
/**
* @brief Release lock for access-control of Control-Fifo queues.
*
* The lock must have been acquired by the same thread using
* nvgpu_nvs_ctrl_fifo_lock_queues().
*
* @param g [in] The GPU super structure. Function does not perform any
* validation of the parameter.
*/
void nvgpu_nvs_ctrl_fifo_unlock_queues(struct gk20a *g);
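/*
 * Illustrative usage sketch, not part of this change: the queue lock does not
 * support recursion and is expected to bracket any direct queue access. The
 * caller name is hypothetical; the pattern mirrors what
 * nvgpu_nvs_ctrl_fifo_erase_all_queues() does per queue in the common code.
 */
static void example_touch_queue(struct gk20a *g,
		struct nvgpu_nvs_ctrl_queue *queue)
{
	nvgpu_nvs_ctrl_fifo_lock_queues(g);

	/* Queue helpers documented below require this lock to be held. */
	if (nvgpu_nvs_buffer_is_valid(g, queue)) {
		nvgpu_nvs_ctrl_fifo_erase_queue_locked(g, queue);
	}

	nvgpu_nvs_ctrl_fifo_unlock_queues(g);
}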
#ifdef CONFIG_KMD_SCHEDULING_WORKER_THREAD
@@ -448,25 +549,199 @@ void nvgpu_nvs_worker_resume(struct gk20a *g);
bool nvgpu_nvs_ctrl_fifo_is_enabled(struct gk20a *g);
struct nvgpu_nvs_domain_ctrl_fifo *nvgpu_nvs_ctrl_fifo_create(struct gk20a *g);
/**
* @brief Check whether a process having a given PID has already opened the Control-Fifo
* device node.
*
* A user here typically indicates a process that opens the control-fifo device node.
* The PIDs are stored for each such process and are used for detecting duplicates.
*
* Iterate through the list of non-exclusive users as well as exclusive users and check
* whether any user exists with the given PID. The check is entirely done within a user
* lock.
*
* @param sched_ctrl Pointer to the master structure for control-fifo. Must not be NULL.
* @param pid[In] This pid is compared with the existing users to identify duplicates.
* @param rw[In] Permission of the user.
* @retval true If some user exists having same pid as that of input pid.
* @retval false No user exists having same pid as that of input pid or sched_ctrl is NULL.
*/
bool nvgpu_nvs_ctrl_fifo_user_exists(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
int pid, bool rw);
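/*
 * Illustrative usage sketch, not part of this change: an open path can use
 * nvgpu_nvs_ctrl_fifo_user_exists() to reject a second open from the same
 * process before registering the user. The caller name and error code are
 * hypothetical; pid and has_write_access are fields of
 * struct nvs_domain_ctrl_fifo_user above.
 */
static int example_register_user(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
		struct nvs_domain_ctrl_fifo_user *user, int pid, bool rw)
{
	if (nvgpu_nvs_ctrl_fifo_user_exists(sched_ctrl, pid, rw)) {
		/* This process already has the control-fifo node open. */
		return -EEXIST;
	}

	user->pid = pid;
	user->has_write_access = rw;

	/* Registers the user as non-exclusive (declared further below). */
	nvgpu_nvs_ctrl_fifo_add_user(sched_ctrl, user);

	return 0;
}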
/**
* @brief Idle the GPU when using Control-Fifo nodes
*
* Nvgpu KMD increments a power lock every time the Control-Fifo device node is
* opened by a user process.
*
* This method can be used to decrement the power lock. This doesn't
* guarantee that work submits would complete. The user of this API needs to make
* sure that no jobs are submitted while this API is in effect. This API
* should be balanced by a call to the equivalent unidle() version.
*
* @param g [in] The GPU super structure. Function does not perform any
* validation of the parameter.
*/
void nvgpu_nvs_ctrl_fifo_idle(struct gk20a *g);
/**
* @brief Unidle the GPU when using Control-Fifo nodes
*
* Invoke this function only after a previous call to idle the GPU
* (nvgpu_nvs_ctrl_fifo_idle()) has been made.
*
* @param g [in] The GPU super structure. Function does not perform any
* validation of the parameter.
*/
void nvgpu_nvs_ctrl_fifo_unidle(struct gk20a *g);
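/*
 * Illustrative usage sketch, not part of this change: idle/unidle calls are
 * expected to be balanced, and the caller must ensure no work is submitted
 * in between. The caller name is hypothetical.
 */
static void example_power_quiesce(struct gk20a *g)
{
	/* Drop the power lock taken when the control-fifo node was opened. */
	nvgpu_nvs_ctrl_fifo_idle(g);

	/* ... period during which no jobs are submitted ... */

	/* Re-take the power lock before normal operation resumes. */
	nvgpu_nvs_ctrl_fifo_unidle(g);
}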
/**
* @brief Check whether the control-fifo is currently in use.
*
* Atomically check whether the usage_counter in the users field is non-zero.
*
* @param sched_ctrl Pointer to the master structure for control-fifo. Must not be NULL.
* @retval true At least one user is using the control-fifo.
* @retval false No user is using the control-fifo or sched_ctrl is NULL.
*/
bool nvgpu_nvs_ctrl_fifo_is_busy(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl);
/**
* @brief Destroy the master control-fifo structure
*
* Assert that nvgpu_nvs_ctrl_fifo_is_busy() is not true.
*
* Erase all the Queues and clear other references to the memory.
*
* @param g [in] The GPU super structure. Function does not perform any
* validation of the parameter.
*/
void nvgpu_nvs_ctrl_fifo_destroy(struct gk20a *g);
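/*
 * Illustrative teardown sketch, not part of this change: the control-fifo is
 * expected to be torn down only once no users remain. The caller name is
 * hypothetical.
 */
static void example_teardown(struct gk20a *g,
		struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl)
{
	if (nvgpu_nvs_ctrl_fifo_is_busy(sched_ctrl)) {
		/* At least one user still holds the node open; do nothing. */
		return;
	}

	/* Erases any remaining queues and releases the master structure. */
	nvgpu_nvs_ctrl_fifo_destroy(g);
}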
/**
* @brief Check whether a user has any actively used queues.
*
* Verify whether the user's active_used_queues mask is non-zero.
*
* @param user A user for the control-fifo node. Must not be NULL.
* @retval true If active_used_queues is non-zero.
* @retval false If active_used_queues is zero or user is NULL.
*/
bool nvgpu_nvs_ctrl_fifo_user_is_active(struct nvs_domain_ctrl_fifo_user *user);
/**
* @brief Add a control-fifo user into the master structure.
*
* Atomically add a control-fifo user into the master structure as a non-exclusive user.
* By default, all users are added as non-exclusive users. nvgpu_nvs_ctrl_fifo_reserve_exclusive_user()
* can be used to convert a non-exclusive user to an exclusive one.
*
* Increment the control-fifo's users' usage_counter.
*
* @param sched_ctrl Pointer to the master structure for control-fifo. Must not be NULL.
* @param user Pointer to a user to be attached to the master structure. Must not be NULL.
*/
void nvgpu_nvs_ctrl_fifo_add_user(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
struct nvs_domain_ctrl_fifo_user *user);
/**
* @brief Check whether a given user is marked as exclusive user.
*
* Atomically check whether a given user is marked as exclusive.
*
* @param sched_ctrl Pointer to the master structure for control-fifo. Must not be NULL.
* @param user Pointer to a user attached to the master structure. Must not be NULL.
* @retval true If the user is marked as exclusive.
* @retval false If the user is not marked as exclusive or other inputs are NULL.
*/
bool nvgpu_nvs_ctrl_fifo_is_exclusive_user(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
struct nvs_domain_ctrl_fifo_user *user);
/**
* @brief Reset an exclusive user back to a non-exclusive user.
*
* Atomically change the given exclusive user back into a non-exclusive user.
* Detach the user from the exclusive entry and move it to the list of
* non-exclusive users.
*
* @param sched_ctrl Pointer to the master structure for control-fifo. Must not be NULL.
* @param user Pointer to a user attached to the master structure. Must not be NULL.
*/
void nvgpu_nvs_ctrl_fifo_reset_exclusive_user(
struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl, struct nvs_domain_ctrl_fifo_user *user);
/**
* @brief Convert a non-exclusive user to an exclusive user.
*
* Atomically change a non-exclusive user to an exclusive user, provided the user
* has write access. Remove the corresponding entry of the user from the non-exclusive list
* and add it to the exclusive entry.
*
* @param sched_ctrl Pointer to the master structure for control-fifo. Must not be NULL.
* @param user Pointer to a user attached to the master structure. Must not be NULL.
*
* @retval 0 If the user is marked as exclusive.
* @retval -ENODEV If sched_ctrl or user is NULL.
* @retval -EPERM If the user doesn't have write access.
* @retval -EBUSY If an exclusive user already exists.
*/
int nvgpu_nvs_ctrl_fifo_reserve_exclusive_user(
struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl, struct nvs_domain_ctrl_fifo_user *user);
/**
* @brief Remove user from the master control-fifo structure.
*
* Atomically remove a user from the master control-fifo structure, i.e. sched_ctrl.
* Remove the user's entry from sched_ctrl's user list and decrement its usage counter.
*
* @param sched_ctrl Pointer to the master structure for control-fifo. Must not be NULL.
* @param user Pointer to a user attached to the master structure. Must not be NULL.
*/
void nvgpu_nvs_ctrl_fifo_remove_user(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
struct nvs_domain_ctrl_fifo_user *user);
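/*
 * Illustrative usage sketch, not part of this change: lifecycle of an
 * exclusive user built from the helpers declared above. The caller name and
 * error handling are hypothetical.
 */
static int example_exclusive_session(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
		struct nvs_domain_ctrl_fifo_user *user)
{
	int err;

	/* Every user starts out as non-exclusive. */
	nvgpu_nvs_ctrl_fifo_add_user(sched_ctrl, user);

	/*
	 * Promote to exclusive; this fails unless the user has write access
	 * and no other exclusive user currently exists.
	 */
	err = nvgpu_nvs_ctrl_fifo_reserve_exclusive_user(sched_ctrl, user);
	if (err != 0) {
		nvgpu_nvs_ctrl_fifo_remove_user(sched_ctrl, user);
		return err;
	}

	/* ... exclusive use of the Send/Receive queues ... */

	/* Demote back to non-exclusive, then drop the user entirely. */
	nvgpu_nvs_ctrl_fifo_reset_exclusive_user(sched_ctrl, user);
	nvgpu_nvs_ctrl_fifo_remove_user(sched_ctrl, user);

	return 0;
}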
/**
* @brief Return an instance of the capabilities of the control-fifo.
*
* @param sched_ctrl Pointer to the master structure for control-fifo. Must not be NULL.
* @return Pointer to the control-fifo's struct nvs_domain_ctrl_fifo_capabilities.
* @retval NULL If sched_ctrl is NULL.
*/
struct nvs_domain_ctrl_fifo_capabilities *nvgpu_nvs_ctrl_fifo_get_capabilities(
struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl);
/**
* @brief Obtain a reference to the Queue based on input parameters.
*
* Returns an appropriate queue based on the combination of queue_num
* and queue_direction and also sets the mask value corresponding to the queue type.
*
* 1) If queue_num is set to NVGPU_NVS_NUM_CONTROL and queue_direction is set to
* NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER, return pointer to Send queue and set mask as
* NVGPU_NVS_CTRL_FIFO_QUEUE_EXCLUSIVE_CLIENT_WRITE
*
* 2) else for NVGPU_NVS_NUM_CONTROL and queue_direction set to
* NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT, return pointer to Receive queue and set mask as
* NVGPU_NVS_CTRL_FIFO_QUEUE_EXCLUSIVE_CLIENT_READ
*
* 3) Otherwise, if queue_num is NVGPU_NVS_NUM_EVENT and queue_direction set to
* NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT return pointer to Event Queue and set mask as
* NVGPU_NVS_CTRL_FIFO_QUEUE_CLIENT_EVENTS_READ.
*
* 4) Return NULL otherwise
*
* @param sched_ctrl[In] Pointer to the master structure for control-fifo.
* Must not be NULL.
* @param queue_num[In] Indicates the type of Queue.
* @param queue_direction[In] Indicates the direction of Queue
* @param mask[Out] Pointer to a One byte variable to indicate the mask.
* Must not be NULL.
* @return Pointer to a struct nvgpu_nvs_ctrl_queue selected based on the input parameters.
* @retval NULL If the above conditions are not met or the input parameters are NULL.
*/
struct nvgpu_nvs_ctrl_queue *nvgpu_nvs_ctrl_fifo_get_queue(
struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
enum nvgpu_nvs_ctrl_queue_num queue_num,
@@ -484,20 +759,129 @@ int nvgpu_nvs_ctrl_fifo_scheduler_handle_requests(struct gk20a *g);
#endif
/* Below methods require nvgpu_nvs_ctrl_fifo_lock_queues() to be held. */
/**
* @brief Check if the buffer's valid entry is set to true.
*
* @param g [in] The GPU super structure. Function does not perform any
* validation of the parameter.
* @param buf Input Queue buffer. Must not be NULL.
* @retval true If the buffer's valid entry is set to true, indicating the buffer is valid.
* @retval false If buf is NULL or the valid entry is set to false.
*/
bool nvgpu_nvs_buffer_is_valid(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *buf);
/**
* @brief Allocate DMA memory for shared queues.
*
* Use System VM to allocate memory for shared queues for Control-Fifo
* operations. This memory is suitable for doing DMA and is mapped into the GMMU.
* This memory will be placed in SYSMEM.
*
* Mark the queue's valid entry as true to indicate validity of the queue,
* and set the mask in the corresponding input entry buf.
*
* @param sched_ctrl[In] Pointer to the master structure for control-fifo.
* Must not be NULL.
* @param bytes[In] Size of the Queue in bytes.
* @param mask[In] Queue Mask.
* @param buf[In] Input Queue buffer. Must not be NULL.
*
* @retval 0 On successful allocation of the queue memory.
* @retval -ENODEV If sched_ctrl is NULL.
* @retval -EINVAL If buf is NULL.
* @retval <0 Other negative error codes on allocation failure.
*/
int nvgpu_nvs_buffer_alloc(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
size_t bytes, u8 mask, struct nvgpu_nvs_ctrl_queue *buf);
/**
* @brief Free memory allocated for the shared queue.
*
* Free the memory corresponding to the queue if the queue's valid entry is
* true.
*
* @param sched_ctrl[In] Pointer to the master structure for control-fifo.
* Must not be NULL.
* @param buf[In] Input Queue buffer. Must not be NULL.
*/
void nvgpu_nvs_buffer_free(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
struct nvgpu_nvs_ctrl_queue *buf);
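/*
 * Illustrative usage sketch, not part of this change: select the Send queue
 * and back it with DMA memory under the queue lock. The caller name and the
 * 4096-byte size are hypothetical; the full nvgpu_nvs_ctrl_fifo_get_queue()
 * signature is as documented above.
 */
static int example_setup_send_queue(struct gk20a *g,
		struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl)
{
	struct nvgpu_nvs_ctrl_queue *queue;
	u8 mask = 0;
	int err = 0;

	queue = nvgpu_nvs_ctrl_fifo_get_queue(sched_ctrl, NVGPU_NVS_NUM_CONTROL,
			NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER, &mask);
	if (queue == NULL) {
		return -EINVAL;
	}

	nvgpu_nvs_ctrl_fifo_lock_queues(g);
	if (!nvgpu_nvs_buffer_is_valid(g, queue)) {
		/* Allocates GMMU-mapped SYSMEM and marks the queue valid. */
		err = nvgpu_nvs_buffer_alloc(sched_ctrl, 4096U, mask, queue);
	}
	nvgpu_nvs_ctrl_fifo_unlock_queues(g);

	return err;
}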
/**
* @brief Check whether queue has any active users.
*
* Check whether queue's reference counter is non-zero.
*
* @param queue[In] Input Queue buffer. Must not be NULL.
* @retval true If queue's reference counter is non-zero.
* @retval false If queue is NULL or reference counter is zero.
*/
bool nvgpu_nvs_ctrl_fifo_queue_has_subscribed_users(struct nvgpu_nvs_ctrl_queue *queue);
/**
* @brief Mark user as subscribed to the queue.
*
* Bitwise 'OR' the queue's mask into the user's active_used_queues
* and increment the queue's reference counter. This is required to prevent
* multiple accesses into the queue by the same user. This can be used to prevent
* the same user from opening the device node multiple times.
*
* @param user[In] User that needs to subscribe to the queue. Must not be NULL.
* @param queue[In] Input Queue buffer. Must not be NULL.
*/
void nvgpu_nvs_ctrl_fifo_user_subscribe_queue(struct nvs_domain_ctrl_fifo_user *user,
struct nvgpu_nvs_ctrl_queue *queue);
/**
* @brief Mark user as unsubscribed to the queue.
*
* Decrement the queue's reference counter and remove the corresponding queue's
* mask from user's active_used_queues.
*
* @param user[In] User that needs to unsubscribe from the queue. Must not be NULL.
* @param queue[In] Input Queue buffer. Must not be NULL.
*/
void nvgpu_nvs_ctrl_fifo_user_unsubscribe_queue(struct nvs_domain_ctrl_fifo_user *user,
struct nvgpu_nvs_ctrl_queue *queue);
/**
* @brief Check whether a given user is subscribed to the queue.
*
* @param user[In] User whose subscription is to be checked. Must not be NULL.
* @param queue[In] Input Queue buffer. Must not be NULL.
* @retval true If user's active_used_queues field contains the queue's mask.
* @retval false If either input is NULL or the user's active_used_queues doesn't
* contain the queue's mask.
*/
bool nvgpu_nvs_ctrl_fifo_user_is_subscribed_to_queue(struct nvs_domain_ctrl_fifo_user *user,
struct nvgpu_nvs_ctrl_queue *queue);
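/*
 * Illustrative usage sketch, not part of this change: the subscribe helpers
 * keep the per-user mask and the per-queue reference count in sync. The
 * caller name is hypothetical.
 */
static void example_track_queue_usage(struct nvs_domain_ctrl_fifo_user *user,
		struct nvgpu_nvs_ctrl_queue *queue)
{
	if (!nvgpu_nvs_ctrl_fifo_user_is_subscribed_to_queue(user, queue)) {
		/* Sets the queue's bit in active_used_queues and bumps the queue's ref. */
		nvgpu_nvs_ctrl_fifo_user_subscribe_queue(user, queue);
	}

	/* ... the queue is mapped and used by this user ... */

	if (nvgpu_nvs_ctrl_fifo_user_is_subscribed_to_queue(user, queue)) {
		/* Clears the bit and drops the queue's ref. */
		nvgpu_nvs_ctrl_fifo_user_unsubscribe_queue(user, queue);
	}
}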
/**
* @brief Call the queue's free method.
*
* The caller must hold the queue lock taken via nvgpu_nvs_ctrl_fifo_lock_queues().
*
* @param g [in] The GPU super structure. Function does not perform any
* validation of the parameter.
* @param queue[In] Input Queue buffer. Must not be NULL.
*/
void nvgpu_nvs_ctrl_fifo_erase_queue_locked(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *queue);
/**
* @brief Erase all queues within the control-fifo structure.
*
* Erase the Send/Receive and Event Queues while the global queue lock is taken.
*
* @param g [in] The GPU super structure. Function does not perform any
* validation of the parameter.
*/
void nvgpu_nvs_ctrl_fifo_erase_all_queues(struct gk20a *g);
/**
* @brief Get the Shadow Domain.
*
* Get a pointer to the shadow domain. The shadow domain is always present.
*
* @param g [in] The GPU super structure. Function does not perform any
* validation of the parameter.
* @return struct nvgpu_nvs_domain* Shadow Domain.
*/
struct nvgpu_nvs_domain *
nvgpu_nvs_get_shadow_domain_locked(struct gk20a *g);
struct nvgpu_nvs_domain *nvgpu_nvs_domain_by_id_locked(struct gk20a *g, u64 domain_id);

View File

@@ -851,7 +851,7 @@ static int nvgpu_nvs_ctrl_fifo_create_queue(struct gk20a *g,
*/
if ((num_queue == NVGPU_NVS_NUM_CONTROL) ||
!nvgpu_nvs_ctrl_fifo_queue_has_subscribed_users(queue)) {
nvgpu_nvs_ctrl_fifo_erase_queue_locked(g, queue);
}
err = fd;
goto fail;
@@ -910,7 +910,7 @@ static void nvgpu_nvs_ctrl_fifo_undo_create_queue(struct gk20a *g,
*/
if (nvgpu_nvs_buffer_is_valid(g, queue) &&
!nvgpu_nvs_ctrl_fifo_queue_has_subscribed_users(queue)) {
nvgpu_nvs_ctrl_fifo_erase_queue_locked(g, queue);
}
put_unused_fd(args->dmabuf_fd);
@@ -975,7 +975,7 @@ static int nvgpu_nvs_ctrl_fifo_destroy_queue(struct gk20a *g,
*/
if (num_queue == NVGPU_NVS_NUM_CONTROL) {
if (!nvgpu_nvs_buf_linux_is_mapped(g, queue)) {
nvgpu_nvs_ctrl_fifo_erase_queue_locked(g, queue);
} else {
err = -EBUSY;
goto fail;