gpu: nvgpu: add support for event queue observers

Add support for accessing the Event Queue for non-exclusive
users. This allows non-exclusive users to open Event Queues
before exclusive users. Non-exclusive users can only use the
Event Queue in read-only mode.

Set VM_SHARED for Event Queues across all users instead of just
read-only users. Event Queues are shared between multiple
processes and as such require VM_SHARED for all users (exclusive
and observers).
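
For reference, the intended per-queue subscription handling is
sketched below. This is an illustrative sketch only, not the
actual IOCTL path: the example_* wrappers are hypothetical and
error handling is elided, while the nvgpu_nvs_* helpers are the
ones added or used by this change.

/* Illustrative only: how the new subscription refcount gates
 * teardown of a shared Event Queue.
 */
static void example_event_queue_open(struct gk20a *g,
		struct nvs_domain_ctrl_fifo_user *user,
		struct nvgpu_nvs_ctrl_queue *queue)
{
	nvgpu_nvs_ctrl_fifo_lock_queues(g);
	if (!nvgpu_nvs_buffer_is_valid(g, queue)) {
		/* Exclusive user or first observer: allocate the
		 * backing buffer and export the dmabuf (elided).
		 */
	}
	/* Every user (exclusive or observer) takes a subscription ref. */
	nvgpu_nvs_ctrl_fifo_user_subscribe_queue(user, queue);
	nvgpu_nvs_ctrl_fifo_unlock_queues(g);
}

static void example_event_queue_close(struct gk20a *g,
		struct nvs_domain_ctrl_fifo_user *user,
		struct nvgpu_nvs_ctrl_queue *queue)
{
	nvgpu_nvs_ctrl_fifo_lock_queues(g);
	nvgpu_nvs_ctrl_fifo_user_unsubscribe_queue(user, queue);
	/* Erase the queue only once the last subscriber is gone. */
	if (nvgpu_nvs_buffer_is_valid(g, queue) &&
			!nvgpu_nvs_ctrl_fifo_queue_has_subscribed_users(queue)) {
		nvgpu_nvs_ctrl_fifo_erase_queue(g, queue);
	}
	nvgpu_nvs_ctrl_fifo_unlock_queues(g);
}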

Jira NVGPU-8608

Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Change-Id: Id9733c2511ded6f06dd9feea880005bdc92e51a0
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2745083
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author: Debarshi Dutta
Date: 2022-07-14 13:04:43 +05:30
Committed-by: mobile promotions
Commit: 3d95f2b803 (parent 1f9fbc85fe)
5 changed files with 111 additions and 40 deletions

@@ -340,15 +340,22 @@ void nvgpu_nvs_ctrl_fifo_unlock_queues(struct gk20a *g)
 	nvgpu_mutex_release(&sched_ctrl->queues.queue_lock);
 }
+bool nvgpu_nvs_ctrl_fifo_queue_has_subscribed_users(struct nvgpu_nvs_ctrl_queue *queue)
+{
+	return queue->ref != 0;
+}
 void nvgpu_nvs_ctrl_fifo_user_subscribe_queue(struct nvs_domain_ctrl_fifo_user *user,
 		struct nvgpu_nvs_ctrl_queue *queue)
 {
 	user->active_used_queues |= queue->mask;
+	queue->ref++;
 }
 void nvgpu_nvs_ctrl_fifo_user_unsubscribe_queue(struct nvs_domain_ctrl_fifo_user *user,
 		struct nvgpu_nvs_ctrl_queue *queue)
 {
 	user->active_used_queues &= ~queue->mask;
+	queue->ref--;
 }
 bool nvgpu_nvs_ctrl_fifo_user_is_subscribed_to_queue(struct nvs_domain_ctrl_fifo_user *user,
 		struct nvgpu_nvs_ctrl_queue *queue)

@@ -217,6 +217,7 @@ struct nvgpu_nvs_ctrl_queue {
 	void *priv;
 	bool valid;
 	u8 mask;
+	u8 ref;
 	void (*free)(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *queue);
 };
@@ -276,6 +277,7 @@ int nvgpu_nvs_buffer_alloc(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
 		size_t bytes, u8 mask, struct nvgpu_nvs_ctrl_queue *buf);
 void nvgpu_nvs_buffer_free(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
 		struct nvgpu_nvs_ctrl_queue *buf);
+bool nvgpu_nvs_ctrl_fifo_queue_has_subscribed_users(struct nvgpu_nvs_ctrl_queue *queue);
 void nvgpu_nvs_ctrl_fifo_user_subscribe_queue(struct nvs_domain_ctrl_fifo_user *user,
 		struct nvgpu_nvs_ctrl_queue *queue);
 void nvgpu_nvs_ctrl_fifo_user_unsubscribe_queue(struct nvs_domain_ctrl_fifo_user *user,

@@ -84,7 +84,7 @@ static int nvs_release_user_mappings_locked(struct gk20a *g, struct nvgpu_nvs_li
 		struct vm_area_struct *vma = current_entry->vma;
 		zap_vma_entries(g, vma);
-		linux_buf->ref--;
+		linux_buf->mapped_ref--;
 	}
 	return err;
@@ -99,7 +99,7 @@ static void nvs_vma_close(struct vm_area_struct *vma)
 	nvgpu_nvs_ctrl_fifo_lock_queues(g);
-	linux_buf->ref--;
+	linux_buf->mapped_ref--;
 	nvgpu_list_del(&vma_metadata->node);
 	/* This VMA is freed now and points to invalid ptes */
@@ -146,7 +146,7 @@ static int nvgpu_nvs_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma
 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
 			VM_DONTDUMP;
-	if (linux_buf->read_only) {
+	if (buf->mask == NVGPU_NVS_CTRL_FIFO_QUEUE_CLIENT_EVENTS_READ) {
 		vma->vm_flags |= VM_SHARED;
 	}
@@ -170,7 +170,7 @@ static int nvgpu_nvs_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma
 	vma_metadata->buf = buf;
 	nvgpu_init_list_node(&vma_metadata->node);
-	linux_buf->ref++;
+	linux_buf->mapped_ref++;
 	nvgpu_list_add_tail(&vma_metadata->node, &linux_buf->list_mapped_user_vmas);
 	vma->vm_private_data = vma_metadata;
@@ -220,8 +220,6 @@ static void nvgpu_nvs_destroy_buf_linux_locked(struct gk20a *g, struct nvgpu_nvs
 	nvs_release_user_mappings_locked(g, priv);
-	dma_buf_put(priv->dmabuf);
 	nvgpu_nvs_buffer_free(sched_ctrl, buf);
 	nvgpu_kfree(g, priv);
@@ -234,12 +232,41 @@ bool nvgpu_nvs_buf_linux_is_mapped(struct gk20a *g, struct nvgpu_nvs_ctrl_queue
 	struct nvgpu_nvs_linux_buf_priv *priv = NULL;
 	priv = (struct nvgpu_nvs_linux_buf_priv *)buf->priv;
-	is_mapped = (priv->ref != 0U);
+	is_mapped = (priv->mapped_ref != 0U);
 	return is_mapped;
 }
-int nvgpu_nvs_get_buf_linux(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *buf,
+int nvgpu_nvs_get_buf(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *buf,
+		bool read_only)
+{
+	struct nvgpu_nvs_linux_buf_priv *priv;
+	int err;
+	/*
+	 * This ref is released when the dma_buf is closed.
+	 */
+	if (!nvgpu_get(g))
+		return -ENODEV;
+	priv = (struct nvgpu_nvs_linux_buf_priv *)buf->priv;
+	priv->dmabuf_temp = nvgpu_nvs_buf_export_dmabuf(buf, read_only);
+	if (IS_ERR(priv->dmabuf_temp)) {
+		nvgpu_err(g, "Unable to export dma buf");
+		err = PTR_ERR(priv->dmabuf_temp);
+		priv->dmabuf_temp = NULL;
+		goto fail;
+	}
+	return 0;
+fail:
+	nvgpu_put(g);
+	return err;
+}
+int nvgpu_nvs_alloc_and_get_buf(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *buf,
 		size_t bytes, u8 mask, bool read_only)
 {
 	struct nvgpu_nvs_linux_buf_priv *priv;
@@ -261,7 +288,6 @@ int nvgpu_nvs_get_buf_linux(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *buf,
 	}
 	nvgpu_init_list_node(&priv->list_mapped_user_vmas);
-	priv->read_only = read_only;
 	err = nvgpu_nvs_buffer_alloc(sched_ctrl, bytes, mask, buf);
 	if (err != 0) {
@@ -269,10 +295,11 @@ int nvgpu_nvs_get_buf_linux(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *buf,
 		goto fail;
 	}
-	priv->dmabuf = nvgpu_nvs_buf_export_dmabuf(buf, read_only);
-	if (IS_ERR(priv->dmabuf)) {
+	priv->dmabuf_temp = nvgpu_nvs_buf_export_dmabuf(buf, read_only);
+	if (IS_ERR(priv->dmabuf_temp)) {
 		nvgpu_err(g, "Unable to export dma buf");
-		err = PTR_ERR(priv->dmabuf);
+		err = PTR_ERR(priv->dmabuf_temp);
+		priv->dmabuf_temp = NULL;
 		goto fail;
 	}

@@ -24,18 +24,24 @@ struct dma_buf;
 struct gk20a;
 struct nvgpu_nvs_linux_buf_priv {
-	struct dma_buf *dmabuf;
+	/* This is used to temporarily contain the dmabuf for handling failure */
+	struct dma_buf *dmabuf_temp;
 	bool read_only;
-	u32 ref;
+	u32 mapped_ref;
 	struct nvgpu_list_node list_mapped_user_vmas;
 };
 struct nvgpu_nvs_domain_ctrl_fifo_user_vma {
+	bool read_only;
 	struct nvgpu_nvs_ctrl_queue *buf;
 	struct vm_area_struct *vma;
 	struct nvgpu_list_node node;
 };
+int nvgpu_nvs_get_buf(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *buf,
+		bool read_only);
 /**
  * @brief Construct a buffer for use as a shared message passing
  * queue between user and backend scheduler. Function is
@@ -52,7 +58,7 @@ struct nvgpu_nvs_domain_ctrl_fifo_user_vma {
  * @param read_only Indicates whether a read-only buffer is requested.
  * @return int 0 on success, else fail.
  */
-int nvgpu_nvs_get_buf_linux(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *buf,
+int nvgpu_nvs_alloc_and_get_buf(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *buf,
 		size_t bytes, u8 mask, bool read_only);
 /**

@@ -786,20 +786,15 @@ static int nvgpu_nvs_ctrl_fifo_create_queue(struct gk20a *g,
 		flag |= O_RDWR;
 	}
-	/* Support for Read-Only Observers will be added later */
-	if (read_only) {
-		err = -EOPNOTSUPP;
-		goto fail;
-	}
 	if (args->access_type == NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE) {
-		if (nvgpu_nvs_buffer_is_valid(g, queue)) {
+		/* Observers are not supported for Control Queues, So ensure, buffer is invalid */
+		if (nvgpu_nvs_buffer_is_valid(g, queue) && (num_queue == NVGPU_NVS_NUM_CONTROL)) {
 			err = -EBUSY;
 			goto fail;
 		}
 	}
-	/* For event queue, prevent multiple subscription by the same user */
+	/* Prevent multiple subscription by the same user. */
 	if (nvgpu_nvs_ctrl_fifo_user_is_subscribed_to_queue(user, queue)) {
 		err = -EEXIST;
 		goto fail;
@@ -807,20 +802,39 @@ static int nvgpu_nvs_ctrl_fifo_create_queue(struct gk20a *g,
 	queue_size = NVS_QUEUE_DEFAULT_SIZE;
-	/* Ensure, event queue is constructed only once across all users. */
+	/* Exclusive User or First Observer */
 	if (!nvgpu_nvs_buffer_is_valid(g, queue)) {
-		err = nvgpu_nvs_get_buf_linux(g, queue, queue_size, mask, read_only);
+		err = nvgpu_nvs_alloc_and_get_buf(g, queue, queue_size, mask, read_only);
+	} else {
+		/* User is not already subscribed.
+		 * Other observers or (Exclusive User & Event Queue).
+		 */
+		err = nvgpu_nvs_get_buf(g, queue, read_only);
+	}
 	if (err != 0) {
 		goto fail;
 	}
-	}
+	/* At this point
+	 * 1) dma_mapping exists
+	 * 2) An instance of struct dma_buf * exists in priv->dmabuf_temp
+	 */
 	priv = queue->priv;
-	fd = dma_buf_fd(priv->dmabuf, flag);
+	fd = dma_buf_fd(priv->dmabuf_temp, flag);
 	if (fd < 0) {
-		/* Might have valid user vmas for previous event queue users */
-		if (!nvgpu_nvs_buf_linux_is_mapped(g, queue)) {
+		/* Release the dmabuf pointer here */
+		dma_buf_put(priv->dmabuf_temp);
+		priv->dmabuf_temp = NULL;
+		/* Erase mapping for num_queues = NVGPU_NVS_NUM_CONTROL,
+		 * For num_queues = NVGPU_NVS_NUM_EVENT, erase only if
+		 * underlying backing buffer is not already being used.
+		 */
+		if ((num_queue == NVGPU_NVS_NUM_CONTROL) ||
+			!nvgpu_nvs_ctrl_fifo_queue_has_subscribed_users(queue)) {
 			nvgpu_nvs_ctrl_fifo_erase_queue(g, queue);
 		}
 		err = fd;
@@ -853,6 +867,7 @@ static void nvgpu_nvs_ctrl_fifo_undo_create_queue(struct gk20a *g,
 	enum nvgpu_nvs_ctrl_queue_num num_queue;
 	enum nvgpu_nvs_ctrl_queue_direction queue_direction;
 	struct nvgpu_nvs_ctrl_queue *queue;
+	struct nvgpu_nvs_linux_buf_priv *priv;
 	u8 mask = 0;
 	nvgpu_nvs_ctrl_fifo_lock_queues(g);
@@ -866,17 +881,24 @@ static void nvgpu_nvs_ctrl_fifo_undo_create_queue(struct gk20a *g,
 		return;
 	}
+	priv = (struct nvgpu_nvs_linux_buf_priv *)queue->priv;
 	nvgpu_nvs_ctrl_fifo_user_unsubscribe_queue(user, queue);
-	/* For Control Queues, no mappings exist, For Event Queues, mappings might exist */
-	if (nvgpu_nvs_buffer_is_valid(g, queue) && !nvgpu_nvs_buf_linux_is_mapped(g, queue)) {
+	/* put the dma_buf here */
+	dma_buf_put(priv->dmabuf_temp);
+	priv->dmabuf_temp = NULL;
+	/* Control queues has no other subscribed users,
+	 * Event queue might have other subscribed users.
+	 */
+	if (nvgpu_nvs_buffer_is_valid(g, queue) &&
+		!nvgpu_nvs_ctrl_fifo_queue_has_subscribed_users(queue)) {
 		nvgpu_nvs_ctrl_fifo_erase_queue(g, queue);
 	}
+	if (args->dmabuf_fd != 0) {
 		put_unused_fd(args->dmabuf_fd);
 		args->dmabuf_fd = 0;
+	}
 	nvgpu_nvs_ctrl_fifo_unlock_queues(g);
 }
@@ -928,11 +950,17 @@ static int nvgpu_nvs_ctrl_fifo_destroy_queue(struct gk20a *g,
 		goto fail;
 	}
-	/* For Control Queues, no mappings should exist, For Event Queues, mappings might exist */
-	if (nvgpu_nvs_buffer_is_valid(g, queue)) {
+	/* For Event Queues, don't erase even if the buffer
+	 * is currently not mapped. There might be some observers
+	 * who has acquired the dma_bufs but hasn't mapped yet.
+	 * Erase the queue only when the last user is removed.
+	 *
+	 * For Control Queues, no mappings should exist
+	 */
+	if (num_queue == NVGPU_NVS_NUM_CONTROL) {
 		if (!nvgpu_nvs_buf_linux_is_mapped(g, queue)) {
 			nvgpu_nvs_ctrl_fifo_erase_queue(g, queue);
-		} else if (is_exclusive_user) {
+		} else {
 			err = -EBUSY;
 			goto fail;
 		}
@@ -1044,6 +1072,7 @@ long nvgpu_nvs_ctrl_fifo_ops_ioctl(struct file *filp, unsigned int cmd, unsigned
 		nvgpu_nvs_ctrl_fifo_undo_create_queue(g, user, args);
 		err = -EFAULT;
 		args->dmabuf_fd = -1;
+		args->queue_size = 0;
 		goto done;
 	}