gpu: nvgpu: modify queue implementation

Modify queue implementation to handle wraparound cases.
Introduce additional error checks for queue public APIs.
Modify the visibility of certain functions which need not
be public.

JIRA NVGPU-6908

Change-Id: Ibe5d2f8520d4ba0991c19d77e976f9dc8d45a452
Signed-off-by: ajesh <akv@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2536318
(cherry picked from commit 736043d13aa1a35ac21146bf93ebe8b4b081ccea)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2548583
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
ajesh
2021-05-28 11:59:16 +03:00
committed by mobile promotions
parent 966938d3e6
commit 07d9b4cdb7
4 changed files with 43 additions and 30 deletions

View File

@@ -33,10 +33,10 @@ struct nvgpu_posix_fault_inj;
 * varying length. A user has to request for the allocation of required size * varying length. A user has to request for the allocation of required size
* of memory to hold the messages before using any other public API of this * of memory to hold the messages before using any other public API of this
* unit. Requested size for allocation should be greater than zero and less * unit. Requested size for allocation should be greater than zero and less
* than or equal to INT_MAX. The alloc function ensures that the allocated size * than or equal to INT32_MAX. The alloc function ensures that the allocated size
* is always a power of two, irrespective of the requested size. It does that * is always a power of two, irrespective of the requested size. It does that
* by rounding up the requested size to nearest power of two if required. This * by rounding up the requested size to nearest power of two if required. This
* structure holds in and out indexes used to push and pop the messages * structure holds in and out indexes used to enqueue and dequeue the messages
* respectively. A mask value to indicate the size of the queue is also held. * respectively. A mask value to indicate the size of the queue is also held.
*/ */
struct nvgpu_queue { struct nvgpu_queue {
@@ -58,18 +58,6 @@ struct nvgpu_queue {
unsigned char *data; unsigned char *data;
}; };
/**
* @brief Calculate the unused message queue length.
*
* The size of all the messages currently enqueued is subtracted from the total
* size of the queue to get the unused queue length.
*
* @param queue [in] Queue structure to use.
*
* @return Return unused queue length.
*/
unsigned int nvgpu_queue_unused(struct nvgpu_queue *queue);
/** /**
* @brief Calculate the length of the message queue in use. * @brief Calculate the length of the message queue in use.
* *
@@ -113,6 +101,7 @@ int nvgpu_queue_alloc(struct nvgpu_queue *queue, unsigned int size);
*/ */
void nvgpu_queue_free(struct nvgpu_queue *queue); void nvgpu_queue_free(struct nvgpu_queue *queue);
#ifdef CONFIG_NVGPU_NON_FUSA
/** /**
* @brief Enqueue message into message queue. * @brief Enqueue message into message queue.
* *
@@ -124,7 +113,7 @@ void nvgpu_queue_free(struct nvgpu_queue *queue);
* @param buf [in] Pointer to source message buffer. * @param buf [in] Pointer to source message buffer.
* @param len [in] Size of the message to be enqueued. * @param len [in] Size of the message to be enqueued.
* *
* @return Returns \a len on success, otherwise returns error number to indicate * @return Returns 0 on success, otherwise returns error number to indicate
* the error. * the error.
* *
* @retval -ENOMEM if the message queue doesn't have enough free space to * @retval -ENOMEM if the message queue doesn't have enough free space to
@@ -132,6 +121,7 @@ void nvgpu_queue_free(struct nvgpu_queue *queue);
*/ */
int nvgpu_queue_in(struct nvgpu_queue *queue, const void *buf, int nvgpu_queue_in(struct nvgpu_queue *queue, const void *buf,
unsigned int len); unsigned int len);
#endif
/** /**
* @brief Enqueue message into message queue after acquiring the mutex lock. * @brief Enqueue message into message queue after acquiring the mutex lock.
@@ -147,7 +137,7 @@ int nvgpu_queue_in(struct nvgpu_queue *queue, const void *buf,
* @param len [in] Size of the message to be enqueued. * @param len [in] Size of the message to be enqueued.
* @param lock [in] Mutex lock for concurrency management. * @param lock [in] Mutex lock for concurrency management.
* *
* @return Returns \a len on success, otherwise returns error number to indicate * @return Returns 0 on success, otherwise returns error number to indicate
* the error. * the error.
* *
* @retval -ENOMEM if the message queue doesn't have enough free space to * @retval -ENOMEM if the message queue doesn't have enough free space to
@@ -156,6 +146,7 @@ int nvgpu_queue_in(struct nvgpu_queue *queue, const void *buf,
int nvgpu_queue_in_locked(struct nvgpu_queue *queue, const void *buf, int nvgpu_queue_in_locked(struct nvgpu_queue *queue, const void *buf,
unsigned int len, struct nvgpu_mutex *lock); unsigned int len, struct nvgpu_mutex *lock);
#ifdef CONFIG_NVGPU_NON_FUSA
/** /**
* @brief Dequeue message from message queue. * @brief Dequeue message from message queue.
* *
@@ -166,7 +157,7 @@ int nvgpu_queue_in_locked(struct nvgpu_queue *queue, const void *buf,
* @param buf [in] Pointer to destination message buffer. * @param buf [in] Pointer to destination message buffer.
* @param len [in] Size of the message to be dequeued. * @param len [in] Size of the message to be dequeued.
* *
* @return Returns \a len on success, otherwise returns error number to indicate * @return Returns 0 on success, otherwise returns error number to indicate
* the error. * the error.
* *
* @retval -ENOMEM if the length of the messages held by the message queue is * @retval -ENOMEM if the length of the messages held by the message queue is
@@ -174,6 +165,7 @@ int nvgpu_queue_in_locked(struct nvgpu_queue *queue, const void *buf,
*/ */
int nvgpu_queue_out(struct nvgpu_queue *queue, void *buf, int nvgpu_queue_out(struct nvgpu_queue *queue, void *buf,
unsigned int len); unsigned int len);
#endif
/** /**
* @brief Dequeue message from message queue after acquiring the mutex lock. * @brief Dequeue message from message queue after acquiring the mutex lock.
@@ -188,7 +180,7 @@ int nvgpu_queue_out(struct nvgpu_queue *queue, void *buf,
* @param len [in] Size of the message to be dequeued. * @param len [in] Size of the message to be dequeued.
* @param lock [in] Mutex lock for concurrency management. * @param lock [in] Mutex lock for concurrency management.
* *
* @return Returns \a len on success, otherwise returns error number to indicate * @return Returns 0 on success, otherwise returns error number to indicate
* the error. * the error.
* *
* @retval -ENOMEM if the length of the messages held by the message queue is * @retval -ENOMEM if the length of the messages held by the message queue is

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -42,10 +42,18 @@ struct nvgpu_posix_fault_inj *nvgpu_queue_out_get_fault_injection(void)
unsigned int nvgpu_queue_available(struct nvgpu_queue *queue) unsigned int nvgpu_queue_available(struct nvgpu_queue *queue)
{ {
return nvgpu_safe_sub_u32(queue->in, queue->out); u32 ret;
if (queue->in >= queue->out) {
ret = queue->in - queue->out;
} else {
ret = (UINT32_MAX - queue->out) + (queue->in + 1U);
}
return ret;
} }
unsigned int nvgpu_queue_unused(struct nvgpu_queue *queue) static unsigned int nvgpu_queue_unused(struct nvgpu_queue *queue)
{ {
return nvgpu_safe_sub_u32(nvgpu_safe_add_u32(queue->mask, 1U), return nvgpu_safe_sub_u32(nvgpu_safe_add_u32(queue->mask, 1U),
nvgpu_queue_available(queue)); nvgpu_queue_available(queue));
@@ -114,7 +122,9 @@ static int posix_queue_in_common(struct nvgpu_queue *queue, const void *src,
if (lock != NULL) { if (lock != NULL) {
nvgpu_mutex_acquire(lock); nvgpu_mutex_acquire(lock);
} }
l = nvgpu_queue_unused(queue); l = nvgpu_queue_unused(queue);
if (len > l) { if (len > l) {
if (lock != NULL) { if (lock != NULL) {
nvgpu_mutex_release(lock); nvgpu_mutex_release(lock);
@@ -128,19 +138,26 @@ static int posix_queue_in_common(struct nvgpu_queue *queue, const void *src,
*/ */
nvgpu_smp_wmb(); nvgpu_smp_wmb();
queue->in = nvgpu_safe_add_u32(queue->in, len); if ((UINT_MAX - queue->in) < len) {
queue->in = (len - (UINT_MAX - queue->in)) - 1U;
} else {
queue->in = queue->in + len;
}
if (lock != NULL) { if (lock != NULL) {
nvgpu_mutex_release(lock); nvgpu_mutex_release(lock);
} }
return (int)len; return 0;
} }
#ifdef CONFIG_NVGPU_NON_FUSA
int nvgpu_queue_in(struct nvgpu_queue *queue, const void *buf, int nvgpu_queue_in(struct nvgpu_queue *queue, const void *buf,
unsigned int len) unsigned int len)
{ {
return posix_queue_in_common(queue, buf, len, NULL); return posix_queue_in_common(queue, buf, len, NULL);
} }
#endif
int nvgpu_queue_in_locked(struct nvgpu_queue *queue, const void *buf, int nvgpu_queue_in_locked(struct nvgpu_queue *queue, const void *buf,
unsigned int len, struct nvgpu_mutex *lock) unsigned int len, struct nvgpu_mutex *lock)
@@ -173,7 +190,8 @@ static int nvgpu_queue_out_common(struct nvgpu_queue *queue, void *buf,
if (lock != NULL) { if (lock != NULL) {
nvgpu_mutex_acquire(lock); nvgpu_mutex_acquire(lock);
} }
l = queue->in - queue->out;
l = nvgpu_queue_available(queue);
if (l < len) { if (l < len) {
if (lock != NULL) { if (lock != NULL) {
nvgpu_mutex_release(lock); nvgpu_mutex_release(lock);
@@ -186,19 +204,26 @@ static int nvgpu_queue_out_common(struct nvgpu_queue *queue, void *buf,
*/ */
nvgpu_smp_wmb(); nvgpu_smp_wmb();
queue->out = nvgpu_safe_add_u32(queue->out, len); if ((UINT_MAX - queue->out) < len) {
queue->out = (len - (UINT_MAX - queue->out)) - 1U;
} else {
queue->out = queue->out + len;
}
if (lock != NULL) { if (lock != NULL) {
nvgpu_mutex_release(lock); nvgpu_mutex_release(lock);
} }
return (int)len; return 0;
} }
#ifdef CONFIG_NVGPU_NON_FUSA
int nvgpu_queue_out(struct nvgpu_queue *queue, void *buf, int nvgpu_queue_out(struct nvgpu_queue *queue, void *buf,
unsigned int len) unsigned int len)
{ {
return nvgpu_queue_out_common(queue, buf, len, NULL); return nvgpu_queue_out_common(queue, buf, len, NULL);
} }
#endif
int nvgpu_queue_out_locked(struct nvgpu_queue *queue, void *buf, int nvgpu_queue_out_locked(struct nvgpu_queue *queue, void *buf,
unsigned int len, struct nvgpu_mutex *lock) unsigned int len, struct nvgpu_mutex *lock)

View File

@@ -637,9 +637,7 @@ nvgpu_posix_register_io
nvgpu_pte_words nvgpu_pte_words
nvgpu_queue_alloc nvgpu_queue_alloc
nvgpu_queue_free nvgpu_queue_free
nvgpu_queue_in
nvgpu_queue_in_locked nvgpu_queue_in_locked
nvgpu_queue_out
nvgpu_queue_out_locked nvgpu_queue_out_locked
nvgpu_queue_out_get_fault_injection nvgpu_queue_out_get_fault_injection
nvgpu_queue_available nvgpu_queue_available

View File

@@ -654,9 +654,7 @@ nvgpu_posix_register_io
nvgpu_pte_words nvgpu_pte_words
nvgpu_queue_alloc nvgpu_queue_alloc
nvgpu_queue_free nvgpu_queue_free
nvgpu_queue_in
nvgpu_queue_in_locked nvgpu_queue_in_locked
nvgpu_queue_out
nvgpu_queue_out_locked nvgpu_queue_out_locked
nvgpu_queue_out_get_fault_injection nvgpu_queue_out_get_fault_injection
nvgpu_queue_available nvgpu_queue_available