gpu: nvgpu: Add correct nomenclature for NVS ioctls

It's preferable to use the following naming convention:
NVGPU_<group>_IOCTL_<function>.

The IOCTL interfaces are updated accordingly.

Also, all KMD-based defines exposed as part of the UAPI need
to be prefixed with NVGPU.
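
As an illustration (mappings taken from the diffs below), the rename
follows this pattern, with group = NVS_CTRL_FIFO:

    NVGPU_NVS_CTRL_FIFO_CREATE_QUEUE
        -> NVGPU_NVS_CTRL_FIFO_IOCTL_CREATE_QUEUE
    NVGPU_NVS_QUERY_CTRL_FIFO_SCHEDULER_CHARACTERISTICS
        -> NVGPU_NVS_CTRL_FIFO_IOCTL_QUERY_SCHEDULER_CHARACTERISTICS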

Jira NVGPU-8619

Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Change-Id: I2210336536cbcc0415885f3f92a2f7fa982fa39c
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2814484
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Debarshi Dutta
Date: 2022-11-23 10:42:07 +05:30
Committed by: mobile promotions
Parent: eb60e7f1f1
Commit: 8e60795b9c
3 changed files with 59 additions and 71 deletions

File 1 of 3:

@@ -285,18 +285,6 @@ enum nvgpu_nvs_ctrl_queue_direction {
  */
 #define NVGPU_NVS_CTRL_FIFO_QUEUE_CLIENT_EVENTS_READ 4U
-/*
- * Direction of the requested queue is from CLIENT(producer)
- * to SCHEDULER(consumer).
- */
-#define NVGPU_NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER 0
-/*
- * Direction of the requested queue is from SCHEDULER(producer)
- * to CLIENT(consumer).
- */
-#define NVGPU_NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT 1
 /* Structure to hold control_queues. This can be then passed to GSP or Rm based subscheduler. */
 struct nvgpu_nvs_ctrl_queue {
 	struct nvgpu_mem mem;

File 2 of 3:

@@ -715,12 +715,12 @@ static int nvgpu_nvs_ctrl_fifo_create_queue_verify_flags(struct gk20a *g,
 		return -EINVAL;
 	}
-	if (args->access_type == NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE) {
-		if ((args->queue_num != NVS_CTRL_FIFO_QUEUE_NUM_EVENT)
+	if (args->access_type == NVGPU_NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE) {
+		if ((args->queue_num != NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_EVENT)
 			&& (args->queue_num != NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_CONTROL))
 			return -EINVAL;
-		if ((args->direction != NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER)
-			&& (args->direction != NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT))
+		if ((args->direction != NVGPU_NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER)
+			&& (args->direction != NVGPU_NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT))
 			return -EINVAL;
 		if (!nvgpu_nvs_ctrl_fifo_is_exclusive_user(g->sched_ctrl_fifo, user)) {
 			err = nvgpu_nvs_ctrl_fifo_reserve_exclusive_user(g->sched_ctrl_fifo, user);
@@ -728,10 +728,10 @@ static int nvgpu_nvs_ctrl_fifo_create_queue_verify_flags(struct gk20a *g,
 				return err;
 			}
 		}
-	} else if (args->access_type == NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_NON_EXCLUSIVE) {
-		if (args->queue_num != NVS_CTRL_FIFO_QUEUE_NUM_EVENT)
+	} else if (args->access_type == NVGPU_NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_NON_EXCLUSIVE) {
+		if (args->queue_num != NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_EVENT)
 			return -EINVAL;
-		if (args->direction != NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT)
+		if (args->direction != NVGPU_NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT)
 			return -EINVAL;
 	} else {
 		return -EINVAL;
@@ -745,7 +745,7 @@ static enum nvgpu_nvs_ctrl_queue_num nvgpu_nvs_translate_queue_num(u32 queue_num
 	enum nvgpu_nvs_ctrl_queue_num num_queue = NVGPU_NVS_INVALID;
 	if (queue_num_arg == NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_CONTROL)
 		num_queue = NVGPU_NVS_NUM_CONTROL;
-	else if (queue_num_arg == NVS_CTRL_FIFO_QUEUE_NUM_EVENT)
+	else if (queue_num_arg == NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_EVENT)
 		num_queue = NVGPU_NVS_NUM_EVENT;
 	return num_queue;
@@ -755,9 +755,9 @@ static enum nvgpu_nvs_ctrl_queue_direction
 nvgpu_nvs_translate_queue_direction(u32 queue_direction)
 {
 	enum nvgpu_nvs_ctrl_queue_direction direction = NVGPU_NVS_DIR_INVALID;
-	if (queue_direction == NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER)
+	if (queue_direction == NVGPU_NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER)
 		direction = NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER;
-	else if (queue_direction == NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT)
+	else if (queue_direction == NVGPU_NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT)
 		direction = NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT;
 	return direction;
@@ -795,14 +795,14 @@ static int nvgpu_nvs_ctrl_fifo_create_queue(struct gk20a *g,
 		goto fail;
 	}
-	read_only = (args->access_type == NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE) ? false : true;
+	read_only = (args->access_type == NVGPU_NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE) ? false : true;
 	if (read_only) {
 		flag |= O_RDONLY;
 	} else {
 		flag |= O_RDWR;
 	}
-	if (args->access_type == NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE) {
+	if (args->access_type == NVGPU_NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE) {
 		/* Observers are not supported for Control Queues, So ensure, buffer is invalid */
 		if (nvgpu_nvs_buffer_is_valid(g, queue) && (num_queue == NVGPU_NVS_NUM_CONTROL)) {
 			err = -EBUSY;
@@ -998,17 +998,17 @@ fail:
 static u32 nvgpu_nvs_translate_hw_scheduler_impl(struct gk20a *g, uint8_t impl)
 {
 	if (impl == NVGPU_NVS_DOMAIN_SCHED_KMD) {
-		return NVS_DOMAIN_SCHED_KMD;
+		return NVGPU_NVS_DOMAIN_SCHED_KMD;
 	} else if (impl == NVGPU_NVS_DOMAIN_SCHED_GSP) {
-		return NVS_DOMAIN_SCHED_GSP;
+		return NVGPU_NVS_DOMAIN_SCHED_GSP;
 	}
-	return NVS_DOMAIN_SCHED_INVALID;
+	return NVGPU_NVS_DOMAIN_SCHED_INVALID;
 }
 static int nvgpu_nvs_query_scheduler_characteristics(struct gk20a *g,
 	struct nvs_domain_ctrl_fifo_user *user,
-	struct nvgpu_nvs_ctrl_fifo_scheduler_characteristics_args *args)
+	struct nvgpu_nvs_ctrl_fifo_ioctl_query_scheduler_characteristics_args *args)
 {
 	struct nvs_domain_ctrl_fifo_capabilities *capabilities;
@@ -1027,7 +1027,7 @@ static int nvgpu_nvs_query_scheduler_characteristics(struct gk20a *g,
 	capabilities = nvgpu_nvs_ctrl_fifo_get_capabilities(g->sched_ctrl_fifo);
 	args->domain_scheduler_implementation =
 		nvgpu_nvs_translate_hw_scheduler_impl(g, capabilities->scheduler_implementation_hw);
-	args->available_queues = NVS_CTRL_FIFO_QUEUE_NUM_EVENT;
+	args->available_queues = NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_EVENT;
 	if (user->has_write_access) {
 		args->available_queues |= NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_CONTROL;
@@ -1075,7 +1075,7 @@ long nvgpu_nvs_ctrl_fifo_ops_ioctl(struct file *filp, unsigned int cmd, unsigned
 	}
 	switch (cmd) {
-	case NVGPU_NVS_CTRL_FIFO_CREATE_QUEUE:
+	case NVGPU_NVS_CTRL_FIFO_IOCTL_CREATE_QUEUE:
 	{
 		struct nvgpu_nvs_ctrl_fifo_ioctl_create_queue_args *args =
 			(struct nvgpu_nvs_ctrl_fifo_ioctl_create_queue_args *)buf;
@@ -1094,7 +1094,7 @@ long nvgpu_nvs_ctrl_fifo_ops_ioctl(struct file *filp, unsigned int cmd, unsigned
 		break;
 	}
-	case NVGPU_NVS_CTRL_FIFO_RELEASE_QUEUE:
+	case NVGPU_NVS_CTRL_FIFO_IOCTL_RELEASE_QUEUE:
 	{
 		struct nvgpu_nvs_ctrl_fifo_ioctl_release_queue_args *args =
 			(struct nvgpu_nvs_ctrl_fifo_ioctl_release_queue_args *)buf;
@@ -1105,15 +1105,15 @@ long nvgpu_nvs_ctrl_fifo_ops_ioctl(struct file *filp, unsigned int cmd, unsigned
 		break;
 	}
-	case NVGPU_NVS_CTRL_FIFO_ENABLE_EVENT:
+	case NVGPU_NVS_CTRL_FIFO_IOCTL_ENABLE_EVENT:
 	{
 		err = -EOPNOTSUPP;
 		goto done;
 	}
-	case NVGPU_NVS_QUERY_CTRL_FIFO_SCHEDULER_CHARACTERISTICS:
+	case NVGPU_NVS_CTRL_FIFO_IOCTL_QUERY_SCHEDULER_CHARACTERISTICS:
 	{
-		struct nvgpu_nvs_ctrl_fifo_scheduler_characteristics_args *args =
-			(struct nvgpu_nvs_ctrl_fifo_scheduler_characteristics_args *)buf;
+		struct nvgpu_nvs_ctrl_fifo_ioctl_query_scheduler_characteristics_args *args =
+			(struct nvgpu_nvs_ctrl_fifo_ioctl_query_scheduler_characteristics_args *)buf;
 		if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_NVS)) {
 			err = -EOPNOTSUPP;

File 3 of 3:

@@ -158,32 +158,32 @@ struct nvgpu_nvs_ioctl_query_domains {
 /* Request for a Control Queue. */
 #define NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_CONTROL 1U
 /* Request for an Event queue. */
-#define NVS_CTRL_FIFO_QUEUE_NUM_EVENT 2U
+#define NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_EVENT 2U
 /* Direction of the requested queue is from CLIENT(producer)
  * to SCHEDULER(consumer).
  */
-#define NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER 0
+#define NVGPU_NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER 0
 /* Direction of the requested queue is from SCHEDULER(producer)
  * to CLIENT(consumer).
  */
-#define NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT 1
+#define NVGPU_NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT 1
-#define NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE 1
-#define NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_NON_EXCLUSIVE 0
+#define NVGPU_NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE 1
+#define NVGPU_NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_NON_EXCLUSIVE 0
 /**
- * NVGPU_NVS_CTRL_FIFO_CREATE_QUEUE
+ * NVGPU_NVS_CTRL_FIFO_IOCTL_CREATE_QUEUE
  *
 * Create shared queues for domain scheduler's control fifo.
 *
 * 'queue_num' is set by UMD to NVS_CTRL_FIFO_QUEUE_NUM_CONTROL
- * for Send/Receive queues and NVS_CTRL_FIFO_QUEUE_NUM_EVENT
+ * for Send/Receive queues and NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_EVENT
 * for Event Queue.
 *
- * 'direction' is set by UMD to NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER
- * for Send Queue and NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT
+ * 'direction' is set by UMD to NVGPU_NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER
+ * for Send Queue and NVGPU_NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT
 * for Receive/Event Queue.
 *
 * The parameter 'queue_size' is set by KMD.
@@ -193,10 +193,10 @@ struct nvgpu_nvs_ioctl_query_domains {
 * so until the client closes the control-fifo device node.
 *
 * Clients that require exclusive access shall set 'access_type'
- * to NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE, otherwise set it to
- * NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_NON_EXCLUSIVE.
+ * to NVGPU_NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE, otherwise set it to
+ * NVGPU_NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_NON_EXCLUSIVE.
 *
- * Note, queues of NVS_CTRL_FIFO_QUEUE_NUM_EVENT has shared read-only
+ * Note, queues of NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_EVENT has shared read-only
 * access irrespective of the type of client.
 *
 * 'dmabuf_fd' is populated by the KMD for the success case, else its set to -1.
@@ -242,23 +242,23 @@ struct nvgpu_nvs_ctrl_fifo_ioctl_create_queue_args {
 };
 /**
- * NVGPU_NVS_CTRL_FIFO_RELEASE_QUEUE
+ * NVGPU_NVS_CTRL_FIFO_IOCTL_RELEASE_QUEUE
 *
 * Release a domain scheduler's queue.
 *
 * 'queue_num' is set by UMD to NVS_CTRL_FIFO_QUEUE_NUM_CONTROL
- * for Send/Receive queues and NVS_CTRL_FIFO_QUEUE_NUM_EVENT
+ * for Send/Receive queues and NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_EVENT
 * for Event Queue.
 *
- * 'direction' is set by UMD to NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER
- * for Send Queue and NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT
+ * 'direction' is set by UMD to NVGPU_NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER
+ * for Send Queue and NVGPU_NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT
 * for Receive/Event Queue.
 *
 * Returns an error if queues of type NVS_CTRL_FIFO_QUEUE_NUM_CONTROL
 * have an active mapping.
 *
 * Mapped buffers are removed immediately for queues of type
- * NVS_CTRL_FIFO_QUEUE_NUM_CONTROL while those of type NVS_CTRL_FIFO_QUEUE_NUM_EVENT
+ * NVS_CTRL_FIFO_QUEUE_NUM_CONTROL while those of type NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_EVENT
 * are removed when the last user releases the control device node.
 *
 * User must ensure to invoke this IOCTL after invoking munmap on
@@ -285,9 +285,9 @@ struct nvgpu_nvs_ctrl_fifo_ioctl_release_queue_args {
 struct nvgpu_nvs_ctrl_fifo_ioctl_event {
 /* Enable Fault Detection Event */
-#define NVS_CTRL_FIFO_EVENT_FAULTDETECTED 1LLU
+#define NVGPU_NVS_CTRL_FIFO_EVENT_FAULTDETECTED 1LLU
 /* Enable Fault Recovery Detection Event */
-#define NVS_CTRL_FIFO_EVENT_FAULTRECOVERY 2LLU
+#define NVGPU_NVS_CTRL_FIFO_EVENT_FAULTRECOVERY 2LLU
 	__u64 event_mask;
 	/* Must be 0. */
@@ -295,38 +295,38 @@ struct nvgpu_nvs_ctrl_fifo_ioctl_event {
 };
 /**
- * NVGPU_NVS_QUERY_CTRL_FIFO_SCHEDULER_CHARACTERISTICS
+ * NVGPU_NVS_CTRL_FIFO_IOCTL_QUERY_SCHEDULER_CHARACTERISTICS
 *
 * Query the characteristics of the domain scheduler.
 * For R/W user, available_queues is set to
- * NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_CONTROL | NVS_CTRL_FIFO_QUEUE_NUM_EVENT
+ * NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_CONTROL | NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_EVENT
 *
 * For Non-Exclusive users(can be multiple), available_queues is set to
- * NVS_CTRL_FIFO_QUEUE_NUM_EVENT.
+ * NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_EVENT.
 *
 * Note that, even for multiple R/W users, only one user at a time
 * can exist as an exclusive user. Only exclusive users can create/destroy
 * queues of type 'NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_CONTROL'
 */
-struct nvgpu_nvs_ctrl_fifo_scheduler_characteristics_args {
+struct nvgpu_nvs_ctrl_fifo_ioctl_query_scheduler_characteristics_args {
 /*
 * Invalid domain scheduler.
 * The value of 'domain_scheduler_implementation'
 * when 'has_domain_scheduler_control_fifo' is 0.
 */
-#define NVS_DOMAIN_SCHED_INVALID 0U
+#define NVGPU_NVS_DOMAIN_SCHED_INVALID 0U
 /*
 * CPU based scheduler implementation. Intended use is mainly
 * for debug and testing purposes. Doesn't meet latency requirements.
 * Implementation will be supported in the initial versions and eventually
 * discarded.
 */
-#define NVS_DOMAIN_SCHED_KMD 1U
+#define NVGPU_NVS_DOMAIN_SCHED_KMD 1U
 /*
 * GSP based scheduler implementation that meets latency requirements.
- * This implementation will eventually replace NVS_DOMAIN_SCHED_KMD.
+ * This implementation will eventually replace NVGPU_NVS_DOMAIN_SCHED_KMD.
 */
-#define NVS_DOMAIN_SCHED_GSP 2U
+#define NVGPU_NVS_DOMAIN_SCHED_GSP 2U
 /*
 * - Out: Value is expected to be among the above available flags.
 */
@@ -342,24 +342,24 @@ struct nvgpu_nvs_ctrl_fifo_scheduler_characteristics_args {
 	__u32 available_queues;
 	/* Must be 0. */
-	__u64 reserved2;
+	__u64 reserved2[8];
 };
-#define NVGPU_NVS_CTRL_FIFO_CREATE_QUEUE \
+#define NVGPU_NVS_CTRL_FIFO_IOCTL_CREATE_QUEUE \
 	_IOWR(NVGPU_NVS_CTRL_FIFO_IOCTL_MAGIC, 1, \
 		struct nvgpu_nvs_ctrl_fifo_ioctl_create_queue_args)
-#define NVGPU_NVS_CTRL_FIFO_RELEASE_QUEUE \
+#define NVGPU_NVS_CTRL_FIFO_IOCTL_RELEASE_QUEUE \
 	_IOWR(NVGPU_NVS_CTRL_FIFO_IOCTL_MAGIC, 2, \
 		struct nvgpu_nvs_ctrl_fifo_ioctl_release_queue_args)
-#define NVGPU_NVS_CTRL_FIFO_ENABLE_EVENT \
+#define NVGPU_NVS_CTRL_FIFO_IOCTL_ENABLE_EVENT \
 	_IOW(NVGPU_NVS_CTRL_FIFO_IOCTL_MAGIC, 3, \
 		struct nvgpu_nvs_ctrl_fifo_ioctl_event)
-#define NVGPU_NVS_QUERY_CTRL_FIFO_SCHEDULER_CHARACTERISTICS \
-	_IOW(NVGPU_NVS_CTRL_FIFO_IOCTL_MAGIC, 4, \
-		struct nvgpu_nvs_ctrl_fifo_scheduler_characteristics_args)
+#define NVGPU_NVS_CTRL_FIFO_IOCTL_QUERY_SCHEDULER_CHARACTERISTICS \
+	_IOR(NVGPU_NVS_CTRL_FIFO_IOCTL_MAGIC, 4, \
+		struct nvgpu_nvs_ctrl_fifo_ioctl_query_scheduler_characteristics_args)
 #define NVGPU_NVS_CTRL_FIFO_IOCTL_LAST \
-	_IOC_NR(NVGPU_NVS_QUERY_CTRL_FIFO_SCHEDULER_CHARACTERISTICS)
+	_IOC_NR(NVGPU_NVS_CTRL_FIFO_IOCTL_QUERY_SCHEDULER_CHARACTERISTICS)
 #define NVGPU_NVS_CTRL_FIFO_IOCTL_MAX_ARG_SIZE \
-	sizeof(struct nvgpu_nvs_ctrl_fifo_ioctl_create_queue_args)
+	sizeof(struct nvgpu_nvs_ctrl_fifo_ioctl_query_scheduler_characteristics_args)
 #endif
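
For reference, a minimal user-space sketch of calling the renamed query
IOCTL after this change. The header name and device-node path below are
placeholders (neither appears in this diff); the IOCTL name, the args
struct, and the queue flags match the UAPI definitions above. Note the
IOCTL is now _IOR, so the KMD only writes the args back to the caller.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "nvgpu-nvs.h"	/* placeholder name for the UAPI header diffed above */

int main(void)
{
	struct nvgpu_nvs_ctrl_fifo_ioctl_query_scheduler_characteristics_args args;
	int fd = open("/dev/nvgpu-ctrl-fifo", O_RDWR);	/* placeholder node */

	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));	/* reserved fields must be 0 */
	if (ioctl(fd, NVGPU_NVS_CTRL_FIFO_IOCTL_QUERY_SCHEDULER_CHARACTERISTICS, &args) == 0) {
		/* available_queues is a bitmask of the queue-num flags above. */
		if (args.available_queues & NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_CONTROL)
			printf("control queues available (R/W user)\n");
		if (args.available_queues & NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_EVENT)
			printf("event queue available\n");
	}

	close(fd);
	return 0;
}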