gpu: nvgpu: add uapi support for control_fifo

Add the following IOCTL entries to the UAPI
headers. The corresponding implementation will
follow in subsequent patches.

NVGPU_NVS_QUERY_CTRL_FIFO_SCHEDULER_CHARACTERISTICS
NVGPU_NVS_CTRL_FIFO_CREATE_QUEUE
NVGPU_NVS_CTRL_FIFO_RELEASE_QUEUE
NVGPU_NVS_CTRL_FIFO_ENABLE_EVENT

Jira NVGPU-8129

Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Change-Id: Id7aaa8593a782ed5266b4f96f762e6b9d71a323b
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2700751
Reviewed-by: Sami Kiminki <skiminki@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author: Debarshi Dutta
Date: 2022-06-03 11:29:09 +05:30
Committed-by: mobile promotions
Parent: c99819ffd8
Commit: e355ab9c21


@@ -17,6 +17,7 @@
#include "nvgpu-uapi-common.h"
#define NVGPU_NVS_IOCTL_MAGIC 'N'
#define NVGPU_NVS_CTRL_FIFO_IOCTL_MAGIC 'F'
/**
* Domain parameters to pass to the kernel.
@@ -149,10 +150,216 @@ struct nvgpu_nvs_ioctl_query_domains {
_IOWR(NVGPU_NVS_IOCTL_MAGIC, 3, \
struct nvgpu_nvs_ioctl_query_domains)
#define NVGPU_NVS_IOCTL_LAST \
_IOC_NR(NVGPU_NVS_IOCTL_QUERY_DOMAINS)
#define NVGPU_NVS_IOCTL_MAX_ARG_SIZE \
sizeof(struct nvgpu_nvs_ioctl_create_domain)
/* Request for a Control Queue. */
#define NVS_CTRL_FIFO_QUEUE_NUM_CONTROL 1U
/* Request for an Event queue. */
#define NVS_CTRL_FIFO_QUEUE_NUM_EVENT 2U
/* Direction of the requested queue is from CLIENT(producer)
* to SCHEDULER(consumer).
*/
#define NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER 0
/* Direction of the requested queue is from SCHEDULER(producer)
* to CLIENT(consumer).
*/
#define NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT 1
#define NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE 1
#define NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_NON_EXCLUSIVE 0
/**
* NVGPU_NVS_CTRL_FIFO_CREATE_QUEUE
*
* Create shared queues for the domain scheduler's control fifo.
*
* 'queue_num' is set by UMD to NVS_CTRL_FIFO_QUEUE_NUM_CONTROL
* for Send/Receive queues and NVS_CTRL_FIFO_QUEUE_NUM_EVENT
* for Event Queue.
*
* 'direction' is set by UMD to NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER
* for Send Queue and NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT
* for Receive/Event Queue.
*
* The parameter 'queue_size' is set by the KMD.
*
* Initially, all clients are set up as non-exclusive. The first client to
* successfully request exclusive access is internally marked as an exclusive
* client. It remains so until the client closes the control-fifo device node.
*
* Clients that require exclusive access shall set 'access_type'
* to NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE, otherwise set it to
* NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_NON_EXCLUSIVE.
*
* Note that queues of type NVS_CTRL_FIFO_QUEUE_NUM_EVENT have shared
* read-only access irrespective of the type of client.
*
* 'dmabuf_fd' is populated by the KMD on success; otherwise it is set to -1.
*/
struct nvgpu_nvs_ctrl_fifo_ioctl_create_queue_args {
/* - In: Denote the queue num. */
__u32 queue_num;
/* - In: Denote the direction of producer => consumer */
__u8 direction;
/* - In: Denote the type of request */
__u8 access_type;
/* Must be 0. */
__u16 reserve0;
/* - Out: Size of the queue in bytes. Multiple of 4 bytes */
__u32 queue_size;
/* - Out: dmabuf file descriptor (FD) of the shared queue exposed via the KMD.
* - This field is expected to be populated by the KMD.
* - UMD is expected to close the FD.
*
* - mmap() is used to access the queue.
* - MAP_SHARED must be specified.
* - Exclusive access clients may map with read-write access (PROT_READ | PROT_WRITE).
* Shared access clients may map only with read-only access (PROT_READ)
*
* - CPU caching mode
* - A cached-coherent memory type is used when the system supports this
* between the client and the scheduler.
* - A non-cached memory type is used otherwise.
*
* - On Tegra:
* Normal cacheable (inner shareable) on T194/T234 with the KMD scheduler.
* Normal cacheable (outer shareable, I/O coherency enabled) for T234 with the GSP scheduler.
*
* - On generic ARM:
* Normal cacheable (inner shareable) with the KMD scheduler.
* Normal non-cacheable write-combining with the GSP scheduler.
*/
__s32 dmabuf_fd;
};
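/*
 * Informative usage sketch (not part of the UAPI): creating and mapping an
 * exclusive send queue. 'ctrl_fifo_fd' is assumed to be an open file
 * descriptor of the control-fifo device node, and the usual <sys/ioctl.h>,
 * <sys/mman.h>, <unistd.h> and <errno.h> includes are assumed.
 *
 *   struct nvgpu_nvs_ctrl_fifo_ioctl_create_queue_args args = {0};
 *   void *queue;
 *
 *   args.queue_num = NVS_CTRL_FIFO_QUEUE_NUM_CONTROL;
 *   args.direction = NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER;
 *   args.access_type = NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE;
 *   if (ioctl(ctrl_fifo_fd, NVGPU_NVS_CTRL_FIFO_CREATE_QUEUE, &args) != 0)
 *       return -errno;
 *
 *   queue = mmap(NULL, args.queue_size, PROT_READ | PROT_WRITE,
 *                MAP_SHARED, args.dmabuf_fd, 0);
 *   close(args.dmabuf_fd);
 */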
/**
* NVGPU_NVS_CTRL_FIFO_RELEASE_QUEUE
*
* Release a domain scheduler's queue.
*
* 'queue_num' is set by UMD to NVS_CTRL_FIFO_QUEUE_NUM_CONTROL
* for Send/Receive queues and NVS_CTRL_FIFO_QUEUE_NUM_EVENT
* for Event Queue.
*
* 'direction' is set by UMD to NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER
* for Send Queue and NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT
* for Receive/Event Queue.
*
* Returns an error if queues of type NVS_CTRL_FIFO_QUEUE_NUM_CONTROL
* have an active mapping.
*
* Mapped buffers are removed immediately for queues of type
* NVS_CTRL_FIFO_QUEUE_NUM_CONTROL while those of type NVS_CTRL_FIFO_QUEUE_NUM_EVENT
* are removed when the last user releases the control device node.
*
* The user must invoke this IOCTL only after calling munmap() on the
* mapped address; otherwise, accessing the buffer could lead to segfaults.
* See the informative sketch below the struct definition.
*
*/
struct nvgpu_nvs_ctrl_fifo_ioctl_release_queue_args {
/* - In: Denote the queue num. */
__u32 queue_num;
/* - In: Denote the direction of producer => consumer */
__u8 direction;
/* Must be 0. */
__u8 reserve0;
/* Must be 0. */
__u16 reserve1;
/* Must be 0. */
__u64 reserve2;
};
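/*
 * Informative usage sketch (not part of the UAPI): releasing the send queue
 * mapped in the sketch above. For control queues, munmap() must come first,
 * since the release fails while an active mapping exists.
 *
 *   struct nvgpu_nvs_ctrl_fifo_ioctl_release_queue_args args = {0};
 *
 *   munmap(queue, queue_size);
 *   args.queue_num = NVS_CTRL_FIFO_QUEUE_NUM_CONTROL;
 *   args.direction = NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER;
 *   if (ioctl(ctrl_fifo_fd, NVGPU_NVS_CTRL_FIFO_RELEASE_QUEUE, &args) != 0)
 *       return -errno;
 */
/**
 * NVGPU_NVS_CTRL_FIFO_ENABLE_EVENT
 *
 * Enable the scheduler events selected by the bits set in 'event_mask'.
 */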
struct nvgpu_nvs_ctrl_fifo_ioctl_event {
/* Enable Fault Detection Event */
#define NVS_CTRL_FIFO_EVENT_FAULTDETECTED 1LLU
/* Enable Fault Recovery Detection Event */
#define NVS_CTRL_FIFO_EVENT_FAULTRECOVERY 2LLU
__u64 event_mask;
/* Must be 0. */
__u64 reserve0;
};
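/*
 * Informative usage sketch (not part of the UAPI): subscribing to fault and
 * fault-recovery events. 'ctrl_fifo_fd' is assumed to be an open file
 * descriptor of the control-fifo device node.
 *
 *   struct nvgpu_nvs_ctrl_fifo_ioctl_event ev = {0};
 *
 *   ev.event_mask = NVS_CTRL_FIFO_EVENT_FAULTDETECTED |
 *                   NVS_CTRL_FIFO_EVENT_FAULTRECOVERY;
 *   if (ioctl(ctrl_fifo_fd, NVGPU_NVS_CTRL_FIFO_ENABLE_EVENT, &ev) != 0)
 *       return -errno;
 */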
/**
* NVGPU_NVS_QUERY_CTRL_FIFO_SCHEDULER_CHARACTERISTICS
*
* Query the characteristics of the domain scheduler.
* For an R/W (exclusive) user, 'available_queues' is set to
* NVS_CTRL_FIFO_QUEUE_NUM_CONTROL | NVS_CTRL_FIFO_QUEUE_NUM_EVENT.
*
* For non-exclusive users (of which there can be multiple),
* 'available_queues' is set to NVS_CTRL_FIFO_QUEUE_NUM_EVENT.
*
* Note that even with multiple users present, only one user at a time
* can be the exclusive user. Only exclusive users can create/destroy
* queues of type NVS_CTRL_FIFO_QUEUE_NUM_CONTROL.
*/
struct nvgpu_nvs_ctrl_fifo_scheduler_characteristics_args {
/*
* Invalid domain scheduler.
* The value of 'domain_scheduler_implementation'
* when 'has_domain_scheduler_control_fifo' is 0.
*/
#define NVS_DOMAIN_SCHED_INVALID 0U
/*
* CPU-based scheduler implementation. Intended mainly for debug and
* testing purposes; it does not meet latency requirements. This
* implementation will be supported in the initial versions and
* eventually discarded.
*/
#define NVS_DOMAIN_SCHED_KMD 1U
/*
* GSP-based scheduler implementation that meets latency requirements.
* This implementation will eventually replace NVS_DOMAIN_SCHED_KMD.
*/
#define NVS_DOMAIN_SCHED_GSP 2U
/*
* - Out: Set to one of the NVS_DOMAIN_SCHED_* values above.
*/
__u8 domain_scheduler_implementation;
/* Must be 0 */
__u8 reserved0;
/* Must be 0 */
__u16 reserved1;
/* - Out: Mask of supported queue nums. */
__u32 available_queues;
/* Must be 0. */
__u64 reserved2;
};
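/*
 * Informative usage sketch (not part of the UAPI): querying the scheduler
 * characteristics before creating any queues. 'ctrl_fifo_fd' is assumed to
 * be an open file descriptor of the control-fifo device node.
 *
 *   struct nvgpu_nvs_ctrl_fifo_scheduler_characteristics_args ch = {0};
 *
 *   if (ioctl(ctrl_fifo_fd,
 *             NVGPU_NVS_QUERY_CTRL_FIFO_SCHEDULER_CHARACTERISTICS, &ch) != 0)
 *       return -errno;
 *   if (ch.domain_scheduler_implementation == NVS_DOMAIN_SCHED_INVALID)
 *       return -1;
 *
 * The NVS_CTRL_FIFO_QUEUE_NUM_CONTROL bit of 'ch.available_queues' then
 * indicates whether this client may create the control (send/receive) queues.
 */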
#define NVGPU_NVS_CTRL_FIFO_CREATE_QUEUE \
_IOWR(NVGPU_NVS_CTRL_FIFO_IOCTL_MAGIC, 1, \
struct nvgpu_nvs_ctrl_fifo_ioctl_create_queue_args)
#define NVGPU_NVS_CTRL_FIFO_RELEASE_QUEUE \
_IOWR(NVGPU_NVS_CTRL_FIFO_IOCTL_MAGIC, 2, \
struct nvgpu_nvs_ctrl_fifo_ioctl_release_queue_args)
#define NVGPU_NVS_CTRL_FIFO_ENABLE_EVENT \
_IOW(NVGPU_NVS_CTRL_FIFO_IOCTL_MAGIC, 3, \
struct nvgpu_nvs_ctrl_fifo_ioctl_event)
#define NVGPU_NVS_QUERY_CTRL_FIFO_SCHEDULER_CHARACTERISTICS \
_IOWR(NVGPU_NVS_CTRL_FIFO_IOCTL_MAGIC, 4, \
struct nvgpu_nvs_ctrl_fifo_scheduler_characteristics_args)
#define NVGPU_NVS_CTRL_FIFO_IOCTL_LAST \
_IOC_NR(NVGPU_NVS_QUERY_CTRL_FIFO_SCHEDULER_CHARACTERISTICS)
#define NVGPU_NVS_CTRL_FIFO_IOCTL_MAX_ARG_SIZE \
sizeof(struct nvgpu_nvs_ctrl_fifo_ioctl_create_queue_args)
#endif