gpu: nvgpu: implement domain scheduler characteristics ioctl

Added the NVGPU_NVS_QUERY_CTRL_FIFO_SCHEDULER_CHARACTERISTICS
ioctl to the control-fifo device node. The ioctl reports the domain
scheduler implementation in use (KMD or GSP) and the bitmask of
queues available to the calling user; the control queue is only
advertised to users with write access.

Jira NVGPU-8129

Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Change-Id: I651bd1958b6a27dc17687dee663bb93c2f807b68
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2723871
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit
Debarshi Dutta
2022-06-22 11:02:16 +05:30
committed by mobile promotions
parent e7f9de6567
commit 7a956cf5a2
3 changed files with 92 additions and 3 deletions
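For context, a user-space client would exercise the new query roughly as sketched below. This is an illustrative sketch only: the UAPI header name and the wrapper function are assumptions, and the device-node path is supplied by the caller; only the ioctl name, the args struct, and its fields are taken from the change itself.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvgpu-nvs.h> /* assumed UAPI header exposing the ioctl and args struct */

/* Illustrative sketch, not part of this change. */
static int query_sched_characteristics(const char *ctrl_fifo_node)
{
	struct nvgpu_nvs_ctrl_fifo_scheduler_characteristics_args args;
	int fd = open(ctrl_fifo_node, O_RDWR);

	if (fd < 0)
		return -1;

	/* Reserved fields must be zero or the driver returns -EINVAL. */
	memset(&args, 0, sizeof(args));

	if (ioctl(fd, NVGPU_NVS_QUERY_CTRL_FIFO_SCHEDULER_CHARACTERISTICS, &args) != 0) {
		close(fd);
		return -1;
	}

	printf("scheduler impl: %u, available queues: 0x%x\n",
	       (unsigned int)args.domain_scheduler_implementation,
	       (unsigned int)args.available_queues);
	close(fd);
	return 0;
}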


@@ -78,6 +78,8 @@ struct nvgpu_nvs_domain_ctrl_fifo {
struct nvgpu_nvs_domain_ctrl_fifo_users users;
struct nvgpu_nvs_domain_ctrl_fifo_queues queues;
struct nvs_domain_ctrl_fifo_capabilities capabilities;
};
void nvgpu_nvs_ctrl_fifo_reset_exclusive_user(
@@ -203,6 +205,8 @@ struct nvgpu_nvs_domain_ctrl_fifo *nvgpu_nvs_ctrl_fifo_create(struct gk20a *g)
return NULL;
}
sched->capabilities.scheduler_implementation_hw = NVGPU_NVS_DOMAIN_SCHED_KMD;
nvgpu_spinlock_init(&sched->users.user_lock);
nvgpu_mutex_init(&sched->queues.queue_lock);
nvgpu_init_list_node(&sched->users.exclusive_user);
@@ -272,6 +276,12 @@ struct nvgpu_nvs_ctrl_queue *nvgpu_nvs_ctrl_fifo_get_queue(
return queue;
}
struct nvs_domain_ctrl_fifo_capabilities *nvgpu_nvs_ctrl_fifo_get_capabilities(
struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl)
{
return &sched_ctrl->capabilities;
}
bool nvgpu_nvs_buffer_is_valid(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *buf)
{
return buf->valid;
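For reference, driver code reads the capability block back through the new accessor; a minimal in-driver sketch (the wrapper function is illustrative and not part of this change):

/* Illustrative wrapper, not part of this change. */
static uint8_t example_read_sched_impl(struct gk20a *g)
{
	struct nvs_domain_ctrl_fifo_capabilities *caps =
		nvgpu_nvs_ctrl_fifo_get_capabilities(g->sched_ctrl_fifo);

	/* Set to NVGPU_NVS_DOMAIN_SCHED_KMD in nvgpu_nvs_ctrl_fifo_create(). */
	return caps->scheduler_implementation_hw;
}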


@@ -49,6 +49,11 @@ struct nvgpu_runlist_domain;
struct nvgpu_nvs_ctrl_queue;
struct nvgpu_nvs_domain_ctrl_fifo;
struct nvs_domain_ctrl_fifo_capabilities {
/* Store type of scheduler backend */
uint8_t scheduler_implementation_hw;
};
/* Structure to store user info common to all schedulers */
struct nvs_domain_ctrl_fifo_user {
/*
@@ -63,6 +68,11 @@ struct nvs_domain_ctrl_fifo_user {
int pid;
/* Mask of actively used queue */
u32 active_used_queues;
/*
* Used to hold the scheduler capabilities.
*/
struct nvs_domain_ctrl_fifo_capabilities capabilities;
/*
* Listnode used for keeping references to the user in
* the master struct nvgpu_nvs_domain_ctrl_fifo
@@ -253,6 +263,8 @@ int nvgpu_nvs_ctrl_fifo_reserve_exclusive_user(
struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl, struct nvs_domain_ctrl_fifo_user *user);
void nvgpu_nvs_ctrl_fifo_remove_user(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
struct nvs_domain_ctrl_fifo_user *user);
struct nvs_domain_ctrl_fifo_capabilities *nvgpu_nvs_ctrl_fifo_get_capabilities(
struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl);
struct nvgpu_nvs_ctrl_queue *nvgpu_nvs_ctrl_fifo_get_queue(
struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
enum nvgpu_nvs_ctrl_queue_num queue_num,


@@ -700,9 +700,11 @@ static int nvgpu_nvs_ctrl_fifo_create_queue_verify_flags(struct gk20a *g,
}
if (args->access_type == NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE) {
if (args->queue_num == 0)
if ((args->queue_num != NVS_CTRL_FIFO_QUEUE_NUM_EVENT)
&& (args->queue_num != NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_CONTROL))
return -EINVAL;
if (args->direction == 0)
if ((args->direction != NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER)
&& (args->direction != NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT))
return -EINVAL;
if (!nvgpu_nvs_ctrl_fifo_is_exclusive_user(g->sched_ctrl_fifo, user)) {
err = nvgpu_nvs_ctrl_fifo_reserve_exclusive_user(g->sched_ctrl_fifo, user);
@@ -710,11 +712,13 @@ static int nvgpu_nvs_ctrl_fifo_create_queue_verify_flags(struct gk20a *g,
return err;
}
}
} else {
} else if (args->access_type == NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_NON_EXCLUSIVE) {
if (args->queue_num != NVS_CTRL_FIFO_QUEUE_NUM_EVENT)
return -EINVAL;
if (args->direction != NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT)
return -EINVAL;
} else {
return -EINVAL;
}
return 0;
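The net effect of the tightened queue checks above is easier to read as a single predicate. A sketch using the same constants; the helper name and parameter types are illustrative, not part of this change:

/* Illustrative summary of the accepted combinations, not part of this change. */
static bool ctrl_fifo_queue_request_is_valid(u32 access_type, u32 queue_num, u32 direction)
{
	if (access_type == NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE) {
		/*
		 * Exclusive users may open either queue in either direction
		 * (subject to reserving the exclusive-user slot).
		 */
		return (queue_num == NVS_CTRL_FIFO_QUEUE_NUM_EVENT ||
			queue_num == NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_CONTROL) &&
		       (direction == NVS_CTRL_FIFO_QUEUE_DIRECTION_CLIENT_TO_SCHEDULER ||
			direction == NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT);
	}
	if (access_type == NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_NON_EXCLUSIVE) {
		/* Non-exclusive users are limited to the scheduler-to-client event queue. */
		return queue_num == NVS_CTRL_FIFO_QUEUE_NUM_EVENT &&
		       direction == NVS_CTRL_FIFO_QUEUE_DIRECTION_SCHEDULER_TO_CLIENT;
	}
	return false;
}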
@@ -947,6 +951,47 @@ fail:
return err;
}
static u32 nvgpu_nvs_translate_hw_scheduler_impl(struct gk20a *g, uint8_t impl)
{
if (impl == NVGPU_NVS_DOMAIN_SCHED_KMD) {
return NVS_DOMAIN_SCHED_KMD;
} else if (impl == NVGPU_NVS_DOMAIN_SCHED_GSP) {
return NVS_DOMAIN_SCHED_GSP;
}
return NVS_DOMAIN_SCHED_INVALID;
}
static int nvgpu_nvs_query_scheduler_characteristics(struct gk20a *g,
struct nvs_domain_ctrl_fifo_user *user,
struct nvgpu_nvs_ctrl_fifo_scheduler_characteristics_args *args)
{
struct nvs_domain_ctrl_fifo_capabilities *capabilities;
if (args->reserved0 != 0) {
return -EINVAL;
}
if (args->reserved1 != 0) {
return -EINVAL;
}
if (args->reserved2 != 0ULL) {
return -EINVAL;
}
capabilities = nvgpu_nvs_ctrl_fifo_get_capabilities(g->sched_ctrl_fifo);
args->domain_scheduler_implementation =
nvgpu_nvs_translate_hw_scheduler_impl(g, capabilities->scheduler_implementation_hw);
args->available_queues = NVS_CTRL_FIFO_QUEUE_NUM_EVENT;
if (user->has_write_access) {
args->available_queues |= NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_CONTROL;
}
return 0;
}
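On the client side, the returned available_queues mask would then be tested against the queue-number flags the handler ORs in; a short sketch (assuming the same flag macro is visible to user space):

/* Illustrative client-side check, not part of this change. */
static bool can_use_control_queue(
		const struct nvgpu_nvs_ctrl_fifo_scheduler_characteristics_args *args)
{
	/* The control-queue bit is only set for users with write access to the node. */
	return (args->available_queues & NVGPU_NVS_CTRL_FIFO_QUEUE_NUM_CONTROL) != 0U;
}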
long nvgpu_nvs_ctrl_fifo_ops_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
u8 buf[NVGPU_NVS_CTRL_FIFO_IOCTL_MAX_ARG_SIZE] = { 0 };
@@ -1020,6 +1065,28 @@ long nvgpu_nvs_ctrl_fifo_ops_ioctl(struct file *filp, unsigned int cmd, unsigned
err = -EOPNOTSUPP;
goto done;
}
case NVGPU_NVS_QUERY_CTRL_FIFO_SCHEDULER_CHARACTERISTICS:
{
struct nvgpu_nvs_ctrl_fifo_scheduler_characteristics_args *args =
(struct nvgpu_nvs_ctrl_fifo_scheduler_characteristics_args *)buf;
if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_NVS_CTRL_FIFO)) {
err = -EOPNOTSUPP;
return err;
}
err = nvgpu_nvs_query_scheduler_characteristics(g, user, args);
if (err != 0) {
return err;
}
if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd))) {
err = -EFAULT;
goto done;
}
break;
}
default:
err = -ENOTTY;
goto done;