gpu: nvgpu: open TSG with the share token

Implement the OPEN_TSG ioctl with share tokens.

When NVGPU_GPU_IOCTL_OPEN_TSG_FLAGS_SHARE is set, the caller must
supply the source device instance id and a valid share token; without
the flag, both fields must be zero. In share mode the token is looked
up on the source control device, matched against the target device
instance id, consumed, and a reference to the existing TSG is taken
instead of allocating a new one.

Since shared opens reuse an existing TSG, gk20a_sched_ctrl_tsg_added()
is now called only on the non-share path, and nvgpu_put(g) is moved
from nvgpu_ioctl_tsg_release() to nvgpu_ioctl_tsg_dev_release().

Bug 3677982
JIRA NVGPU-8681

Change-Id: If44aef863c932163df769acef5b3586f97aaecd3
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2792082
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Scott Long <scottl@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author:    Sagar Kamble
Date:      2022-10-11 22:05:37 +05:30
Committer: mobile promotions
Commit:    ce26e92de6
Parent:    96f675595c

4 changed files with 128 additions and 24 deletions
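
For illustration only, a minimal user-space sketch of the new share-token
path. The uapi header name, the control-node fd handling, and the exact
struct layout are assumptions; the flag and argument fields
(flags, source_device_instance_id, share_token, tsg_fd) come from the
patch below. Without the share flag the ioctl keeps its old behaviour and
both new fields must be left zero.

/*
 * Hypothetical user-space sketch of the share-token open path.
 * Assumptions: the <linux/nvgpu.h> uapi header location, ctrl_fd being an
 * already-open nvgpu control node, and the exact layout of
 * struct nvgpu_gpu_open_tsg_args.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include <linux/nvgpu.h>	/* assumed uapi header location */

static int open_shared_tsg(int ctrl_fd, uint64_t src_dev_inst_id,
			   uint64_t share_token)
{
	struct nvgpu_gpu_open_tsg_args args;

	memset(&args, 0, sizeof(args));

	/*
	 * Share mode: both the source device instance id and the token
	 * must be non-zero, per the checks added in gk20a_ctrl_open_tsg().
	 */
	args.flags = NVGPU_GPU_IOCTL_OPEN_TSG_FLAGS_SHARE;
	args.source_device_instance_id = src_dev_inst_id;
	args.share_token = share_token;

	if (ioctl(ctrl_fd, NVGPU_GPU_IOCTL_OPEN_TSG, &args) != 0)
		return -1;

	/* fd referring to the TSG shared by the source device. */
	return args.tsg_fd;
}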


@@ -725,10 +725,34 @@ clean_up:
 static int gk20a_ctrl_open_tsg(struct gk20a *g, struct gk20a_ctrl_priv *priv,
 		struct nvgpu_gpu_open_tsg_args *args)
 {
-	int err;
-	int fd;
+	u64 device_instance_id = 0ULL;
+	bool open_share = false;
 	struct file *file;
 	char name[64];
+	int err;
+	int fd;
+
+#ifdef CONFIG_NVGPU_TSG_SHARING
+	open_share =
+		(args->flags & NVGPU_GPU_IOCTL_OPEN_TSG_FLAGS_SHARE) != 0U;
+
+	if (!open_share) {
+		if (args->source_device_instance_id != 0UL ||
+		    args->share_token != 0UL) {
+			nvgpu_err(g, "Source device inst id/token specified");
+			return -EINVAL;
+		}
+	} else {
+		if (args->source_device_instance_id == 0UL ||
+		    args->share_token == 0UL) {
+			nvgpu_err(g, "Source device inst id/token not specified");
+			return -EINVAL;
+		}
+	}
+
+	device_instance_id = priv->device_instance_id;
+#endif
 
 	err = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
 	if (err < 0)
@@ -743,12 +767,17 @@ static int gk20a_ctrl_open_tsg(struct gk20a *g, struct gk20a_ctrl_priv *priv,
 		goto clean_up;
 	}
 
-	err = nvgpu_ioctl_tsg_open(g, priv, priv->cdev, file);
+	err = nvgpu_ioctl_tsg_open(g, priv, priv->cdev, file, open_share,
+				   args->source_device_instance_id,
+				   device_instance_id,
+				   args->share_token);
 	if (err)
 		goto clean_up_file;
 
 	fd_install(fd, file);
 	args->tsg_fd = fd;
 	return 0;
 
 clean_up_file:
@@ -2999,4 +3028,50 @@ int nvgpu_gpu_tsg_revoke_share_tokens(struct gk20a *g,
 
 	return 0;
 }
+
+struct nvgpu_tsg *nvgpu_gpu_open_tsg_with_share_token(struct gk20a *g,
+				u64 source_device_instance_id,
+				u64 target_device_instance_id,
+				u64 share_token)
+{
+	struct nvgpu_tsg_share_token_node *token_node, *tmp;
+	struct gk20a_ctrl_priv *ctrl_priv;
+	struct nvgpu_tsg *tsg = NULL;
+
+	ctrl_priv = nvgpu_gpu_get_ctrl_priv(g, source_device_instance_id);
+	if (ctrl_priv == NULL) {
+		nvgpu_err(g, "Invalid source device instance id");
+		return NULL;
+	}
+
+	nvgpu_log_info(g, "Using the token src: %llx target: %llx token:%llx",
+		       source_device_instance_id,
+		       target_device_instance_id, share_token);
+
+	nvgpu_mutex_acquire(&ctrl_priv->tokens_lock);
+	nvgpu_list_for_each_entry_safe(token_node, tmp,
+			&ctrl_priv->tsg_share_tokens_list,
+			nvgpu_tsg_share_token_node, ctrl_entry) {
+		if ((token_node->token == share_token) &&
+		    (token_node->target_device_instance_id ==
+		     target_device_instance_id)) {
+			tsg = token_node->tsg;
+			nvgpu_ref_get(&tsg->refcount);
+			nvgpu_list_del(&token_node->ctrl_entry);
+			nvgpu_kfree(g, token_node);
+			break;
+		}
+	}
+	nvgpu_mutex_release(&ctrl_priv->tokens_lock);
+
+	if (tsg != NULL) {
+		nvgpu_mutex_acquire(&tsg->tsg_share_lock);
+		tsg->share_token_count--;
+		nvgpu_mutex_release(&tsg->tsg_share_lock);
+	}
+
+	return tsg;
+}
 #endif


@@ -17,6 +17,7 @@
 #define __NVGPU_IOCTL_CTRL_H__
 
 struct gk20a_ctrl_priv;
+struct nvgpu_tsg;
 
 int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp);
 int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp);
@@ -42,6 +43,10 @@ int nvgpu_gpu_tsg_revoke_share_tokens(struct gk20a *g,
 				u64 source_device_instance_id,
 				struct nvgpu_tsg *tsg,
 				u32 *out_count);
+struct nvgpu_tsg *nvgpu_gpu_open_tsg_with_share_token(struct gk20a *g,
+				u64 source_device_instance_id,
+				u64 target_device_instance_id,
+				u64 share_token);
 #endif
 
 #endif


@@ -629,7 +629,9 @@ static int nvgpu_tsg_ioctl_revoke_share_token(struct gk20a *g,
 #endif
 
 int nvgpu_ioctl_tsg_open(struct gk20a *g, struct gk20a_ctrl_priv *ctrl_priv,
-			struct nvgpu_cdev *cdev, struct file *filp)
+			struct nvgpu_cdev *cdev, struct file *filp,
+			bool open_share, u64 source_device_instance_id,
+			u64 target_device_instance_id, u64 share_token)
 {
 	struct tsg_private *priv;
 	struct nvgpu_tsg *tsg;
@@ -650,6 +652,7 @@ int nvgpu_ioctl_tsg_open(struct gk20a *g, struct gk20a_ctrl_priv *ctrl_priv,
 		goto free_ref;
 	}
 
+	if (!open_share) {
 	err = gk20a_busy(g);
 	if (err) {
 		nvgpu_err(g, "failed to power on, %d", err);
@@ -658,12 +661,28 @@ int nvgpu_ioctl_tsg_open(struct gk20a *g, struct gk20a_ctrl_priv *ctrl_priv,
 	tsg = nvgpu_tsg_open(g, nvgpu_current_pid(g));
 	gk20a_idle(g);
 
-	if (!tsg) {
+	if (tsg == NULL) {
 		err = -ENOMEM;
 		goto free_mem;
 	}
 
-#ifdef CONFIG_NVGPU_TSG_SHARING
+	gk20a_sched_ctrl_tsg_added(g, tsg);
+#ifndef CONFIG_NVGPU_TSG_SHARING
+	}
+#else
+	} else {
+		tsg = nvgpu_gpu_open_tsg_with_share_token(g,
+					source_device_instance_id,
+					target_device_instance_id,
+					share_token);
+		if (tsg == NULL) {
+			nvgpu_err(g, "TSG open with token failed");
+			err = -EINVAL;
+			goto free_mem;
+		}
+	}
+
 	if (ctrl_priv != NULL) {
 		err = nvgpu_tsg_add_ctrl_dev_inst_id(tsg, ctrl_priv);
 		if (err != 0) {
@@ -680,8 +699,6 @@ int nvgpu_ioctl_tsg_open(struct gk20a *g, struct gk20a_ctrl_priv *ctrl_priv,
 	priv->ctrl_priv = ctrl_priv;
 	filp->private_data = priv;
 
-	gk20a_sched_ctrl_tsg_added(g, tsg);
-
 	return 0;
 
 free_mem:
@@ -708,7 +725,8 @@ int nvgpu_ioctl_tsg_dev_open(struct inode *inode, struct file *filp)
 		return ret;
 	}
 
-	ret = nvgpu_ioctl_tsg_open(g, NULL, cdev, filp);
+	ret = nvgpu_ioctl_tsg_open(g, NULL, cdev, filp, false,
+				   0ULL, 0ULL, 0ULL);
 	gk20a_idle(g);
 
 	nvgpu_log_fn(g, "done");
@@ -727,13 +745,13 @@ void nvgpu_ioctl_tsg_release(struct nvgpu_ref *ref)
 	gk20a_sched_ctrl_tsg_removed(g, tsg);
 
 	nvgpu_tsg_release(ref);
-	nvgpu_put(g);
 }
 
 int nvgpu_ioctl_tsg_dev_release(struct inode *inode, struct file *filp)
 {
 	struct tsg_private *priv = filp->private_data;
 	struct nvgpu_tsg *tsg;
+	struct gk20a *g;
 #ifdef CONFIG_NVGPU_TSG_SHARING
 	u32 count;
 	int err;
@@ -745,15 +763,16 @@ int nvgpu_ioctl_tsg_dev_release(struct inode *inode, struct file *filp)
 	}
 
 	tsg = priv->tsg;
+	g = tsg->g;
 
 #ifdef CONFIG_NVGPU_TSG_SHARING
 	nvgpu_mutex_acquire(&tsg->tsg_share_lock);
-	err = nvgpu_gpu_tsg_revoke_share_tokens(tsg->g,
+	err = nvgpu_gpu_tsg_revoke_share_tokens(g,
 			nvgpu_gpu_get_device_instance_id(priv->ctrl_priv),
 			tsg, &count);
 	if (err != 0) {
-		nvgpu_err(tsg->g, "revoke token(%llu) failed %d",
+		nvgpu_err(g, "revoke token(%llu) failed %d",
 			  nvgpu_gpu_get_device_instance_id(priv->ctrl_priv),
 			  err);
 	}
@@ -766,7 +785,10 @@ int nvgpu_ioctl_tsg_dev_release(struct inode *inode, struct file *filp)
 #endif
 
 	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);
-	nvgpu_kfree(tsg->g, priv);
+	nvgpu_kfree(g, priv);
+
+	nvgpu_put(g);
 
 	return 0;
 }


@@ -25,7 +25,9 @@ struct nvgpu_tsg *nvgpu_tsg_get_from_file(int fd);
 int nvgpu_ioctl_tsg_dev_release(struct inode *inode, struct file *filp);
 int nvgpu_ioctl_tsg_dev_open(struct inode *inode, struct file *filp);
 int nvgpu_ioctl_tsg_open(struct gk20a *g, struct gk20a_ctrl_priv *ctrl_priv,
-		struct nvgpu_cdev *cdev, struct file *filp);
+		struct nvgpu_cdev *cdev, struct file *filp,
+		bool open_share, u64 source_device_instance_id,
+		u64 target_device_instance_id, u64 share_token);
 long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp,
 		unsigned int cmd, unsigned long arg);
 void nvgpu_ioctl_tsg_release(struct nvgpu_ref *ref);