gpu: nvgpu: pass gk20a struct to gk20a_busy

After driver removal, the device structure passed to gk20a_busy can be
invalid. To solve this, the prototypes of gk20a_busy and gk20a_idle are
changed to take the gk20a struct instead of the device pointer.
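
The resulting interface and a typical call-site conversion, sketched from
the hunks below (surrounding code abbreviated):

    int __must_check gk20a_busy(struct gk20a *g);
    void gk20a_idle(struct gk20a *g);

    /* callers now take the power reference through the gk20a struct,
     * not through a struct device pointer that may already be gone */
    err = gk20a_busy(g);    /* was: err = gk20a_busy(g->dev); */
    if (err)
            return err;
    /* ... program the hardware ... */
    gk20a_idle(g);          /* was: gk20a_idle(g->dev); */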

bug 200277762
JIRA: EVLR-1023

Change-Id: I08eb74bd3578834d45115098ed9936ebbb436fdf
Signed-off-by: David Nieto <dmartineznie@nvidia.com>
Reviewed-on: http://git-master/r/1320194
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    David Nieto
Date:      2017-03-13 18:45:37 -07:00
Committer: mobile promotions
Commit:    2a502bdd5f (parent a84f601fba)

16 changed files with 206 additions and 199 deletions

View File

@@ -59,11 +59,11 @@ int gk20a_as_alloc_share(struct gk20a_as *as,
 as_share->id = generate_as_share_id(as_share->as);
 /* this will set as_share->vm. */
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 goto failed;
 err = g->ops.mm.vm_alloc_share(as_share, big_page_size, flags);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 if (err)
 goto failed;
@@ -87,14 +87,14 @@ int gk20a_as_release_share(struct gk20a_as_share *as_share)
 gk20a_dbg_fn("");
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 goto release_fail;
 err = gk20a_vm_release_share(as_share);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 release_fail:
 release_as_share_id(as_share->as, as_share->id);
@@ -375,7 +375,7 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 return -EFAULT;
 }
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -449,7 +449,7 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 break;
 }
-gk20a_idle(g->dev);
+gk20a_idle(g);
 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
 if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))

View File

@@ -764,6 +764,7 @@ __releases(&cde_app->mutex)
 struct gk20a_cde_ctx, ctx_deleter_work);
 struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app;
 struct device *dev = cde_ctx->dev;
+struct gk20a *g = cde_ctx->g;
 int err;
 /* someone has just taken it? engine deletion started? */
@@ -773,7 +774,7 @@ __releases(&cde_app->mutex)
 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
 "cde: attempting to delete temporary %p", cde_ctx);
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 /* this context would find new use anyway later, so not freeing
 * here does not leak anything */
@@ -801,7 +802,7 @@ __releases(&cde_app->mutex)
 out:
 nvgpu_mutex_release(&cde_app->mutex);
-gk20a_idle(dev);
+gk20a_idle(g);
 }
 static struct gk20a_cde_ctx *gk20a_cde_do_get_context(struct gk20a *g)
@@ -949,7 +950,7 @@ __releases(&cde_app->mutex)
 scatterbuffer_byte_offset < compbits_byte_offset)
 return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -1106,7 +1107,7 @@ __releases(&cde_app->mutex)
 flags = __flags | NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET;
 /* gk20a_cde_execute_buffer() will grab a power reference of it's own */
-gk20a_idle(g->dev);
+gk20a_idle(g);
 /* execute the conversion buffer, combined with init first if it's the
 * first time */
@@ -1131,7 +1132,7 @@ exit_unmap_surface:
 exit_unmap_vaddr:
 gk20a_vm_unmap(cde_ctx->vm, map_vaddr);
 exit_idle:
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }
@@ -1283,7 +1284,7 @@ __releases(&cde_app->mutex)
 if (!cde_app->initialised)
 return -ENOSYS;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -1297,7 +1298,7 @@ __releases(&cde_app->mutex)
 nvgpu_mutex_release(&cde_app->mutex);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }

View File

@@ -1230,7 +1230,7 @@ int gk20a_channel_release(struct inode *inode, struct file *filp)
 int err;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(dev_from_gk20a(g), "failed to release a channel!");
 goto channel_release;
@@ -1239,7 +1239,7 @@ int gk20a_channel_release(struct inode *inode, struct file *filp)
 trace_gk20a_channel_release(dev_name(g->dev));
 gk20a_channel_close(ch);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 channel_release:
 gk20a_put(g);
@@ -1395,14 +1395,14 @@ static int __gk20a_channel_open(struct gk20a *g, struct file *filp, s32 runlist_
 goto free_ref;
 }
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(dev_from_gk20a(g), "failed to power on, %d", err);
 goto fail_busy;
 }
 /* All the user space channel should be non privilege */
 ch = gk20a_open_new_channel(g, runlist_id, false);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 if (!ch) {
 gk20a_err(dev_from_gk20a(g),
 "failed to get f");
@@ -2693,7 +2693,7 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 channel_gk20a_free_job(c, job);
 job_finished = 1;
-gk20a_idle(g->dev);
+gk20a_idle(g);
 if (!clean_all) {
 /* Timeout isn't supported here so don't touch it. */
@@ -3120,7 +3120,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 return -EINVAL;
 /* released by job cleanup via syncpt or sema interrupt */
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(d, "failed to host gk20a to submit gpfifo, process %s",
 current->comm);
@@ -3227,7 +3227,7 @@ clean_up:
 gk20a_fence_put(pre_fence);
 gk20a_fence_put(post_fence);
 if (need_deferred_cleanup)
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }
@@ -3930,7 +3930,8 @@ long gk20a_channel_ioctl(struct file *filp,
 {
 struct channel_priv *priv = filp->private_data;
 struct channel_gk20a *ch = priv->c;
-struct device *dev = ch->g->dev;
+struct gk20a *g = ch->g;
+struct device *dev = g->dev;
 u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE] = {0};
 int err = 0;
@@ -3967,7 +3968,7 @@ long gk20a_channel_ioctl(struct file *filp,
 case NVGPU_IOCTL_CHANNEL_SET_NVMAP_FD:
 break;
 case NVGPU_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -3976,14 +3977,14 @@ long gk20a_channel_ioctl(struct file *filp,
 }
 err = ch->g->ops.gr.alloc_obj_ctx(ch,
 (struct nvgpu_alloc_obj_ctx_args *)buf);
-gk20a_idle(dev);
+gk20a_idle(g);
 break;
 case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO_EX:
 {
 struct nvgpu_alloc_gpfifo_ex_args *alloc_gpfifo_ex_args =
 (struct nvgpu_alloc_gpfifo_ex_args *)buf;
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -3993,11 +3994,11 @@ long gk20a_channel_ioctl(struct file *filp,
 if (!is_power_of_2(alloc_gpfifo_ex_args->num_entries)) {
 err = -EINVAL;
-gk20a_idle(dev);
+gk20a_idle(g);
 break;
 }
 err = gk20a_alloc_channel_gpfifo(ch, alloc_gpfifo_ex_args);
-gk20a_idle(dev);
+gk20a_idle(g);
 break;
 }
 case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO:
@@ -4006,7 +4007,7 @@ long gk20a_channel_ioctl(struct file *filp,
 struct nvgpu_alloc_gpfifo_args *alloc_gpfifo_args =
 (struct nvgpu_alloc_gpfifo_args *)buf;
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4027,7 +4028,7 @@ long gk20a_channel_ioctl(struct file *filp,
 alloc_gpfifo_ex_args.flags = alloc_gpfifo_args->flags;
 err = gk20a_alloc_channel_gpfifo(ch, &alloc_gpfifo_ex_args);
-gk20a_idle(dev);
+gk20a_idle(g);
 break;
 }
 case NVGPU_IOCTL_CHANNEL_SUBMIT_GPFIFO:
@@ -4035,7 +4036,7 @@ long gk20a_channel_ioctl(struct file *filp,
 (struct nvgpu_submit_gpfifo_args *)buf);
 break;
 case NVGPU_IOCTL_CHANNEL_WAIT:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4052,10 +4053,10 @@ long gk20a_channel_ioctl(struct file *filp,
 nvgpu_mutex_acquire(&ch->ioctl_lock);
-gk20a_idle(dev);
+gk20a_idle(g);
 break;
 case NVGPU_IOCTL_CHANNEL_ZCULL_BIND:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4064,10 +4065,10 @@ long gk20a_channel_ioctl(struct file *filp,
 }
 err = gk20a_channel_zcull_bind(ch,
 (struct nvgpu_zcull_bind_args *)buf);
-gk20a_idle(dev);
+gk20a_idle(g);
 break;
 case NVGPU_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4076,11 +4077,11 @@ long gk20a_channel_ioctl(struct file *filp,
 }
 err = gk20a_init_error_notifier(ch,
 (struct nvgpu_set_error_notifier *)buf);
-gk20a_idle(dev);
+gk20a_idle(g);
 break;
 #ifdef CONFIG_GK20A_CYCLE_STATS
 case NVGPU_IOCTL_CHANNEL_CYCLE_STATS:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4089,7 +4090,7 @@ long gk20a_channel_ioctl(struct file *filp,
 }
 err = gk20a_channel_cycle_stats(ch,
 (struct nvgpu_cycle_stats_args *)buf);
-gk20a_idle(dev);
+gk20a_idle(g);
 break;
 #endif
 case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT:
@@ -4123,7 +4124,7 @@ long gk20a_channel_ioctl(struct file *filp,
 ch->has_timedout;
 break;
 case NVGPU_IOCTL_CHANNEL_SET_PRIORITY:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4133,12 +4134,12 @@ long gk20a_channel_ioctl(struct file *filp,
 err = ch->g->ops.fifo.channel_set_priority(ch,
 ((struct nvgpu_set_priority_args *)buf)->priority);
-gk20a_idle(dev);
+gk20a_idle(g);
 gk20a_channel_trace_sched_param(
 trace_gk20a_channel_set_priority, ch);
 break;
 case NVGPU_IOCTL_CHANNEL_ENABLE:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4149,10 +4150,10 @@ long gk20a_channel_ioctl(struct file *filp,
 ch->g->ops.fifo.enable_channel(ch);
 else
 err = -ENOSYS;
-gk20a_idle(dev);
+gk20a_idle(g);
 break;
 case NVGPU_IOCTL_CHANNEL_DISABLE:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4163,10 +4164,10 @@ long gk20a_channel_ioctl(struct file *filp,
 ch->g->ops.fifo.disable_channel(ch);
 else
 err = -ENOSYS;
-gk20a_idle(dev);
+gk20a_idle(g);
 break;
 case NVGPU_IOCTL_CHANNEL_PREEMPT:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4174,10 +4175,10 @@ long gk20a_channel_ioctl(struct file *filp,
 break;
 }
 err = gk20a_fifo_preempt(ch->g, ch);
-gk20a_idle(dev);
+gk20a_idle(g);
 break;
 case NVGPU_IOCTL_CHANNEL_FORCE_RESET:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4186,7 +4187,7 @@ long gk20a_channel_ioctl(struct file *filp,
 }
 err = ch->g->ops.fifo.force_reset_ch(ch,
 NVGPU_CHANNEL_RESETCHANNEL_VERIF_ERROR, true);
-gk20a_idle(dev);
+gk20a_idle(g);
 break;
 case NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL:
 err = gk20a_channel_event_id_ctrl(ch,
@@ -4194,7 +4195,7 @@ long gk20a_channel_ioctl(struct file *filp,
 break;
 #ifdef CONFIG_GK20A_CYCLE_STATS
 case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4203,7 +4204,7 @@ long gk20a_channel_ioctl(struct file *filp,
 }
 err = gk20a_channel_cycle_stats_snapshot(ch,
 (struct nvgpu_cycle_stats_snapshot_args *)buf);
-gk20a_idle(dev);
+gk20a_idle(g);
 break;
 #endif
 case NVGPU_IOCTL_CHANNEL_WDT:
@@ -4211,7 +4212,7 @@ long gk20a_channel_ioctl(struct file *filp,
 (struct nvgpu_channel_wdt_args *)buf);
 break;
 case NVGPU_IOCTL_CHANNEL_SET_RUNLIST_INTERLEAVE:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4221,12 +4222,12 @@ long gk20a_channel_ioctl(struct file *filp,
 err = gk20a_channel_set_runlist_interleave(ch,
 ((struct nvgpu_runlist_interleave_args *)buf)->level);
-gk20a_idle(dev);
+gk20a_idle(g);
 gk20a_channel_trace_sched_param(
 trace_gk20a_channel_set_runlist_interleave, ch);
 break;
 case NVGPU_IOCTL_CHANNEL_SET_TIMESLICE:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4236,13 +4237,13 @@ long gk20a_channel_ioctl(struct file *filp,
 err = ch->g->ops.fifo.channel_set_timeslice(ch,
 ((struct nvgpu_timeslice_args *)buf)->timeslice_us);
-gk20a_idle(dev);
+gk20a_idle(g);
 gk20a_channel_trace_sched_param(
 trace_gk20a_channel_set_timeslice, ch);
 break;
 case NVGPU_IOCTL_CHANNEL_SET_PREEMPTION_MODE:
 if (ch->g->ops.gr.set_preemption_mode) {
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4252,7 +4253,7 @@ long gk20a_channel_ioctl(struct file *filp,
 err = ch->g->ops.gr.set_preemption_mode(ch,
 ((struct nvgpu_preemption_mode_args *)buf)->graphics_preempt_mode,
 ((struct nvgpu_preemption_mode_args *)buf)->compute_preempt_mode);
-gk20a_idle(dev);
+gk20a_idle(g);
 } else {
 err = -EINVAL;
 }
@@ -4262,7 +4263,7 @@ long gk20a_channel_ioctl(struct file *filp,
 bool boost =
 ((struct nvgpu_boosted_ctx_args *)buf)->boost;
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 dev_err(dev,
 "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4270,7 +4271,7 @@ long gk20a_channel_ioctl(struct file *filp,
 break;
 }
 err = ch->g->ops.gr.set_boosted_ctx(ch, boost);
-gk20a_idle(dev);
+gk20a_idle(g);
 } else {
 err = -EINVAL;
 }

View File

@@ -759,7 +759,7 @@ static int monitor_get(void *data, u64 *val)
 u64 freq = clk->gpc_pll.clk_in;
 u32 count1, count2;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -783,7 +783,7 @@ static int monitor_get(void *data, u64 *val)
 do_div(freq, ncycle);
 *val = freq;
-gk20a_idle(g->dev);
+gk20a_idle(g);
 if (count1 != count2)
 return -EBUSY;

View File

@@ -74,10 +74,10 @@ int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp)
 priv->g = g;
 if (!g->gr.sw_ready) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 goto free_ref;
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 #ifdef CONFIG_ARCH_TEGRA_18x_SOC
@@ -393,7 +393,7 @@ static int nvgpu_gpu_ioctl_set_mmu_debug_mode(
 struct gk20a *g,
 struct nvgpu_gpu_mmu_debug_mode_args *args)
 {
-if (gk20a_busy(g->dev)) {
+if (gk20a_busy(g)) {
 gk20a_err(dev_from_gk20a(g), "failed to power on gpu\n");
 return -EINVAL;
 }
@@ -402,7 +402,7 @@ static int nvgpu_gpu_ioctl_set_mmu_debug_mode(
 g->ops.fb.set_debug_mode(g, args->state == 1);
 nvgpu_mutex_release(&g->dbg_sessions_lock);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return 0;
 }
@@ -653,7 +653,7 @@ static inline int get_timestamps_zipper(struct gk20a *g,
 u32 gpu_timestamp_hi_new = 0;
 u32 gpu_timestamp_hi_old = 0;
-if (gk20a_busy(g->dev)) {
+if (gk20a_busy(g)) {
 gk20a_err(dev_from_gk20a(g), "GPU not powered on\n");
 err = -EINVAL;
 goto end;
@@ -681,7 +681,7 @@ static inline int get_timestamps_zipper(struct gk20a *g,
 }
 end:
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }
@@ -721,7 +721,7 @@ static int nvgpu_gpu_get_gpu_time(
 u64 time;
 int err;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -729,7 +729,7 @@ static int nvgpu_gpu_get_gpu_time(
 if (!err)
 args->gpu_timestamp = time;
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }
@@ -1218,7 +1218,7 @@ static int nvgpu_gpu_get_voltage(struct gk20a *g,
 if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_SUPPORT_GET_VOLTAGE))
 return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -1236,7 +1236,7 @@ static int nvgpu_gpu_get_voltage(struct gk20a *g,
 err = -EINVAL;
 }
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }
@@ -1254,13 +1254,13 @@ static int nvgpu_gpu_get_current(struct gk20a *g,
 if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_SUPPORT_GET_CURRENT))
 return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
 err = pmgr_pwr_devices_get_current(g, &args->currnt);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }
@@ -1278,13 +1278,13 @@ static int nvgpu_gpu_get_power(struct gk20a *g,
 if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_SUPPORT_GET_POWER))
 return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
 err = pmgr_pwr_devices_get_power(g, &args->power);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }
@@ -1303,13 +1303,13 @@ static int nvgpu_gpu_get_temperature(struct gk20a *g,
 if (!g->ops.therm.get_internal_sensor_curr_temp)
 return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
 err = g->ops.therm.get_internal_sensor_curr_temp(g, &temp_f24_8);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 args->temp_f24_8 = (s32)temp_f24_8;
@@ -1330,13 +1330,13 @@ static int nvgpu_gpu_set_therm_alert_limit(struct gk20a *g,
 if (!g->ops.therm.configure_therm_alert)
 return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
 err = g->ops.therm.configure_therm_alert(g, args->temp_f24_8);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }
@@ -1371,11 +1371,11 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
 }
 if (!g->gr.sw_ready) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 switch (cmd) {
@@ -1439,11 +1439,11 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
 }
 if (!err) {
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (!err) {
 err = g->ops.gr.zbc_set_table(g, &g->gr,
 zbc_val);
-gk20a_idle(dev);
+gk20a_idle(g);
 }
 }

View File

@@ -249,7 +249,7 @@ static int gk20a_ctxsw_dev_ioctl_poll(struct gk20a_ctxsw_dev *dev)
 gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "");
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -259,7 +259,7 @@ static int gk20a_ctxsw_dev_ioctl_poll(struct gk20a_ctxsw_dev *dev)
 if (likely(!err))
 err = g->ops.fecs_trace.poll(g);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }
@@ -287,7 +287,7 @@ int gk20a_ctxsw_dev_open(struct inode *inode, struct file *filp)
 goto free_ref;
 }
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 goto free_ref;
@@ -330,7 +330,7 @@ done:
 nvgpu_mutex_release(&dev->write_lock);
 idle:
-gk20a_idle(g->dev);
+gk20a_idle(g);
 free_ref:
 if (err)
 gk20a_put(g);

View File

@@ -766,14 +766,14 @@ static int nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(
 if (sm_id >= gr->no_of_sm)
 return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
 err = gr_gk20a_elpg_protected_call(g,
 g->ops.gr.clear_sm_error_state(g, ch, sm_id));
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }
@@ -819,7 +819,7 @@ static int nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(
 }
 }
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 goto err_free;
@@ -827,7 +827,7 @@ static int nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(
 g->ops.gr.update_sm_error_state(g, ch,
 sm_id, sm_error_state));
-gk20a_idle(g->dev);
+gk20a_idle(g);
 err_free:
 kfree(sm_error_state);
@@ -843,7 +843,7 @@ nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(struct dbg_session_gk20a *dbg_s,
 int err = 0;
 int ctx_resident_ch_fd = -1;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -866,7 +866,7 @@ nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(struct dbg_session_gk20a *dbg_s,
 args->resident_context_fd = ctx_resident_ch_fd;
 }
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }
@@ -905,7 +905,7 @@ static int nvgpu_dbg_gpu_ioctl_access_fb_memory(struct dbg_session_gk20a *dbg_s,
 size = args->size;
 offset = 0;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 goto fail_free_buffer;
@@ -940,7 +940,7 @@ static int nvgpu_dbg_gpu_ioctl_access_fb_memory(struct dbg_session_gk20a *dbg_s,
 }
 fail_idle:
-gk20a_idle(g->dev);
+gk20a_idle(g);
 fail_free_buffer:
 nvgpu_big_free(g, buffer);
 fail_dmabuf_put:
@@ -972,11 +972,11 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
 }
 if (!g->gr.sw_ready) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 /* protect from threaded user space calls */
@@ -1283,7 +1283,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
 (g->dbg_powergating_disabled_refcount++ == 0)) {
 gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module busy");
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -1338,7 +1338,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
 gk20a_pmu_pg_global_enable(g, true);
 gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 dbg_s->is_pg_disabled = false;
@@ -1381,7 +1381,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 gk20a_dbg_fn("%s smpc ctxsw mode = %d",
 dev_name(dbg_s->dev), args->mode);
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(dev_from_gk20a(g), "failed to poweron");
 return err;
@@ -1409,7 +1409,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 err = g->ops.regops.apply_smpc_war(dbg_s);
 clean_up:
 nvgpu_mutex_release(&g->dbg_sessions_lock);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }
@@ -1433,8 +1433,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 "session doesn't have a valid reservation");
 }
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(dev_from_gk20a(g), "failed to poweron");
 return err;
@@ -1462,7 +1461,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 */
 clean_up:
 nvgpu_mutex_release(&g->dbg_sessions_lock);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }
@@ -1480,7 +1479,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
 if (!ch)
 return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(dev_from_gk20a(g), "failed to poweron");
 return err;
@@ -1512,7 +1511,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
 clean_up:
 nvgpu_mutex_release(&g->dbg_sessions_lock);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }
@@ -1807,7 +1806,7 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
 goto fail_unmap;
 }
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(dev_from_gk20a(g), "failed to poweron");
 goto fail_unmap;
@@ -1830,7 +1829,7 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
 perf_pmasys_mem_block_valid_true_f() |
 perf_pmasys_mem_block_target_lfb_f());
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return 0;
@@ -1848,7 +1847,7 @@ static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
 if (!g->allow_all)
 return -EACCES;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(dev_from_gk20a(g), "failed to poweron");
 return err;
@@ -1864,7 +1863,7 @@ static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
 perf_pmasys_mem_block_valid_false_f() |
 perf_pmasys_mem_block_target_f(0));
-gk20a_idle(g->dev);
+gk20a_idle(g);
 gk20a_vm_unmap_buffer(&g->mm.pmu.vm, args->offset, NULL);

View File

@@ -136,13 +136,14 @@ int gk20a_gr_debug_dump(struct device *dev)
 static int gk20a_gr_debug_show(struct seq_file *s, void *unused)
 {
 struct device *dev = s->private;
+struct gk20a *g = gk20a_get_platform(dev)->g;
 struct gk20a_debug_output o = {
 .fn = gk20a_debug_write_to_seqfile,
 .ctx = s,
 };
 int err;
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(dev, "failed to power on gpu: %d", err);
 return -EINVAL;
@@ -150,7 +151,7 @@ static int gk20a_gr_debug_show(struct seq_file *s, void *unused)
 gk20a_gr_dump_regs(dev, &o);
-gk20a_idle(dev);
+gk20a_idle(g);
 return 0;
 }
@@ -183,7 +184,7 @@ static int gk20a_debug_show(struct seq_file *s, void *unused)
 g = gk20a_get_platform(dev)->g;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(g->dev, "failed to power on gpu: %d", err);
 return -EFAULT;
@@ -193,7 +194,7 @@ static int gk20a_debug_show(struct seq_file *s, void *unused)
 if (g->ops.debug.show_dump)
 g->ops.debug.show_dump(g, &o);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return 0;
 }

View File

@@ -332,7 +332,7 @@ static int gk20a_fecs_trace_poll(struct gk20a *g)
 int cnt;
 int err;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (unlikely(err))
 return err;
@@ -372,7 +372,7 @@ static int gk20a_fecs_trace_poll(struct gk20a *g)
 done:
 nvgpu_mutex_release(&trace->poll_lock);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return err;
 }

View File

@@ -1434,24 +1434,26 @@ static int gk20a_can_busy(struct gk20a *g)
 return 1;
 }
-int gk20a_busy(struct device *dev)
+int gk20a_busy(struct gk20a *g)
 {
 int ret = 0;
-struct gk20a *g;
-struct gk20a_platform *platform;
+struct device *dev;
-if (!dev)
-return -ENODEV;
-g = get_gk20a(dev);
-platform = gk20a_get_platform(dev);
-if (!g || !gk20a_can_busy(g))
+if (!g)
 return -ENODEV;
 atomic_inc(&g->usage_count);
 down_read(&g->busy_lock);
+if (!gk20a_can_busy(g)) {
+ret = -ENODEV;
+atomic_dec(&g->usage_count);
+goto fail;
+}
+dev = g->dev;
 if (pm_runtime_enabled(dev)) {
 ret = pm_runtime_get_sync(dev);
 if (ret < 0) {
@@ -1484,22 +1486,21 @@ void gk20a_idle_nosuspend(struct device *dev)
 pm_runtime_put_noidle(dev);
 }
-void gk20a_idle(struct device *dev)
+void gk20a_idle(struct gk20a *g)
 {
-struct gk20a_platform *platform;
-struct gk20a *g;
-if (!dev)
-return;
-g = get_gk20a(dev);
-platform = gk20a_get_platform(dev);
+struct device *dev;
 atomic_dec(&g->usage_count);
+down_read(&g->busy_lock);
+dev = g->dev;
+if (!(dev && gk20a_can_busy(g)))
+goto fail;
 if (pm_runtime_enabled(dev)) {
 #ifdef CONFIG_PM
-if (atomic_read(&dev->power.usage_count) == 1)
+if (atomic_read(&g->dev->power.usage_count) == 1)
 gk20a_scale_notify_idle(dev);
 #endif
@@ -1509,6 +1510,8 @@ void gk20a_idle(struct device *dev)
 } else {
 gk20a_scale_notify_idle(dev);
 }
+fail:
+up_read(&g->busy_lock);
 }
 void gk20a_disable(struct gk20a *g, u32 units)

View File

@@ -1391,8 +1391,8 @@ void gk20a_remove_sysfs(struct device *dev);
 void gk20a_busy_noresume(struct device *dev);
 void gk20a_idle_nosuspend(struct device *dev);
-int __must_check gk20a_busy(struct device *dev);
-void gk20a_idle(struct device *dev);
+int __must_check gk20a_busy(struct gk20a *g);
+void gk20a_idle(struct gk20a *g);
 void gk20a_disable(struct gk20a *g, u32 units);
 void gk20a_enable(struct gk20a *g, u32 units);
 void gk20a_reset(struct gk20a *g, u32 units);

View File

@@ -48,7 +48,7 @@ static ssize_t elcg_enable_store(struct device *dev,
 if (kstrtoul(buf, 10, &val) < 0)
 return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -60,7 +60,7 @@ static ssize_t elcg_enable_store(struct device *dev,
 gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);
 }
-gk20a_idle(g->dev);
+gk20a_idle(g);
 dev_info(dev, "ELCG is %s.\n", g->elcg_enabled ? "enabled" :
 "disabled");
@@ -93,7 +93,7 @@ static ssize_t blcg_enable_store(struct device *dev,
 else
 g->blcg_enabled = false;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -117,7 +117,7 @@ static ssize_t blcg_enable_store(struct device *dev,
 if (g->ops.clock_gating.blcg_xbar_load_gating_prod)
 g->ops.clock_gating.blcg_xbar_load_gating_prod(g,
 g->blcg_enabled);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 dev_info(dev, "BLCG is %s.\n", g->blcg_enabled ? "enabled" :
 "disabled");
@@ -156,7 +156,7 @@ static ssize_t slcg_enable_store(struct device *dev,
 * init. Therefore, it would be incongruous to add it here. Once
 * it is added to init, we should add it here too.
 */
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -184,7 +184,7 @@ static ssize_t slcg_enable_store(struct device *dev,
 g->ops.clock_gating.slcg_pmu_load_gating_prod(g, g->slcg_enabled);
 if (g->ops.clock_gating.slcg_xbar_load_gating_prod)
 g->ops.clock_gating.slcg_xbar_load_gating_prod(g, g->slcg_enabled);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 dev_info(dev, "SLCG is %s.\n", g->slcg_enabled ? "enabled" :
 "disabled");
@@ -289,6 +289,8 @@ static ssize_t railgate_enable_store(struct device *dev,
 {
 struct gk20a_platform *platform = dev_get_drvdata(dev);
 unsigned long railgate_enable = 0;
+/* dev is guaranteed to be valid here. Ok to de-reference */
+struct gk20a *g = get_gk20a(dev);
 int err = 0;
 if (kstrtoul(buf, 10, &railgate_enable) < 0)
@@ -296,12 +298,12 @@ static ssize_t railgate_enable_store(struct device *dev,
 if (railgate_enable && !platform->can_railgate) {
 /* release extra ref count */
-gk20a_idle(dev);
+gk20a_idle(g);
 platform->can_railgate = true;
 platform->user_railgate_disabled = false;
 } else if (railgate_enable == 0 && platform->can_railgate) {
 /* take extra ref count */
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
 if (err)
 return err;
 platform->can_railgate = false;
@@ -348,10 +350,10 @@ static ssize_t railgate_delay_store(struct device *dev,
 dev_err(dev, "Invalid powergate delay\n");
 /* wake-up system to make rail-gating delay effective immediately */
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return count;
 }
@@ -417,13 +419,13 @@ static ssize_t gk20a_load_show(struct device *dev,
 if (!g->power_on) {
 busy_time = 0;
 } else {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
 gk20a_pmu_load_update(g);
 gk20a_pmu_load_norm(g, &busy_time);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 res = snprintf(buf, PAGE_SIZE, "%u\n", busy_time);
@@ -445,7 +447,7 @@ static ssize_t elpg_enable_store(struct device *dev,
 if (!g->power_on) {
 g->elpg_enabled = val ? true : false;
 } else {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return -EAGAIN;
 /*
@@ -468,7 +470,7 @@ static ssize_t elpg_enable_store(struct device *dev,
 gk20a_pmu_pg_global_enable(g, false);
 }
 }
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 dev_info(dev, "ELPG is %s.\n", g->elpg_enabled ? "enabled" :
 "disabled");
@@ -500,7 +502,7 @@ static ssize_t mscg_enable_store(struct device *dev,
 if (!g->power_on) {
 g->mscg_enabled = val ? true : false;
 } else {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return -EAGAIN;
 /*
@@ -532,7 +534,7 @@ static ssize_t mscg_enable_store(struct device *dev,
 }
 g->mscg_enabled = false;
 }
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 dev_info(dev, "MSCG is %s.\n", g->mscg_enabled ? "enabled" :
 "disabled");
@@ -617,7 +619,7 @@ static ssize_t aelpg_enable_store(struct device *dev,
 if (kstrtoul(buf, 10, &val) < 0)
 return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -636,7 +638,7 @@ static ssize_t aelpg_enable_store(struct device *dev,
 } else {
 dev_info(dev, "PMU is not ready, AELPG request failed\n");
 }
-gk20a_idle(g->dev);
+gk20a_idle(g);
 dev_info(dev, "AELPG is %s.\n", g->aelpg_enabled ? "enabled" :
 "disabled");
@@ -674,9 +676,9 @@ static ssize_t allow_all_enable_store(struct device *dev,
 if (kstrtoul(buf, 10, &val) < 0)
 return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 g->allow_all = (val ? true : false);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return count;
 }
@@ -811,7 +813,7 @@ static ssize_t tpc_fs_mask_read(struct device *dev,
 u32 tpc_fs_mask = 0;
 int err = 0;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -822,7 +824,7 @@ static ssize_t tpc_fs_mask_read(struct device *dev,
 (gr->max_tpc_per_gpc_count * gpc_index);
 }
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return snprintf(buf, PAGE_SIZE, "0x%x\n", tpc_fs_mask);
 }

View File

@@ -5231,7 +5231,7 @@ int gk20a_pmu_load_update(struct gk20a *g)
 void gk20a_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles,
 u32 *total_cycles)
 {
-if (!g->power_on || gk20a_busy(g->dev)) {
+if (!g->power_on || gk20a_busy(g)) {
 *busy_cycles = 0;
 *total_cycles = 0;
 return;
@@ -5242,20 +5242,20 @@ void gk20a_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles,
 rmb();
 *total_cycles = pwr_pmu_idle_count_value_v(
 gk20a_readl(g, pwr_pmu_idle_count_r(2)));
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 void gk20a_pmu_reset_load_counters(struct gk20a *g)
 {
 u32 reg_val = pwr_pmu_idle_count_reset_f(1);
-if (!g->power_on || gk20a_busy(g->dev))
+if (!g->power_on || gk20a_busy(g))
 return;
 gk20a_writel(g, pwr_pmu_idle_count_r(2), reg_val);
 wmb();
 gk20a_writel(g, pwr_pmu_idle_count_r(1), reg_val);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
@@ -5499,13 +5499,13 @@ static int mscg_stat_show(struct seq_file *s, void *data)
 /* Don't unnecessarily power on the device */
 if (g->power_on) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
 gk20a_pmu_get_pg_stats(g,
 PMU_PG_ELPG_ENGINE_ID_MS, &pg_stat_data);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 total_ingating = g->pg_ingating_time_us +
 (u64)pg_stat_data.ingating_time;
@@ -5557,13 +5557,13 @@ static int mscg_transitions_show(struct seq_file *s, void *data)
 int err;
 if (g->power_on) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
 gk20a_pmu_get_pg_stats(g,
 PMU_PG_ELPG_ENGINE_ID_MS, &pg_stat_data);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 total_gating_cnt = g->pg_gating_cnt + pg_stat_data.gating_cnt;
@@ -5593,13 +5593,13 @@ static int elpg_stat_show(struct seq_file *s, void *data)
 /* Don't unnecessarily power on the device */
 if (g->power_on) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
 gk20a_pmu_get_pg_stats(g,
 PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 total_ingating = g->pg_ingating_time_us +
 (u64)pg_stat_data.ingating_time;
@@ -5650,13 +5650,13 @@ static int elpg_transitions_show(struct seq_file *s, void *data)
 int err;
 if (g->power_on) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
 gk20a_pmu_get_pg_stats(g,
 PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 total_gating_cnt = g->pg_gating_cnt + pg_stat_data.gating_cnt;
@@ -5772,7 +5772,7 @@ static ssize_t perfmon_events_enable_write(struct file *file,
 /* Don't turn on gk20a unnecessarily */
 if (g->power_on) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -5783,7 +5783,7 @@ static ssize_t perfmon_events_enable_write(struct file *file,
 g->pmu.perfmon_sampling_enabled = false;
 pmu_perfmon_stop_sampling(&(g->pmu));
 }
-gk20a_idle(g->dev);
+gk20a_idle(g);
 } else {
 g->pmu.perfmon_sampling_enabled = val ? true : false;
 }

View File

@@ -233,13 +233,13 @@ static int gk20a_sched_dev_ioctl_tsg_set_timeslice(
 if (!kref_get_unless_zero(&tsg->refcount))
 return -ENXIO;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 goto done;
 err = gk20a_tsg_set_timeslice(tsg, arg->timeslice);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 done:
 kref_put(&tsg->refcount, gk20a_tsg_release);
@@ -266,13 +266,13 @@ static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave(
 if (!kref_get_unless_zero(&tsg->refcount))
 return -ENXIO;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 goto done;
 err = gk20a_tsg_set_runlist_interleave(tsg, arg->runlist_interleave);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 done:
 kref_put(&tsg->refcount, gk20a_tsg_release);
@@ -389,11 +389,11 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p", g);
 if (!sched->sw_ready) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 goto free_ref;
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 if (!nvgpu_mutex_tryacquire(&sched->busy_lock)) {
@@ -538,7 +538,7 @@ static int gk20a_sched_debugfs_show(struct seq_file *s, void *unused)
 int i;
 int err;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -563,7 +563,7 @@ static int gk20a_sched_debugfs_show(struct seq_file *s, void *unused)
 nvgpu_mutex_release(&sched->status_lock);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return 0;
 }
@@ -597,13 +597,13 @@ void gk20a_sched_ctrl_tsg_added(struct gk20a *g, struct tsg_gk20a *tsg)
 gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
 if (!sched->sw_ready) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 WARN_ON(err);
 return;
 }
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 nvgpu_mutex_acquire(&sched->status_lock);

View File

@@ -550,7 +550,7 @@ static int gk20a_tsg_ioctl_set_priority(struct gk20a *g,
 goto done;
 }
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
 goto done;
@@ -558,7 +558,7 @@ static int gk20a_tsg_ioctl_set_priority(struct gk20a *g,
 err = gk20a_tsg_set_priority(g, tsg, arg->priority);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 done:
 nvgpu_mutex_release(&sched->control_lock);
 return err;
@@ -577,7 +577,7 @@ static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
 err = -EPERM;
 goto done;
 }
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
 goto done;
@@ -585,7 +585,7 @@ static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
 err = gk20a_tsg_set_runlist_interleave(tsg, arg->level);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 done:
 nvgpu_mutex_release(&sched->control_lock);
 return err;
@@ -604,13 +604,13 @@ static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g,
 err = -EPERM;
 goto done;
 }
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
 goto done;
 }
 err = gk20a_tsg_set_timeslice(tsg, arg->timeslice_us);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 done:
 nvgpu_mutex_release(&sched->control_lock);
 return err;
@@ -641,11 +641,11 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 }
 if (!g->gr.sw_ready) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
-gk20a_idle(g->dev);
+gk20a_idle(g);
 }
 switch (cmd) {
@@ -668,33 +668,33 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 case NVGPU_IOCTL_TSG_ENABLE:
 {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(g->dev,
 "failed to host gk20a for ioctl cmd: 0x%x", cmd);
 return err;
 }
 gk20a_enable_tsg(tsg);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 break;
 }
 case NVGPU_IOCTL_TSG_DISABLE:
 {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(g->dev,
 "failed to host gk20a for ioctl cmd: 0x%x", cmd);
 return err;
 }
 gk20a_disable_tsg(tsg);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 break;
 }
 case NVGPU_IOCTL_TSG_PREEMPT:
 {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err) {
 gk20a_err(g->dev,
 "failed to host gk20a for ioctl cmd: 0x%x", cmd);
@@ -702,7 +702,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 }
 /* preempt TSG */
 err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 break;
 }

View File

@@ -1751,7 +1751,7 @@ static int monitor_get(void *data, u64 *val)
 u64 freq = clk->gpc_pll.clk_in;
 u32 count1, count2;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -1789,7 +1789,7 @@ static int monitor_get(void *data, u64 *val)
 gk20a_writel(g, therm_clk_slowdown_r(0), clk_slowdown_save);
 nvgpu_mutex_release(&g->clk.clk_mutex);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 if (count1 != count2)
 return -EBUSY;
@@ -1807,7 +1807,7 @@ static int voltage_get(void *data, u64 *val)
 if (clk->gpc_pll.mode != GPC_PLL_MODE_DVFS)
 return -ENOSYS;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
 if (err)
 return err;
@@ -1820,7 +1820,7 @@ static int voltage_get(void *data, u64 *val)
 nvgpu_mutex_release(&g->clk.clk_mutex);
-gk20a_idle(g->dev);
+gk20a_idle(g);
 return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(voltage_fops, voltage_get, NULL, "%llu\n");