gpu: nvgpu: pass gk20a struct to gk20a_busy

After driver removal, the device structure passed to gk20a_busy() can
be invalid. To solve this, the function prototype is modified to take
the gk20a struct instead of the device pointer.
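
A minimal sketch of the calling convention before and after this change
(the surrounding error handling is illustrative and mirrors the call
sites updated below):

    /* before: callers passed the device pointer */
    err = gk20a_busy(g->dev);
    ...
    gk20a_idle(g->dev);

    /* after: callers pass the gk20a struct, which remains valid */
    err = gk20a_busy(g);
    if (err)
            return err;
    /* ... access the GPU ... */
    gk20a_idle(g);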

bug 200277762
JIRA: EVLR-1023

Change-Id: I08eb74bd3578834d45115098ed9936ebbb436fdf
Signed-off-by: David Nieto <dmartineznie@nvidia.com>
Reviewed-on: http://git-master/r/1320194
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author: David Nieto
Date: 2017-03-13 18:45:37 -07:00
Committed by: mobile promotions
Parent: a84f601fba
Commit: 2a502bdd5f
16 changed files with 206 additions and 199 deletions

View File

@@ -59,11 +59,11 @@ int gk20a_as_alloc_share(struct gk20a_as *as,
as_share->id = generate_as_share_id(as_share->as);
/* this will set as_share->vm. */
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
goto failed;
err = g->ops.mm.vm_alloc_share(as_share, big_page_size, flags);
-gk20a_idle(g->dev);
+gk20a_idle(g);
if (err)
goto failed;
@@ -87,14 +87,14 @@ int gk20a_as_release_share(struct gk20a_as_share *as_share)
gk20a_dbg_fn("");
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
goto release_fail;
err = gk20a_vm_release_share(as_share);
-gk20a_idle(g->dev);
+gk20a_idle(g);
release_fail:
release_as_share_id(as_share->as, as_share->id);
@@ -375,7 +375,7 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EFAULT;
}
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -449,7 +449,7 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
-gk20a_idle(g->dev);
+gk20a_idle(g);
if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))

View File

@@ -764,6 +764,7 @@ __releases(&cde_app->mutex)
struct gk20a_cde_ctx, ctx_deleter_work);
struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app;
struct device *dev = cde_ctx->dev;
+struct gk20a *g = cde_ctx->g;
int err;
/* someone has just taken it? engine deletion started? */
@@ -773,7 +774,7 @@ __releases(&cde_app->mutex)
gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
"cde: attempting to delete temporary %p", cde_ctx);
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
/* this context would find new use anyway later, so not freeing
* here does not leak anything */
@@ -801,7 +802,7 @@ __releases(&cde_app->mutex)
out:
nvgpu_mutex_release(&cde_app->mutex);
-gk20a_idle(dev);
+gk20a_idle(g);
}
static struct gk20a_cde_ctx *gk20a_cde_do_get_context(struct gk20a *g)
@@ -949,7 +950,7 @@ __releases(&cde_app->mutex)
scatterbuffer_byte_offset < compbits_byte_offset)
return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -1106,7 +1107,7 @@ __releases(&cde_app->mutex)
flags = __flags | NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET;
/* gk20a_cde_execute_buffer() will grab a power reference of it's own */
-gk20a_idle(g->dev);
+gk20a_idle(g);
/* execute the conversion buffer, combined with init first if it's the
* first time */
@@ -1131,7 +1132,7 @@ exit_unmap_surface:
exit_unmap_vaddr:
gk20a_vm_unmap(cde_ctx->vm, map_vaddr);
exit_idle:
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}
@@ -1283,7 +1284,7 @@ __releases(&cde_app->mutex)
if (!cde_app->initialised)
return -ENOSYS;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -1297,7 +1298,7 @@ __releases(&cde_app->mutex)
nvgpu_mutex_release(&cde_app->mutex);
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}

View File

@@ -1230,7 +1230,7 @@ int gk20a_channel_release(struct inode *inode, struct file *filp)
int err;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to release a channel!");
goto channel_release;
@@ -1239,7 +1239,7 @@ int gk20a_channel_release(struct inode *inode, struct file *filp)
trace_gk20a_channel_release(dev_name(g->dev));
gk20a_channel_close(ch);
-gk20a_idle(g->dev);
+gk20a_idle(g);
channel_release:
gk20a_put(g);
@@ -1395,14 +1395,14 @@ static int __gk20a_channel_open(struct gk20a *g, struct file *filp, s32 runlist_
goto free_ref;
}
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to power on, %d", err);
goto fail_busy;
}
/* All the user space channel should be non privilege */
ch = gk20a_open_new_channel(g, runlist_id, false);
-gk20a_idle(g->dev);
+gk20a_idle(g);
if (!ch) {
gk20a_err(dev_from_gk20a(g),
"failed to get f");
@@ -2693,7 +2693,7 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
channel_gk20a_free_job(c, job);
job_finished = 1;
-gk20a_idle(g->dev);
+gk20a_idle(g);
if (!clean_all) {
/* Timeout isn't supported here so don't touch it. */
@@ -3120,7 +3120,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
return -EINVAL;
/* released by job cleanup via syncpt or sema interrupt */
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(d, "failed to host gk20a to submit gpfifo, process %s",
current->comm);
@@ -3227,7 +3227,7 @@ clean_up:
gk20a_fence_put(pre_fence);
gk20a_fence_put(post_fence);
if (need_deferred_cleanup)
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}
@@ -3930,7 +3930,8 @@ long gk20a_channel_ioctl(struct file *filp,
{
struct channel_priv *priv = filp->private_data;
struct channel_gk20a *ch = priv->c;
-struct device *dev = ch->g->dev;
+struct gk20a *g = ch->g;
+struct device *dev = g->dev;
u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE] = {0};
int err = 0;
@@ -3967,7 +3968,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVGPU_IOCTL_CHANNEL_SET_NVMAP_FD:
break;
case NVGPU_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -3976,14 +3977,14 @@ long gk20a_channel_ioctl(struct file *filp,
}
err = ch->g->ops.gr.alloc_obj_ctx(ch,
(struct nvgpu_alloc_obj_ctx_args *)buf);
-gk20a_idle(dev);
+gk20a_idle(g);
break;
case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO_EX:
{
struct nvgpu_alloc_gpfifo_ex_args *alloc_gpfifo_ex_args =
(struct nvgpu_alloc_gpfifo_ex_args *)buf;
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -3993,11 +3994,11 @@ long gk20a_channel_ioctl(struct file *filp,
if (!is_power_of_2(alloc_gpfifo_ex_args->num_entries)) {
err = -EINVAL;
-gk20a_idle(dev);
+gk20a_idle(g);
break;
}
err = gk20a_alloc_channel_gpfifo(ch, alloc_gpfifo_ex_args);
-gk20a_idle(dev);
+gk20a_idle(g);
break;
}
case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO:
@@ -4006,7 +4007,7 @@ long gk20a_channel_ioctl(struct file *filp,
struct nvgpu_alloc_gpfifo_args *alloc_gpfifo_args =
(struct nvgpu_alloc_gpfifo_args *)buf;
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4027,7 +4028,7 @@ long gk20a_channel_ioctl(struct file *filp,
alloc_gpfifo_ex_args.flags = alloc_gpfifo_args->flags;
err = gk20a_alloc_channel_gpfifo(ch, &alloc_gpfifo_ex_args);
-gk20a_idle(dev);
+gk20a_idle(g);
break;
}
case NVGPU_IOCTL_CHANNEL_SUBMIT_GPFIFO:
@@ -4035,7 +4036,7 @@ long gk20a_channel_ioctl(struct file *filp,
(struct nvgpu_submit_gpfifo_args *)buf);
break;
case NVGPU_IOCTL_CHANNEL_WAIT:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4052,10 +4053,10 @@ long gk20a_channel_ioctl(struct file *filp,
nvgpu_mutex_acquire(&ch->ioctl_lock);
-gk20a_idle(dev);
+gk20a_idle(g);
break;
case NVGPU_IOCTL_CHANNEL_ZCULL_BIND:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4064,10 +4065,10 @@ long gk20a_channel_ioctl(struct file *filp,
}
err = gk20a_channel_zcull_bind(ch,
(struct nvgpu_zcull_bind_args *)buf);
-gk20a_idle(dev);
+gk20a_idle(g);
break;
case NVGPU_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4076,11 +4077,11 @@ long gk20a_channel_ioctl(struct file *filp,
}
err = gk20a_init_error_notifier(ch,
(struct nvgpu_set_error_notifier *)buf);
-gk20a_idle(dev);
+gk20a_idle(g);
break;
#ifdef CONFIG_GK20A_CYCLE_STATS
case NVGPU_IOCTL_CHANNEL_CYCLE_STATS:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4089,7 +4090,7 @@ long gk20a_channel_ioctl(struct file *filp,
}
err = gk20a_channel_cycle_stats(ch,
(struct nvgpu_cycle_stats_args *)buf);
-gk20a_idle(dev);
+gk20a_idle(g);
break;
#endif
case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT:
@@ -4123,7 +4124,7 @@ long gk20a_channel_ioctl(struct file *filp,
ch->has_timedout;
break;
case NVGPU_IOCTL_CHANNEL_SET_PRIORITY:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4133,12 +4134,12 @@ long gk20a_channel_ioctl(struct file *filp,
err = ch->g->ops.fifo.channel_set_priority(ch,
((struct nvgpu_set_priority_args *)buf)->priority);
-gk20a_idle(dev);
+gk20a_idle(g);
gk20a_channel_trace_sched_param(
trace_gk20a_channel_set_priority, ch);
break;
case NVGPU_IOCTL_CHANNEL_ENABLE:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4149,10 +4150,10 @@ long gk20a_channel_ioctl(struct file *filp,
ch->g->ops.fifo.enable_channel(ch);
else
err = -ENOSYS;
-gk20a_idle(dev);
+gk20a_idle(g);
break;
case NVGPU_IOCTL_CHANNEL_DISABLE:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4163,10 +4164,10 @@ long gk20a_channel_ioctl(struct file *filp,
ch->g->ops.fifo.disable_channel(ch);
else
err = -ENOSYS;
-gk20a_idle(dev);
+gk20a_idle(g);
break;
case NVGPU_IOCTL_CHANNEL_PREEMPT:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4174,10 +4175,10 @@ long gk20a_channel_ioctl(struct file *filp,
break;
}
err = gk20a_fifo_preempt(ch->g, ch);
-gk20a_idle(dev);
+gk20a_idle(g);
break;
case NVGPU_IOCTL_CHANNEL_FORCE_RESET:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4186,7 +4187,7 @@ long gk20a_channel_ioctl(struct file *filp,
}
err = ch->g->ops.fifo.force_reset_ch(ch,
NVGPU_CHANNEL_RESETCHANNEL_VERIF_ERROR, true);
-gk20a_idle(dev);
+gk20a_idle(g);
break;
case NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL:
err = gk20a_channel_event_id_ctrl(ch,
@@ -4194,7 +4195,7 @@ long gk20a_channel_ioctl(struct file *filp,
break;
#ifdef CONFIG_GK20A_CYCLE_STATS
case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4203,7 +4204,7 @@ long gk20a_channel_ioctl(struct file *filp,
}
err = gk20a_channel_cycle_stats_snapshot(ch,
(struct nvgpu_cycle_stats_snapshot_args *)buf);
-gk20a_idle(dev);
+gk20a_idle(g);
break;
#endif
case NVGPU_IOCTL_CHANNEL_WDT:
@@ -4211,7 +4212,7 @@ long gk20a_channel_ioctl(struct file *filp,
(struct nvgpu_channel_wdt_args *)buf);
break;
case NVGPU_IOCTL_CHANNEL_SET_RUNLIST_INTERLEAVE:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4221,12 +4222,12 @@ long gk20a_channel_ioctl(struct file *filp,
err = gk20a_channel_set_runlist_interleave(ch,
((struct nvgpu_runlist_interleave_args *)buf)->level);
-gk20a_idle(dev);
+gk20a_idle(g);
gk20a_channel_trace_sched_param(
trace_gk20a_channel_set_runlist_interleave, ch);
break;
case NVGPU_IOCTL_CHANNEL_SET_TIMESLICE:
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4236,13 +4237,13 @@ long gk20a_channel_ioctl(struct file *filp,
err = ch->g->ops.fifo.channel_set_timeslice(ch,
((struct nvgpu_timeslice_args *)buf)->timeslice_us);
-gk20a_idle(dev);
+gk20a_idle(g);
gk20a_channel_trace_sched_param(
trace_gk20a_channel_set_timeslice, ch);
break;
case NVGPU_IOCTL_CHANNEL_SET_PREEMPTION_MODE:
if (ch->g->ops.gr.set_preemption_mode) {
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4252,7 +4253,7 @@ long gk20a_channel_ioctl(struct file *filp,
err = ch->g->ops.gr.set_preemption_mode(ch,
((struct nvgpu_preemption_mode_args *)buf)->graphics_preempt_mode,
((struct nvgpu_preemption_mode_args *)buf)->compute_preempt_mode);
-gk20a_idle(dev);
+gk20a_idle(g);
} else {
err = -EINVAL;
}
@@ -4262,7 +4263,7 @@ long gk20a_channel_ioctl(struct file *filp,
bool boost =
((struct nvgpu_boosted_ctx_args *)buf)->boost;
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
dev_err(dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -4270,7 +4271,7 @@ long gk20a_channel_ioctl(struct file *filp,
break;
}
err = ch->g->ops.gr.set_boosted_ctx(ch, boost);
-gk20a_idle(dev);
+gk20a_idle(g);
} else {
err = -EINVAL;
}

View File

@@ -759,7 +759,7 @@ static int monitor_get(void *data, u64 *val)
u64 freq = clk->gpc_pll.clk_in;
u32 count1, count2;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -783,7 +783,7 @@ static int monitor_get(void *data, u64 *val)
do_div(freq, ncycle);
*val = freq;
-gk20a_idle(g->dev);
+gk20a_idle(g);
if (count1 != count2)
return -EBUSY;

View File

@@ -74,10 +74,10 @@ int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp)
priv->g = g;
if (!g->gr.sw_ready) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
goto free_ref;
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
#ifdef CONFIG_ARCH_TEGRA_18x_SOC
@@ -393,7 +393,7 @@ static int nvgpu_gpu_ioctl_set_mmu_debug_mode(
struct gk20a *g,
struct nvgpu_gpu_mmu_debug_mode_args *args)
{
-if (gk20a_busy(g->dev)) {
+if (gk20a_busy(g)) {
gk20a_err(dev_from_gk20a(g), "failed to power on gpu\n");
return -EINVAL;
}
@@ -402,7 +402,7 @@ static int nvgpu_gpu_ioctl_set_mmu_debug_mode(
g->ops.fb.set_debug_mode(g, args->state == 1);
nvgpu_mutex_release(&g->dbg_sessions_lock);
-gk20a_idle(g->dev);
+gk20a_idle(g);
return 0;
}
@@ -653,7 +653,7 @@ static inline int get_timestamps_zipper(struct gk20a *g,
u32 gpu_timestamp_hi_new = 0;
u32 gpu_timestamp_hi_old = 0;
-if (gk20a_busy(g->dev)) {
+if (gk20a_busy(g)) {
gk20a_err(dev_from_gk20a(g), "GPU not powered on\n");
err = -EINVAL;
goto end;
@@ -681,7 +681,7 @@ static inline int get_timestamps_zipper(struct gk20a *g,
}
end:
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}
@@ -721,7 +721,7 @@ static int nvgpu_gpu_get_gpu_time(
u64 time;
int err;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -729,7 +729,7 @@ static int nvgpu_gpu_get_gpu_time(
if (!err)
args->gpu_timestamp = time;
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}
@@ -1218,7 +1218,7 @@ static int nvgpu_gpu_get_voltage(struct gk20a *g,
if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_SUPPORT_GET_VOLTAGE))
return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -1236,7 +1236,7 @@ static int nvgpu_gpu_get_voltage(struct gk20a *g,
err = -EINVAL;
}
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}
@@ -1254,13 +1254,13 @@ static int nvgpu_gpu_get_current(struct gk20a *g,
if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_SUPPORT_GET_CURRENT))
return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
err = pmgr_pwr_devices_get_current(g, &args->currnt);
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}
@@ -1278,13 +1278,13 @@ static int nvgpu_gpu_get_power(struct gk20a *g,
if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_SUPPORT_GET_POWER))
return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
err = pmgr_pwr_devices_get_power(g, &args->power);
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}
@@ -1303,13 +1303,13 @@ static int nvgpu_gpu_get_temperature(struct gk20a *g,
if (!g->ops.therm.get_internal_sensor_curr_temp)
return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
err = g->ops.therm.get_internal_sensor_curr_temp(g, &temp_f24_8);
-gk20a_idle(g->dev);
+gk20a_idle(g);
args->temp_f24_8 = (s32)temp_f24_8;
@@ -1330,13 +1330,13 @@ static int nvgpu_gpu_set_therm_alert_limit(struct gk20a *g,
if (!g->ops.therm.configure_therm_alert)
return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
err = g->ops.therm.configure_therm_alert(g, args->temp_f24_8);
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}
@@ -1371,11 +1371,11 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
}
if (!g->gr.sw_ready) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
switch (cmd) {
@@ -1439,11 +1439,11 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
}
if (!err) {
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (!err) {
err = g->ops.gr.zbc_set_table(g, &g->gr,
zbc_val);
-gk20a_idle(dev);
+gk20a_idle(g);
}
}

View File

@@ -249,7 +249,7 @@ static int gk20a_ctxsw_dev_ioctl_poll(struct gk20a_ctxsw_dev *dev)
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "");
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -259,7 +259,7 @@ static int gk20a_ctxsw_dev_ioctl_poll(struct gk20a_ctxsw_dev *dev)
if (likely(!err))
err = g->ops.fecs_trace.poll(g);
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}
@@ -287,7 +287,7 @@ int gk20a_ctxsw_dev_open(struct inode *inode, struct file *filp)
goto free_ref;
}
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
goto free_ref;
@@ -330,7 +330,7 @@ done:
nvgpu_mutex_release(&dev->write_lock);
idle:
-gk20a_idle(g->dev);
+gk20a_idle(g);
free_ref:
if (err)
gk20a_put(g);

View File

@@ -766,14 +766,14 @@ static int nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(
if (sm_id >= gr->no_of_sm)
return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
err = gr_gk20a_elpg_protected_call(g,
g->ops.gr.clear_sm_error_state(g, ch, sm_id));
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}
@@ -819,7 +819,7 @@ static int nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(
}
}
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
goto err_free;
@@ -827,7 +827,7 @@ static int nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(
g->ops.gr.update_sm_error_state(g, ch,
sm_id, sm_error_state));
-gk20a_idle(g->dev);
+gk20a_idle(g);
err_free:
kfree(sm_error_state);
@@ -843,7 +843,7 @@ nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(struct dbg_session_gk20a *dbg_s,
int err = 0;
int ctx_resident_ch_fd = -1;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -866,7 +866,7 @@ nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(struct dbg_session_gk20a *dbg_s,
args->resident_context_fd = ctx_resident_ch_fd;
}
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}
@@ -905,7 +905,7 @@ static int nvgpu_dbg_gpu_ioctl_access_fb_memory(struct dbg_session_gk20a *dbg_s,
size = args->size;
offset = 0;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
goto fail_free_buffer;
@@ -940,7 +940,7 @@ static int nvgpu_dbg_gpu_ioctl_access_fb_memory(struct dbg_session_gk20a *dbg_s,
}
fail_idle:
-gk20a_idle(g->dev);
+gk20a_idle(g);
fail_free_buffer:
nvgpu_big_free(g, buffer);
fail_dmabuf_put:
@@ -972,11 +972,11 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
}
if (!g->gr.sw_ready) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
/* protect from threaded user space calls */
@@ -1283,7 +1283,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
(g->dbg_powergating_disabled_refcount++ == 0)) {
gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module busy");
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -1338,7 +1338,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
gk20a_pmu_pg_global_enable(g, true);
gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
dbg_s->is_pg_disabled = false;
@@ -1381,7 +1381,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
gk20a_dbg_fn("%s smpc ctxsw mode = %d",
dev_name(dbg_s->dev), args->mode);
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to poweron");
return err;
@@ -1409,7 +1409,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
err = g->ops.regops.apply_smpc_war(dbg_s);
clean_up:
nvgpu_mutex_release(&g->dbg_sessions_lock);
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}
@@ -1433,8 +1433,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
"session doesn't have a valid reservation");
}
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to poweron");
return err;
@@ -1462,7 +1461,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
*/
clean_up:
nvgpu_mutex_release(&g->dbg_sessions_lock);
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}
@@ -1480,7 +1479,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
if (!ch)
return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to poweron");
return err;
@@ -1512,7 +1511,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
clean_up:
nvgpu_mutex_release(&g->dbg_sessions_lock);
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}
@@ -1807,7 +1806,7 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
goto fail_unmap;
}
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to poweron");
goto fail_unmap;
@@ -1830,7 +1829,7 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
perf_pmasys_mem_block_valid_true_f() |
perf_pmasys_mem_block_target_lfb_f());
-gk20a_idle(g->dev);
+gk20a_idle(g);
return 0;
@@ -1848,7 +1847,7 @@ static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
if (!g->allow_all)
return -EACCES;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to poweron");
return err;
@@ -1864,7 +1863,7 @@ static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
perf_pmasys_mem_block_valid_false_f() |
perf_pmasys_mem_block_target_f(0));
-gk20a_idle(g->dev);
+gk20a_idle(g);
gk20a_vm_unmap_buffer(&g->mm.pmu.vm, args->offset, NULL);

View File

@@ -136,13 +136,14 @@ int gk20a_gr_debug_dump(struct device *dev)
static int gk20a_gr_debug_show(struct seq_file *s, void *unused)
{
struct device *dev = s->private;
+struct gk20a *g = gk20a_get_platform(dev)->g;
struct gk20a_debug_output o = {
.fn = gk20a_debug_write_to_seqfile,
.ctx = s,
};
int err;
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(dev, "failed to power on gpu: %d", err);
return -EINVAL;
@@ -150,7 +151,7 @@ static int gk20a_gr_debug_show(struct seq_file *s, void *unused)
gk20a_gr_dump_regs(dev, &o);
-gk20a_idle(dev);
+gk20a_idle(g);
return 0;
}
@@ -183,7 +184,7 @@ static int gk20a_debug_show(struct seq_file *s, void *unused)
g = gk20a_get_platform(dev)->g;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(g->dev, "failed to power on gpu: %d", err);
return -EFAULT;
@@ -193,7 +194,7 @@ static int gk20a_debug_show(struct seq_file *s, void *unused)
if (g->ops.debug.show_dump)
g->ops.debug.show_dump(g, &o);
-gk20a_idle(g->dev);
+gk20a_idle(g);
return 0;
}

View File

@@ -332,7 +332,7 @@ static int gk20a_fecs_trace_poll(struct gk20a *g)
int cnt;
int err;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (unlikely(err))
return err;
@@ -372,7 +372,7 @@ static int gk20a_fecs_trace_poll(struct gk20a *g)
done:
nvgpu_mutex_release(&trace->poll_lock);
-gk20a_idle(g->dev);
+gk20a_idle(g);
return err;
}

View File

@@ -1434,24 +1434,26 @@ static int gk20a_can_busy(struct gk20a *g)
return 1;
}
-int gk20a_busy(struct device *dev)
+int gk20a_busy(struct gk20a *g)
{
int ret = 0;
-struct gk20a *g;
-struct gk20a_platform *platform;
+struct device *dev;
-if (!dev)
-	return -ENODEV;
-g = get_gk20a(dev);
-platform = gk20a_get_platform(dev);
-if (!g || !gk20a_can_busy(g))
+if (!g)
return -ENODEV;
atomic_inc(&g->usage_count);
down_read(&g->busy_lock);
+if (!gk20a_can_busy(g)) {
+	ret = -ENODEV;
+	atomic_dec(&g->usage_count);
+	goto fail;
+}
+dev = g->dev;
if (pm_runtime_enabled(dev)) {
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
@@ -1484,22 +1486,21 @@ void gk20a_idle_nosuspend(struct device *dev)
pm_runtime_put_noidle(dev);
}
-void gk20a_idle(struct device *dev)
+void gk20a_idle(struct gk20a *g)
{
-struct gk20a_platform *platform;
-struct gk20a *g;
-if (!dev)
-	return;
-g = get_gk20a(dev);
-platform = gk20a_get_platform(dev);
+struct device *dev;
atomic_dec(&g->usage_count);
+down_read(&g->busy_lock);
+dev = g->dev;
+if (!(dev && gk20a_can_busy(g)))
+	goto fail;
if (pm_runtime_enabled(dev)) {
#ifdef CONFIG_PM
-if (atomic_read(&dev->power.usage_count) == 1)
+if (atomic_read(&g->dev->power.usage_count) == 1)
gk20a_scale_notify_idle(dev);
#endif
@@ -1509,6 +1510,8 @@ void gk20a_idle(struct device *dev)
} else {
gk20a_scale_notify_idle(dev);
}
+fail:
+up_read(&g->busy_lock);
}
void gk20a_disable(struct gk20a *g, u32 units)

View File

@@ -1391,8 +1391,8 @@ void gk20a_remove_sysfs(struct device *dev);
void gk20a_busy_noresume(struct device *dev);
void gk20a_idle_nosuspend(struct device *dev);
-int __must_check gk20a_busy(struct device *dev);
-void gk20a_idle(struct device *dev);
+int __must_check gk20a_busy(struct gk20a *g);
+void gk20a_idle(struct gk20a *g);
void gk20a_disable(struct gk20a *g, u32 units);
void gk20a_enable(struct gk20a *g, u32 units);
void gk20a_reset(struct gk20a *g, u32 units);

View File

@@ -48,7 +48,7 @@ static ssize_t elcg_enable_store(struct device *dev,
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -60,7 +60,7 @@ static ssize_t elcg_enable_store(struct device *dev,
gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);
}
-gk20a_idle(g->dev);
+gk20a_idle(g);
dev_info(dev, "ELCG is %s.\n", g->elcg_enabled ? "enabled" :
"disabled");
@@ -93,7 +93,7 @@ static ssize_t blcg_enable_store(struct device *dev,
else
g->blcg_enabled = false;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -117,7 +117,7 @@ static ssize_t blcg_enable_store(struct device *dev,
if (g->ops.clock_gating.blcg_xbar_load_gating_prod)
g->ops.clock_gating.blcg_xbar_load_gating_prod(g,
g->blcg_enabled);
-gk20a_idle(g->dev);
+gk20a_idle(g);
dev_info(dev, "BLCG is %s.\n", g->blcg_enabled ? "enabled" :
"disabled");
@@ -156,7 +156,7 @@ static ssize_t slcg_enable_store(struct device *dev,
* init. Therefore, it would be incongruous to add it here. Once
* it is added to init, we should add it here too.
*/
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -184,7 +184,7 @@ static ssize_t slcg_enable_store(struct device *dev,
g->ops.clock_gating.slcg_pmu_load_gating_prod(g, g->slcg_enabled);
if (g->ops.clock_gating.slcg_xbar_load_gating_prod)
g->ops.clock_gating.slcg_xbar_load_gating_prod(g, g->slcg_enabled);
-gk20a_idle(g->dev);
+gk20a_idle(g);
dev_info(dev, "SLCG is %s.\n", g->slcg_enabled ? "enabled" :
"disabled");
@@ -289,6 +289,8 @@ static ssize_t railgate_enable_store(struct device *dev,
{
struct gk20a_platform *platform = dev_get_drvdata(dev);
unsigned long railgate_enable = 0;
+/* dev is guaranteed to be valid here. Ok to de-reference */
+struct gk20a *g = get_gk20a(dev);
int err = 0;
if (kstrtoul(buf, 10, &railgate_enable) < 0)
@@ -296,12 +298,12 @@ static ssize_t railgate_enable_store(struct device *dev,
if (railgate_enable && !platform->can_railgate) {
/* release extra ref count */
-gk20a_idle(dev);
+gk20a_idle(g);
platform->can_railgate = true;
platform->user_railgate_disabled = false;
} else if (railgate_enable == 0 && platform->can_railgate) {
/* take extra ref count */
-err = gk20a_busy(dev);
+err = gk20a_busy(g);
if (err)
return err;
platform->can_railgate = false;
@@ -348,10 +350,10 @@ static ssize_t railgate_delay_store(struct device *dev,
dev_err(dev, "Invalid powergate delay\n");
/* wake-up system to make rail-gating delay effective immediately */
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
-gk20a_idle(g->dev);
+gk20a_idle(g);
return count;
}
@@ -417,13 +419,13 @@ static ssize_t gk20a_load_show(struct device *dev,
if (!g->power_on) {
busy_time = 0;
} else {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
gk20a_pmu_load_update(g);
gk20a_pmu_load_norm(g, &busy_time);
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
res = snprintf(buf, PAGE_SIZE, "%u\n", busy_time);
@@ -445,7 +447,7 @@ static ssize_t elpg_enable_store(struct device *dev,
if (!g->power_on) {
g->elpg_enabled = val ? true : false;
} else {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return -EAGAIN;
/*
@@ -468,7 +470,7 @@ static ssize_t elpg_enable_store(struct device *dev,
gk20a_pmu_pg_global_enable(g, false);
}
}
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
dev_info(dev, "ELPG is %s.\n", g->elpg_enabled ? "enabled" :
"disabled");
@@ -500,7 +502,7 @@ static ssize_t mscg_enable_store(struct device *dev,
if (!g->power_on) {
g->mscg_enabled = val ? true : false;
} else {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return -EAGAIN;
/*
@@ -532,7 +534,7 @@ static ssize_t mscg_enable_store(struct device *dev,
}
g->mscg_enabled = false;
}
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
dev_info(dev, "MSCG is %s.\n", g->mscg_enabled ? "enabled" :
"disabled");
@@ -617,7 +619,7 @@ static ssize_t aelpg_enable_store(struct device *dev,
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -636,7 +638,7 @@ static ssize_t aelpg_enable_store(struct device *dev,
} else {
dev_info(dev, "PMU is not ready, AELPG request failed\n");
}
-gk20a_idle(g->dev);
+gk20a_idle(g);
dev_info(dev, "AELPG is %s.\n", g->aelpg_enabled ? "enabled" :
"disabled");
@@ -674,9 +676,9 @@ static ssize_t allow_all_enable_store(struct device *dev,
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
g->allow_all = (val ? true : false);
-gk20a_idle(g->dev);
+gk20a_idle(g);
return count;
}
@@ -811,7 +813,7 @@ static ssize_t tpc_fs_mask_read(struct device *dev,
u32 tpc_fs_mask = 0;
int err = 0;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -822,7 +824,7 @@ static ssize_t tpc_fs_mask_read(struct device *dev,
(gr->max_tpc_per_gpc_count * gpc_index);
}
-gk20a_idle(g->dev);
+gk20a_idle(g);
return snprintf(buf, PAGE_SIZE, "0x%x\n", tpc_fs_mask);
}

View File

@@ -5231,7 +5231,7 @@ int gk20a_pmu_load_update(struct gk20a *g)
void gk20a_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles,
u32 *total_cycles)
{
-if (!g->power_on || gk20a_busy(g->dev)) {
+if (!g->power_on || gk20a_busy(g)) {
*busy_cycles = 0;
*total_cycles = 0;
return;
@@ -5242,20 +5242,20 @@ void gk20a_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles,
rmb();
*total_cycles = pwr_pmu_idle_count_value_v(
gk20a_readl(g, pwr_pmu_idle_count_r(2)));
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
void gk20a_pmu_reset_load_counters(struct gk20a *g)
{
u32 reg_val = pwr_pmu_idle_count_reset_f(1);
-if (!g->power_on || gk20a_busy(g->dev))
+if (!g->power_on || gk20a_busy(g))
return;
gk20a_writel(g, pwr_pmu_idle_count_r(2), reg_val);
wmb();
gk20a_writel(g, pwr_pmu_idle_count_r(1), reg_val);
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
@@ -5499,13 +5499,13 @@ static int mscg_stat_show(struct seq_file *s, void *data)
/* Don't unnecessarily power on the device */
if (g->power_on) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
gk20a_pmu_get_pg_stats(g,
PMU_PG_ELPG_ENGINE_ID_MS, &pg_stat_data);
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
total_ingating = g->pg_ingating_time_us +
(u64)pg_stat_data.ingating_time;
@@ -5557,13 +5557,13 @@ static int mscg_transitions_show(struct seq_file *s, void *data)
int err;
if (g->power_on) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
gk20a_pmu_get_pg_stats(g,
PMU_PG_ELPG_ENGINE_ID_MS, &pg_stat_data);
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
total_gating_cnt = g->pg_gating_cnt + pg_stat_data.gating_cnt;
@@ -5593,13 +5593,13 @@ static int elpg_stat_show(struct seq_file *s, void *data)
/* Don't unnecessarily power on the device */
if (g->power_on) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
gk20a_pmu_get_pg_stats(g,
PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
total_ingating = g->pg_ingating_time_us +
(u64)pg_stat_data.ingating_time;
@@ -5650,13 +5650,13 @@ static int elpg_transitions_show(struct seq_file *s, void *data)
int err;
if (g->power_on) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
gk20a_pmu_get_pg_stats(g,
PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
total_gating_cnt = g->pg_gating_cnt + pg_stat_data.gating_cnt;
@@ -5772,7 +5772,7 @@ static ssize_t perfmon_events_enable_write(struct file *file,
/* Don't turn on gk20a unnecessarily */
if (g->power_on) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -5783,7 +5783,7 @@ static ssize_t perfmon_events_enable_write(struct file *file,
g->pmu.perfmon_sampling_enabled = false;
pmu_perfmon_stop_sampling(&(g->pmu));
}
-gk20a_idle(g->dev);
+gk20a_idle(g);
} else {
g->pmu.perfmon_sampling_enabled = val ? true : false;
}

View File

@@ -233,13 +233,13 @@ static int gk20a_sched_dev_ioctl_tsg_set_timeslice(
if (!kref_get_unless_zero(&tsg->refcount))
return -ENXIO;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
goto done;
err = gk20a_tsg_set_timeslice(tsg, arg->timeslice);
-gk20a_idle(g->dev);
+gk20a_idle(g);
done:
kref_put(&tsg->refcount, gk20a_tsg_release);
@@ -266,13 +266,13 @@ static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave(
if (!kref_get_unless_zero(&tsg->refcount))
return -ENXIO;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
goto done;
err = gk20a_tsg_set_runlist_interleave(tsg, arg->runlist_interleave);
-gk20a_idle(g->dev);
+gk20a_idle(g);
done:
kref_put(&tsg->refcount, gk20a_tsg_release);
@@ -389,11 +389,11 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp)
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p", g);
if (!sched->sw_ready) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
goto free_ref;
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
if (!nvgpu_mutex_tryacquire(&sched->busy_lock)) {
@@ -538,7 +538,7 @@ static int gk20a_sched_debugfs_show(struct seq_file *s, void *unused)
int i;
int err;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -563,7 +563,7 @@ static int gk20a_sched_debugfs_show(struct seq_file *s, void *unused)
nvgpu_mutex_release(&sched->status_lock);
-gk20a_idle(g->dev);
+gk20a_idle(g);
return 0;
}
@@ -597,13 +597,13 @@ void gk20a_sched_ctrl_tsg_added(struct gk20a *g, struct tsg_gk20a *tsg)
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
if (!sched->sw_ready) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
WARN_ON(err);
return;
}
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
nvgpu_mutex_acquire(&sched->status_lock);

View File

@@ -550,7 +550,7 @@ static int gk20a_tsg_ioctl_set_priority(struct gk20a *g,
goto done;
}
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
goto done;
@@ -558,7 +558,7 @@ static int gk20a_tsg_ioctl_set_priority(struct gk20a *g,
err = gk20a_tsg_set_priority(g, tsg, arg->priority);
-gk20a_idle(g->dev);
+gk20a_idle(g);
done:
nvgpu_mutex_release(&sched->control_lock);
return err;
@@ -577,7 +577,7 @@ static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
err = -EPERM;
goto done;
}
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
goto done;
@@ -585,7 +585,7 @@ static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g,
err = gk20a_tsg_set_runlist_interleave(tsg, arg->level);
-gk20a_idle(g->dev);
+gk20a_idle(g);
done:
nvgpu_mutex_release(&sched->control_lock);
return err;
@@ -604,13 +604,13 @@ static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g,
err = -EPERM;
goto done;
}
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to power on gpu");
goto done;
}
err = gk20a_tsg_set_timeslice(tsg, arg->timeslice_us);
-gk20a_idle(g->dev);
+gk20a_idle(g);
done:
nvgpu_mutex_release(&sched->control_lock);
return err;
@@ -641,11 +641,11 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
}
if (!g->gr.sw_ready) {
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
-gk20a_idle(g->dev);
+gk20a_idle(g);
}
switch (cmd) {
@@ -668,33 +668,33 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
case NVGPU_IOCTL_TSG_ENABLE:
{
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(g->dev,
"failed to host gk20a for ioctl cmd: 0x%x", cmd);
return err;
}
gk20a_enable_tsg(tsg);
-gk20a_idle(g->dev);
+gk20a_idle(g);
break;
}
case NVGPU_IOCTL_TSG_DISABLE:
{
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(g->dev,
"failed to host gk20a for ioctl cmd: 0x%x", cmd);
return err;
}
gk20a_disable_tsg(tsg);
-gk20a_idle(g->dev);
+gk20a_idle(g);
break;
}
case NVGPU_IOCTL_TSG_PREEMPT:
{
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err) {
gk20a_err(g->dev,
"failed to host gk20a for ioctl cmd: 0x%x", cmd);
@@ -702,7 +702,7 @@ long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
}
/* preempt TSG */
err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
-gk20a_idle(g->dev);
+gk20a_idle(g);
break;
}

View File

@@ -1751,7 +1751,7 @@ static int monitor_get(void *data, u64 *val)
u64 freq = clk->gpc_pll.clk_in;
u32 count1, count2;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -1789,7 +1789,7 @@ static int monitor_get(void *data, u64 *val)
gk20a_writel(g, therm_clk_slowdown_r(0), clk_slowdown_save);
nvgpu_mutex_release(&g->clk.clk_mutex);
-gk20a_idle(g->dev);
+gk20a_idle(g);
if (count1 != count2)
return -EBUSY;
@@ -1807,7 +1807,7 @@ static int voltage_get(void *data, u64 *val)
if (clk->gpc_pll.mode != GPC_PLL_MODE_DVFS)
return -ENOSYS;
-err = gk20a_busy(g->dev);
+err = gk20a_busy(g);
if (err)
return err;
@@ -1820,7 +1820,7 @@ static int voltage_get(void *data, u64 *val)
nvgpu_mutex_release(&g->clk.clk_mutex);
-gk20a_idle(g->dev);
+gk20a_idle(g);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(voltage_fops, voltage_get, NULL, "%llu\n");