gpu: nvgpu: Register as subdomain of host1x

Add gk20a as a sub power domain of host1x. This ensures that host1x
remains powered whenever gk20a is in use.

Bug 200003112

Change-Id: I08db595bc7b819d86d33fb98af0d8fb4de369463
Signed-off-by: Arto Merilainen <amerilainen@nvidia.com>
Reviewed-on: http://git-master/r/407543
Reviewed-by: Riham Haidar <rhaidar@nvidia.com>
Tested-by: Riham Haidar <rhaidar@nvidia.com>
This commit is contained in:
Terje Bergstrom
2014-05-07 13:09:36 +03:00
committed by Dan Willemsen
parent 4d93f77745
commit 4ac110cb8a
9 changed files with 50 additions and 137 deletions

View File

@@ -227,7 +227,7 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EFAULT; return -EFAULT;
} }
err = gk20a_channel_busy(g->dev); err = gk20a_busy(g->dev);
if (err) if (err)
return err; return err;
@@ -284,7 +284,7 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break; break;
} }
gk20a_channel_idle(g->dev); gk20a_idle(g->dev);
if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)); err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));

View File

@@ -697,14 +697,14 @@ int gk20a_channel_release(struct inode *inode, struct file *filp)
trace_gk20a_channel_release(dev_name(&g->dev->dev)); trace_gk20a_channel_release(dev_name(&g->dev->dev));
err = gk20a_channel_busy(ch->g->dev); err = gk20a_busy(ch->g->dev);
if (err) { if (err) {
gk20a_err(dev_from_gk20a(g), "failed to release channel %d", gk20a_err(dev_from_gk20a(g), "failed to release channel %d",
ch->hw_chid); ch->hw_chid);
return err; return err;
} }
gk20a_free_channel(ch, true); gk20a_free_channel(ch, true);
gk20a_channel_idle(ch->g->dev); gk20a_idle(ch->g->dev);
gk20a_put_client(g); gk20a_put_client(g);
filp->private_data = NULL; filp->private_data = NULL;
@@ -767,14 +767,14 @@ static int __gk20a_channel_open(struct gk20a *g, struct file *filp)
return err; return err;
} }
err = gk20a_channel_busy(g->dev); err = gk20a_busy(g->dev);
if (err) { if (err) {
gk20a_put_client(g); gk20a_put_client(g);
gk20a_err(dev_from_gk20a(g), "failed to power on, %d", err); gk20a_err(dev_from_gk20a(g), "failed to power on, %d", err);
return err; return err;
} }
ch = gk20a_open_new_channel(g); ch = gk20a_open_new_channel(g);
gk20a_channel_idle(g->dev); gk20a_idle(g->dev);
if (!ch) { if (!ch) {
gk20a_put_client(g); gk20a_put_client(g);
gk20a_err(dev_from_gk20a(g), gk20a_err(dev_from_gk20a(g),
@@ -1408,12 +1408,12 @@ void gk20a_channel_update(struct channel_gk20a *c, int nr_completed)
list_del_init(&job->list); list_del_init(&job->list);
kfree(job); kfree(job);
gk20a_channel_idle(g->dev); gk20a_idle(g->dev);
} }
mutex_unlock(&c->jobs_lock); mutex_unlock(&c->jobs_lock);
for (i = 0; i < nr_completed; i++) for (i = 0; i < nr_completed; i++)
gk20a_channel_idle(c->g->dev); gk20a_idle(c->g->dev);
} }
static int gk20a_submit_channel_gpfifo(struct channel_gk20a *c, static int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
@@ -1455,7 +1455,7 @@ static int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
gk20a_dbg_info("channel %d", c->hw_chid); gk20a_dbg_info("channel %d", c->hw_chid);
/* gk20a_channel_update releases this ref. */ /* gk20a_channel_update releases this ref. */
err = gk20a_channel_busy(g->dev); err = gk20a_busy(g->dev);
if (err) { if (err) {
gk20a_err(d, "failed to host gk20a to submit gpfifo"); gk20a_err(d, "failed to host gk20a to submit gpfifo");
return err; return err;
@@ -1606,7 +1606,7 @@ clean_up:
gk20a_err(d, "fail"); gk20a_err(d, "fail");
free_priv_cmdbuf(c, wait_cmd); free_priv_cmdbuf(c, wait_cmd);
free_priv_cmdbuf(c, incr_cmd); free_priv_cmdbuf(c, incr_cmd);
gk20a_channel_idle(g->dev); gk20a_idle(g->dev);
return err; return err;
} }
@@ -1856,7 +1856,6 @@ int gk20a_channel_suspend(struct gk20a *g)
for (chid = 0; chid < f->num_channels; chid++) { for (chid = 0; chid < f->num_channels; chid++) {
struct channel_gk20a *c = &f->channel[chid]; struct channel_gk20a *c = &f->channel[chid];
if (c->in_use && c->obj_class != KEPLER_C) { if (c->in_use && c->obj_class != KEPLER_C) {
gk20a_platform_channel_busy(g->dev);
err = gk20a_channel_submit_wfi(c); err = gk20a_channel_submit_wfi(c);
if (err) { if (err) {
gk20a_err(d, "cannot idle channel %d\n", gk20a_err(d, "cannot idle channel %d\n",
@@ -1866,7 +1865,6 @@ int gk20a_channel_suspend(struct gk20a *g)
c->sync->wait_cpu(c->sync, &c->last_submit_fence, c->sync->wait_cpu(c->sync, &c->last_submit_fence,
500000); 500000);
gk20a_platform_channel_idle(g->dev);
break; break;
} }
} }
@@ -2037,7 +2035,7 @@ long gk20a_channel_ioctl(struct file *filp,
case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD: case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
break; break;
case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX: case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
err = gk20a_channel_busy(dev); err = gk20a_busy(dev);
if (err) { if (err) {
dev_err(&dev->dev, dev_err(&dev->dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x", "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2046,10 +2044,10 @@ long gk20a_channel_ioctl(struct file *filp,
} }
err = gk20a_alloc_obj_ctx(ch, err = gk20a_alloc_obj_ctx(ch,
(struct nvhost_alloc_obj_ctx_args *)buf); (struct nvhost_alloc_obj_ctx_args *)buf);
gk20a_channel_idle(dev); gk20a_idle(dev);
break; break;
case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX: case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
err = gk20a_channel_busy(dev); err = gk20a_busy(dev);
if (err) { if (err) {
dev_err(&dev->dev, dev_err(&dev->dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x", "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2058,10 +2056,10 @@ long gk20a_channel_ioctl(struct file *filp,
} }
err = gk20a_free_obj_ctx(ch, err = gk20a_free_obj_ctx(ch,
(struct nvhost_free_obj_ctx_args *)buf); (struct nvhost_free_obj_ctx_args *)buf);
gk20a_channel_idle(dev); gk20a_idle(dev);
break; break;
case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO: case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
err = gk20a_channel_busy(dev); err = gk20a_busy(dev);
if (err) { if (err) {
dev_err(&dev->dev, dev_err(&dev->dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x", "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2070,14 +2068,14 @@ long gk20a_channel_ioctl(struct file *filp,
} }
err = gk20a_alloc_channel_gpfifo(ch, err = gk20a_alloc_channel_gpfifo(ch,
(struct nvhost_alloc_gpfifo_args *)buf); (struct nvhost_alloc_gpfifo_args *)buf);
gk20a_channel_idle(dev); gk20a_idle(dev);
break; break;
case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO: case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
err = gk20a_ioctl_channel_submit_gpfifo(ch, err = gk20a_ioctl_channel_submit_gpfifo(ch,
(struct nvhost_submit_gpfifo_args *)buf); (struct nvhost_submit_gpfifo_args *)buf);
break; break;
case NVHOST_IOCTL_CHANNEL_WAIT: case NVHOST_IOCTL_CHANNEL_WAIT:
err = gk20a_channel_busy(dev); err = gk20a_busy(dev);
if (err) { if (err) {
dev_err(&dev->dev, dev_err(&dev->dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x", "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2086,10 +2084,10 @@ long gk20a_channel_ioctl(struct file *filp,
} }
err = gk20a_channel_wait(ch, err = gk20a_channel_wait(ch,
(struct nvhost_wait_args *)buf); (struct nvhost_wait_args *)buf);
gk20a_channel_idle(dev); gk20a_idle(dev);
break; break;
case NVHOST_IOCTL_CHANNEL_ZCULL_BIND: case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
err = gk20a_channel_busy(dev); err = gk20a_busy(dev);
if (err) { if (err) {
dev_err(&dev->dev, dev_err(&dev->dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x", "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2098,10 +2096,10 @@ long gk20a_channel_ioctl(struct file *filp,
} }
err = gk20a_channel_zcull_bind(ch, err = gk20a_channel_zcull_bind(ch,
(struct nvhost_zcull_bind_args *)buf); (struct nvhost_zcull_bind_args *)buf);
gk20a_channel_idle(dev); gk20a_idle(dev);
break; break;
case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER: case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
err = gk20a_channel_busy(dev); err = gk20a_busy(dev);
if (err) { if (err) {
dev_err(&dev->dev, dev_err(&dev->dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x", "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2110,11 +2108,11 @@ long gk20a_channel_ioctl(struct file *filp,
} }
err = gk20a_init_error_notifier(ch, err = gk20a_init_error_notifier(ch,
(struct nvhost_set_error_notifier *)buf); (struct nvhost_set_error_notifier *)buf);
gk20a_channel_idle(dev); gk20a_idle(dev);
break; break;
#ifdef CONFIG_GK20A_CYCLE_STATS #ifdef CONFIG_GK20A_CYCLE_STATS
case NVHOST_IOCTL_CHANNEL_CYCLE_STATS: case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
err = gk20a_channel_busy(dev); err = gk20a_busy(dev);
if (err) { if (err) {
dev_err(&dev->dev, dev_err(&dev->dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x", "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2123,7 +2121,7 @@ long gk20a_channel_ioctl(struct file *filp,
} }
err = gk20a_channel_cycle_stats(ch, err = gk20a_channel_cycle_stats(ch,
(struct nvhost_cycle_stats_args *)buf); (struct nvhost_cycle_stats_args *)buf);
gk20a_channel_idle(dev); gk20a_idle(dev);
break; break;
#endif #endif
case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT: case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
@@ -2153,7 +2151,7 @@ long gk20a_channel_ioctl(struct file *filp,
ch->has_timedout; ch->has_timedout;
break; break;
case NVHOST_IOCTL_CHANNEL_SET_PRIORITY: case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
err = gk20a_channel_busy(dev); err = gk20a_busy(dev);
if (err) { if (err) {
dev_err(&dev->dev, dev_err(&dev->dev,
"%s: failed to host gk20a for ioctl cmd: 0x%x", "%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2162,7 +2160,7 @@ long gk20a_channel_ioctl(struct file *filp,
} }
gk20a_channel_set_priority(ch, gk20a_channel_set_priority(ch,
((struct nvhost_set_priority_args *)buf)->priority); ((struct nvhost_set_priority_args *)buf)->priority);
gk20a_channel_idle(dev); gk20a_idle(dev);
break; break;
default: default:
dev_err(&dev->dev, "unrecognized ioctl cmd: 0x%x", cmd); dev_err(&dev->dev, "unrecognized ioctl cmd: 0x%x", cmd);

View File

@@ -227,14 +227,14 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
if (register_irq) { if (register_irq) {
/* nvhost action_gpfifo_submit_complete releases this ref. */ /* nvhost action_gpfifo_submit_complete releases this ref. */
err = gk20a_channel_busy(c->g->dev); err = gk20a_busy(c->g->dev);
if (!err) { if (!err) {
err = nvhost_intr_register_notifier(sp->host1x_pdev, err = nvhost_intr_register_notifier(sp->host1x_pdev,
sp->id, thresh, sp->id, thresh,
gk20a_channel_syncpt_update, c); gk20a_channel_syncpt_update, c);
if (err) if (err)
gk20a_channel_idle(c->g->dev); gk20a_idle(c->g->dev);
} }
/* Adding interrupt action should never fail. A proper error /* Adding interrupt action should never fail. A proper error

View File

@@ -555,7 +555,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module busy"); gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module busy");
gk20a_busy(g->dev); gk20a_busy(g->dev);
err = gk20a_channel_busy(dbg_s->pdev); err = gk20a_busy(dbg_s->pdev);
if (err) if (err)
return -EPERM; return -EPERM;
@@ -600,7 +600,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
gk20a_pmu_enable_elpg(g); gk20a_pmu_enable_elpg(g);
gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle"); gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
gk20a_channel_idle(dbg_s->pdev); gk20a_idle(dbg_s->pdev);
gk20a_idle(g->dev); gk20a_idle(g->dev);
} }

View File

@@ -739,10 +739,10 @@ void gk20a_put_client(struct gk20a *g)
WARN_ON(g->client_refcount < 0); WARN_ON(g->client_refcount < 0);
} }
static int gk20a_pm_prepare_poweroff(struct device *_dev) static int gk20a_pm_prepare_poweroff(struct device *dev)
{ {
struct platform_device *dev = to_platform_device(_dev); struct platform_device *pdev = to_platform_device(dev);
struct gk20a *g = get_gk20a(dev); struct gk20a *g = get_gk20a(pdev);
int ret = 0; int ret = 0;
gk20a_dbg_fn(""); gk20a_dbg_fn("");
@@ -767,6 +767,7 @@ static int gk20a_pm_prepare_poweroff(struct device *_dev)
/* Disable GPCPLL */ /* Disable GPCPLL */
ret |= gk20a_suspend_clk_support(g); ret |= gk20a_suspend_clk_support(g);
g->power_on = false; g->power_on = false;
return ret; return ret;
@@ -790,10 +791,10 @@ static void gk20a_detect_chip(struct gk20a *g)
g->gpu_characteristics.rev); g->gpu_characteristics.rev);
} }
static int gk20a_pm_finalize_poweron(struct device *_dev) static int gk20a_pm_finalize_poweron(struct device *dev)
{ {
struct platform_device *dev = to_platform_device(_dev); struct platform_device *pdev = to_platform_device(dev);
struct gk20a *g = get_gk20a(dev); struct gk20a *g = get_gk20a(pdev);
int err, nice_value; int err, nice_value;
gk20a_dbg_fn(""); gk20a_dbg_fn("");
@@ -846,7 +847,7 @@ static int gk20a_pm_finalize_poweron(struct device *_dev)
saving features (blcg/slcg) are enabled. For now, do it here. */ saving features (blcg/slcg) are enabled. For now, do it here. */
err = gk20a_init_clk_support(g); err = gk20a_init_clk_support(g);
if (err) { if (err) {
gk20a_err(&dev->dev, "failed to init gk20a clk"); gk20a_err(dev, "failed to init gk20a clk");
goto done; goto done;
} }
@@ -865,49 +866,49 @@ static int gk20a_pm_finalize_poweron(struct device *_dev)
err = gk20a_init_fifo_reset_enable_hw(g); err = gk20a_init_fifo_reset_enable_hw(g);
if (err) { if (err) {
gk20a_err(&dev->dev, "failed to reset gk20a fifo"); gk20a_err(dev, "failed to reset gk20a fifo");
goto done; goto done;
} }
err = gk20a_init_mm_support(g); err = gk20a_init_mm_support(g);
if (err) { if (err) {
gk20a_err(&dev->dev, "failed to init gk20a mm"); gk20a_err(dev, "failed to init gk20a mm");
goto done; goto done;
} }
err = gk20a_init_pmu_support(g); err = gk20a_init_pmu_support(g);
if (err) { if (err) {
gk20a_err(&dev->dev, "failed to init gk20a pmu"); gk20a_err(dev, "failed to init gk20a pmu");
goto done; goto done;
} }
err = gk20a_init_fifo_support(g); err = gk20a_init_fifo_support(g);
if (err) { if (err) {
gk20a_err(&dev->dev, "failed to init gk20a fifo"); gk20a_err(dev, "failed to init gk20a fifo");
goto done; goto done;
} }
err = gk20a_init_gr_support(g); err = gk20a_init_gr_support(g);
if (err) { if (err) {
gk20a_err(&dev->dev, "failed to init gk20a gr"); gk20a_err(dev, "failed to init gk20a gr");
goto done; goto done;
} }
err = gk20a_init_pmu_setup_hw2(g); err = gk20a_init_pmu_setup_hw2(g);
if (err) { if (err) {
gk20a_err(&dev->dev, "failed to init gk20a pmu_hw2"); gk20a_err(dev, "failed to init gk20a pmu_hw2");
goto done; goto done;
} }
err = gk20a_init_therm_support(g); err = gk20a_init_therm_support(g);
if (err) { if (err) {
gk20a_err(&dev->dev, "failed to init gk20a therm"); gk20a_err(dev, "failed to init gk20a therm");
goto done; goto done;
} }
err = gk20a_init_gpu_characteristics(g); err = gk20a_init_gpu_characteristics(g);
if (err) { if (err) {
gk20a_err(&dev->dev, "failed to init gk20a gpu characteristics"); gk20a_err(dev, "failed to init gk20a gpu characteristics");
goto done; goto done;
} }
@@ -1507,27 +1508,6 @@ void gk20a_busy_noresume(struct platform_device *pdev)
pm_runtime_get_noresume(&pdev->dev); pm_runtime_get_noresume(&pdev->dev);
} }
int gk20a_channel_busy(struct platform_device *pdev)
{
int ret = 0;
ret = gk20a_platform_channel_busy(pdev);
if (ret)
return ret;
ret = gk20a_busy(pdev);
if (ret)
gk20a_platform_channel_idle(pdev);
return ret;
}
void gk20a_channel_idle(struct platform_device *pdev)
{
gk20a_idle(pdev);
gk20a_platform_channel_idle(pdev);
}
int gk20a_busy(struct platform_device *pdev) int gk20a_busy(struct platform_device *pdev)
{ {
int ret = 0; int ret = 0;

View File

@@ -549,8 +549,6 @@ int clk_gk20a_debugfs_init(struct platform_device *dev);
void gk20a_busy_noresume(struct platform_device *pdev); void gk20a_busy_noresume(struct platform_device *pdev);
int gk20a_busy(struct platform_device *pdev); int gk20a_busy(struct platform_device *pdev);
void gk20a_idle(struct platform_device *pdev); void gk20a_idle(struct platform_device *pdev);
int gk20a_channel_busy(struct platform_device *pdev);
void gk20a_channel_idle(struct platform_device *pdev);
void gk20a_disable(struct gk20a *g, u32 units); void gk20a_disable(struct gk20a *g, u32 units);
void gk20a_enable(struct gk20a *g, u32 units); void gk20a_enable(struct gk20a *g, u32 units);
void gk20a_reset(struct gk20a *g, u32 units); void gk20a_reset(struct gk20a *g, u32 units);

View File

@@ -279,7 +279,7 @@ static ssize_t elpg_enable_store(struct device *device,
* Since elpg is refcounted, we should not unnecessarily call * Since elpg is refcounted, we should not unnecessarily call
* enable/disable if it is already so. * enable/disable if it is already so.
*/ */
err = gk20a_channel_busy(g->dev); err = gk20a_busy(g->dev);
if (err) if (err)
return -EAGAIN; return -EAGAIN;
@@ -290,7 +290,7 @@ static ssize_t elpg_enable_store(struct device *device,
g->elpg_enabled = false; g->elpg_enabled = false;
gk20a_pmu_disable_elpg(g); gk20a_pmu_disable_elpg(g);
} }
gk20a_channel_idle(g->dev); gk20a_idle(g->dev);
dev_info(device, "ELPG is %s.\n", g->elpg_enabled ? "enabled" : dev_info(device, "ELPG is %s.\n", g->elpg_enabled ? "enabled" :
"disabled"); "disabled");

View File

@@ -71,17 +71,6 @@ struct gk20a_platform {
*/ */
int (*late_probe)(struct platform_device *dev); int (*late_probe)(struct platform_device *dev);
/* Called before submitting work to the gpu. The platform may use this
* hook to ensure that any other hw modules that the gpu depends on are
* powered. The platform implementation must count refs to this call. */
int (*channel_busy)(struct platform_device *dev);
/* Called after the work on the gpu is completed. The platform may use
* this hook to release power refs to any other hw modules that the gpu
* depends on. The platform implementation must count refs to this
* call. */
void (*channel_idle)(struct platform_device *dev);
/* This function is called to allocate secure memory (memory that the /* This function is called to allocate secure memory (memory that the
* CPU cannot see). The function should fill the context buffer * CPU cannot see). The function should fill the context buffer
* descriptor (especially fields destroy, sgt, size). * descriptor (especially fields destroy, sgt, size).
@@ -134,23 +123,6 @@ extern struct gk20a_platform gk20a_generic_platform;
extern struct gk20a_platform gk20a_tegra_platform; extern struct gk20a_platform gk20a_tegra_platform;
#endif #endif
static inline int gk20a_platform_channel_busy(struct platform_device *dev)
{
struct gk20a_platform *p = gk20a_get_platform(dev);
int ret = 0;
if (p->channel_busy)
ret = p->channel_busy(dev);
return ret;
}
static inline void gk20a_platform_channel_idle(struct platform_device *dev)
{
struct gk20a_platform *p = gk20a_get_platform(dev);
if (p->channel_idle)
p->channel_idle(dev);
}
static inline bool gk20a_platform_has_syncpoints(struct platform_device *dev) static inline bool gk20a_platform_has_syncpoints(struct platform_device *dev)
{ {
struct gk20a_platform *p = gk20a_get_platform(dev); struct gk20a_platform *p = gk20a_get_platform(dev);

View File

@@ -92,37 +92,6 @@ int FXDIV(int x, int y)
return (x << pos) / y; return (x << pos) / y;
} }
static int gk20a_tegra_channel_busy(struct platform_device *dev)
{
int ret = 0;
/* Explicitly turn on the host1x clocks
* - This is needed as host1x driver sets ignore_children = true
* to cater the use case of display clock ON but host1x clock OFF
* in OS-Idle-Display-ON case
* - This was easily done in ACM as it only checked the ref count
* of host1x (or any device for that matter) to be zero before
* turning off its clock
* - However, runtime PM checks to see if *ANY* child of device is
* in ACTIVE state and if yes, it doesn't suspend the parent. As a
* result of this, display && host1x clocks remains ON during
* OS-Idle-Display-ON case
* - The code below fixes this use-case
*/
if (to_platform_device(dev->dev.parent))
ret = nvhost_module_busy_ext(
to_platform_device(dev->dev.parent));
return ret;
}
static void gk20a_tegra_channel_idle(struct platform_device *dev)
{
/* Explicitly turn off the host1x clocks */
if (to_platform_device(dev->dev.parent))
nvhost_module_idle_ext(to_platform_device(dev->dev.parent));
}
static void gk20a_tegra_secure_destroy(struct platform_device *pdev, static void gk20a_tegra_secure_destroy(struct platform_device *pdev,
struct gr_ctx_buffer_desc *desc) struct gr_ctx_buffer_desc *desc)
{ {
@@ -455,8 +424,8 @@ static int gk20a_tegra_late_probe(struct platform_device *dev)
{ {
struct gk20a_platform *platform = gk20a_get_platform(dev); struct gk20a_platform *platform = gk20a_get_platform(dev);
/* Make gk20a power domain a subdomain of mc */ /* Make gk20a power domain a subdomain of host1x */
tegra_pd_add_sd(&platform->g->pd); nvhost_register_client_domain(&platform->g->pd);
/* Initialise tegra specific scaling quirks */ /* Initialise tegra specific scaling quirks */
gk20a_tegra_scale_init(dev); gk20a_tegra_scale_init(dev);
@@ -520,8 +489,6 @@ struct gk20a_platform t132_gk20a_tegra_platform = {
.devfreq_governor = "nvhost_podgov", .devfreq_governor = "nvhost_podgov",
.qos_id = PM_QOS_GPU_FREQ_MIN, .qos_id = PM_QOS_GPU_FREQ_MIN,
.channel_busy = gk20a_tegra_channel_busy,
.channel_idle = gk20a_tegra_channel_idle,
.secure_alloc = gk20a_tegra_secure_alloc, .secure_alloc = gk20a_tegra_secure_alloc,
.dump_platform_dependencies = gk20a_tegra_debug_dump, .dump_platform_dependencies = gk20a_tegra_debug_dump,
}; };
@@ -548,8 +515,6 @@ struct gk20a_platform gk20a_tegra_platform = {
.devfreq_governor = "nvhost_podgov", .devfreq_governor = "nvhost_podgov",
.qos_id = PM_QOS_GPU_FREQ_MIN, .qos_id = PM_QOS_GPU_FREQ_MIN,
.channel_busy = gk20a_tegra_channel_busy,
.channel_idle = gk20a_tegra_channel_idle,
.secure_alloc = gk20a_tegra_secure_alloc, .secure_alloc = gk20a_tegra_secure_alloc,
.dump_platform_dependencies = gk20a_tegra_debug_dump, .dump_platform_dependencies = gk20a_tegra_debug_dump,
}; };