gpu: nvgpu: Add busy/idle ref counting

Add reference counting for gk20a_busy() and gk20a_idle() so that
the driver can keep track of whether it is currently active.

Bug 1816516
Bug 1807277

Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1250019
(cherry picked from commit 7f558019735bb34cf00dd1ec17df1797501cff60)
Change-Id: I64c2ff1719673912ae127707e58ee557966c4d4d
Reviewed-on: http://git-master/r/1261922
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Alex Waterman
2016-11-07 14:49:16 -08:00
committed by mobile promotions
parent c1750f45f5
commit ca2c499527
2 changed files with 71 additions and 1 deletions

View File

@@ -1732,16 +1732,67 @@ void gk20a_busy_noresume(struct device *dev)
pm_runtime_get_noresume(dev);
}
/*
 * Start the process for unloading the driver. Set g->driver_is_dying.
 *
 * Once this flag is set gk20a_can_busy() refuses new busy requests, so no
 * new users can take a reference on the driver while it is being unloaded.
 * Callers are expected to follow up with gk20a_wait_for_idle() to let the
 * existing usage_count references drain.
 */
void gk20a_driver_start_unload(struct gk20a *g)
{
	g->driver_is_dying = 1;
}
/*
 * Poll until the driver's usage_count drops to zero or a ~3 second timeout
 * expires.
 *
 * Returns 0 once the driver is idle, -ENODEV if no gk20a is bound to @dev,
 * or -ETIMEDOUT if references are still outstanding after the full wait.
 */
int gk20a_wait_for_idle(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	int wait_length = 150; /* 3 second overall max wait. */

	if (!g)
		return -ENODEV;

	while (atomic_read(&g->usage_count) && wait_length-- >= 0)
		msleep(20);

	/*
	 * Decide success by re-reading the usage count, not by wait_length:
	 * the count may reach zero during the final msleep() after
	 * wait_length has already gone negative, and that case must not be
	 * reported as a timeout.
	 */
	if (atomic_read(&g->usage_count)) {
		pr_warn("%s: Timed out waiting for idle (%d)!\n",
			__func__, atomic_read(&g->usage_count));
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * Check if the device can go busy. Basically if the driver is currently
 * in the process of dying then do not let new places make the driver busy.
 *
 * Returns 1 when busy requests are allowed, 0 when the driver is unloading.
 */
static int gk20a_can_busy(struct gk20a *g)
{
	return !g->driver_is_dying;
}
int gk20a_busy(struct device *dev)
{
int ret = 0;
struct gk20a *g = get_gk20a(dev);
struct gk20a_platform *platform;
if (!dev)
return -ENODEV;
platform = gk20a_get_platform(dev);
if (!g || !gk20a_can_busy(g))
return -ENODEV;
atomic_inc(&g->usage_count);
down_read(&g->busy_lock);
if (pm_runtime_enabled(dev)) {
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
atomic_dec(&g->usage_count);
goto fail;
}
} else {
@@ -1749,8 +1800,10 @@ int gk20a_busy(struct device *dev)
ret = gk20a_gpu_is_virtual(dev) ?
vgpu_pm_finalize_poweron(dev)
: gk20a_pm_finalize_poweron(dev);
if (ret)
if (ret) {
atomic_dec(&g->usage_count);
goto fail;
}
}
}
@@ -1769,6 +1822,17 @@ void gk20a_idle_nosuspend(struct device *dev)
void gk20a_idle(struct device *dev)
{
struct gk20a_platform *platform;
struct gk20a *g;
if (!dev)
return;
g = get_gk20a(dev);
platform = gk20a_get_platform(dev);
atomic_dec(&g->usage_count);
if (pm_runtime_enabled(dev)) {
#ifdef CONFIG_PM
if (atomic_read(&dev->power.usage_count) == 1)

View File

@@ -34,6 +34,7 @@ struct acr_desc;
#include <linux/irqreturn.h>
#include <linux/tegra-soc.h>
#include <linux/version.h>
#include <linux/atomic.h>
#include "../../../arch/arm/mach-tegra/iomap.h"
@@ -784,6 +785,8 @@ struct gk20a {
struct device *dev;
struct platform_device *host1x_dev;
atomic_t usage_count;
int driver_is_dying;
struct resource *reg_mem;
void __iomem *regs;
@@ -1238,6 +1241,9 @@ int gk20a_do_unidle(void);
int __gk20a_do_idle(struct device *dev, bool force_reset);
int __gk20a_do_unidle(struct device *dev);
void gk20a_driver_start_unload(struct gk20a *g);
int gk20a_wait_for_idle(struct device *dev);
#define NVGPU_GPU_ARCHITECTURE_SHIFT 4
/* constructs unique and compact GPUID from nvgpu_gpu_characteristics