Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Move gk20a->busy_lock to os_linux
gk20a->busy_lock is a Linux specific rw_semaphore used only by Linux
code. Move it to os_linux.

JIRA NVGPU-259

Change-Id: I220a8a080a5050732683b875d3c1d0539ba0f40e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1569695
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 4f56c88feb (parent 236573e00a), committed by mobile promotions
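Every hunk below applies the same pattern: Linux code recovers the OS wrapper with nvgpu_os_linux_from_gk20a() and takes busy_lock on struct nvgpu_os_linux instead of on struct gk20a. The following is a minimal userspace sketch of that pattern, not nvgpu code: it borrows the driver's names, but a pthread rwlock stands in for the kernel rw_semaphore, offsetof() stands in for the kernel's wrapper lookup, and the usage counter is a plain int rather than an atomic.

/*
 * Illustrative only: OS-agnostic state stays in struct gk20a, the
 * Linux-only busy_lock moves into the wrapper struct nvgpu_os_linux.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct gk20a {
	int usage_count;			/* OS-agnostic state stays here */
};

struct nvgpu_os_linux {
	struct gk20a g;				/* embedded common struct */
	pthread_rwlock_t busy_lock;		/* Linux-only lock now lives here */
};

/* Sketch of nvgpu_os_linux_from_gk20a(): recover the wrapper from &l->g. */
static struct nvgpu_os_linux *nvgpu_os_linux_from_gk20a(struct gk20a *g)
{
	return (struct nvgpu_os_linux *)
		((char *)g - offsetof(struct nvgpu_os_linux, g));
}

/* Sketch of gk20a_busy(): the lock is now taken through the wrapper. */
static int gk20a_busy(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	g->usage_count++;			/* stand-in for the atomic usage count */
	pthread_rwlock_rdlock(&l->busy_lock);	/* was &g->busy_lock before the move */
	/* the real driver powers the GPU up here */
	pthread_rwlock_unlock(&l->busy_lock);
	return 0;
}

int main(void)
{
	struct nvgpu_os_linux l = { .g = { .usage_count = 0 } };

	pthread_rwlock_init(&l.busy_lock, NULL);	/* init_rwsem() analogue */
	gk20a_busy(&l.g);
	printf("usage_count = %d\n", l.g.usage_count);
	pthread_rwlock_destroy(&l.busy_lock);
	return 0;
}

The sketch builds with any C compiler plus -lpthread; in the driver itself the wrapper comes from the real nvgpu_os_linux_from_gk20a() declared in os_linux.h, and the lock is the rw_semaphore added to struct nvgpu_os_linux in that hunk.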
@@ -42,7 +42,7 @@ static void nvgpu_init_vars(struct gk20a *g)
 	init_waitqueue_head(&l->sw_irq_nonstall_last_handled_wq);
 	gk20a_init_gr(g);
 
-	init_rwsem(&g->busy_lock);
+	init_rwsem(&l->busy_lock);
 	init_rwsem(&g->deterministic_busy);
 
 	nvgpu_spinlock_init(&g->mc_enable_lock);
@@ -63,6 +63,7 @@ void gk20a_busy_noresume(struct gk20a *g)
 
 int gk20a_busy(struct gk20a *g)
 {
+	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
 	int ret = 0;
 	struct device *dev;
 
@@ -71,7 +72,7 @@ int gk20a_busy(struct gk20a *g)
 
 	atomic_inc(&g->usage_count.atomic_var);
 
-	down_read(&g->busy_lock);
+	down_read(&l->busy_lock);
 
 	if (!gk20a_can_busy(g)) {
 		ret = -ENODEV;
@@ -107,7 +108,7 @@ int gk20a_busy(struct gk20a *g)
 	}
 
 fail:
-	up_read(&g->busy_lock);
+	up_read(&l->busy_lock);
 
 	return ret < 0 ? ret : 0;
 }
@@ -282,12 +283,13 @@ static struct of_device_id tegra_gk20a_of_match[] = {
  *
  * In success, this call MUST be balanced by caller with __gk20a_do_unidle()
  *
- * Acquires two locks : &g->busy_lock and &platform->railgate_lock
+ * Acquires two locks : &l->busy_lock and &platform->railgate_lock
  * In success, we hold these locks and return
  * In failure, we release these locks and return
  */
 int __gk20a_do_idle(struct gk20a *g, bool force_reset)
 {
+	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
 	struct device *dev = dev_from_gk20a(g);
 	struct gk20a_platform *platform = dev_get_drvdata(dev);
 	struct nvgpu_timeout timeout;
@@ -303,7 +305,7 @@ int __gk20a_do_idle(struct gk20a *g, bool force_reset)
 	gk20a_channel_deterministic_idle(g);
 
 	/* acquire busy lock to block other busy() calls */
-	down_write(&g->busy_lock);
+	down_write(&l->busy_lock);
 
 	/* acquire railgate lock to prevent unrailgate in midst of do_idle() */
 	nvgpu_mutex_acquire(&platform->railgate_lock);
@@ -406,7 +408,7 @@ fail_drop_usage_count:
 	pm_runtime_put_noidle(dev);
fail_timeout:
 	nvgpu_mutex_release(&platform->railgate_lock);
-	up_write(&g->busy_lock);
+	up_write(&l->busy_lock);
 	gk20a_channel_deterministic_unidle(g);
 	return -EBUSY;
 }
@@ -429,6 +431,7 @@ static int gk20a_do_idle(void *_g)
  */
 int __gk20a_do_unidle(struct gk20a *g)
 {
+	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
 	struct device *dev = dev_from_gk20a(g);
 	struct gk20a_platform *platform = dev_get_drvdata(dev);
 	int err;
@@ -453,7 +456,7 @@ int __gk20a_do_unidle(struct gk20a *g)
 
 	/* release the lock and open up all other busy() calls */
 	nvgpu_mutex_release(&platform->railgate_lock);
-	up_write(&g->busy_lock);
+	up_write(&l->busy_lock);
 
 	gk20a_channel_deterministic_unidle(g);
 
@@ -887,12 +890,12 @@ void gk20a_driver_start_unload(struct gk20a *g)
 
 	gk20a_dbg(gpu_dbg_shutdown, "Driver is now going down!\n");
 
-	down_write(&g->busy_lock);
+	down_write(&l->busy_lock);
 	__nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true);
 	/* GR SW ready needs to be invalidated at this time with the busy lock
 	 * held to prevent a racing condition on the gr/mm code */
 	g->gr.sw_ready = false;
-	up_write(&g->busy_lock);
+	up_write(&l->busy_lock);
 
 	if (g->is_virtual)
 		return;
@@ -110,6 +110,8 @@ struct nvgpu_os_linux {
 	struct dentry *debugfs_dump_ctxsw_stats;
 #endif
 	struct gk20a_cde_app cde_app;
+
+	struct rw_semaphore busy_lock;
 };
 
 static inline struct nvgpu_os_linux *nvgpu_os_linux_from_gk20a(struct gk20a *g)
@@ -1084,7 +1084,6 @@ struct gk20a {
 	u32 log_mask;
 	u32 log_trace;
 
-	struct rw_semaphore busy_lock;
 	/*
 	 * Guards access to hardware when usual gk20a_{busy,idle} are skipped
 	 * for submits and held for channel lifetime but dropped for an ongoing
@@ -712,7 +712,7 @@ int vgpu_probe(struct platform_device *pdev)
 
 	vgpu_init_vars(gk20a, platform);
 
-	init_rwsem(&gk20a->busy_lock);
+	init_rwsem(&l->busy_lock);
 
 	nvgpu_spinlock_init(&gk20a->mc_enable_lock);
 