gpu: nvgpu: correct usage for gk20a_busy_noresume

Background: In case of a deferred suspend implemented by gk20a_idle,
the device waits for a delay before suspending and invoking
power gating callbacks. This helps minimize resume latency for any
resume calls (gk20a_busy) that occur before the delay.

Now, some APIs spread across the driver require that if the device
is powered on, then they can proceed with register writes, but if it is
powered off, then they must return. Examples of such APIs include
l2_flush, fb_flush and even nvs_thread. We have relied on
some hacks to ensure the device is kept powered on to prevent any such
delayed suspension from proceeding. However, this still raced for some
calls like the l2_flush ioctl, so gk20a_busy() was added (refer to
commit ID dd341e7ecbaf65843cb8059f9d57a8be58952f63).

Upstream linux kernel has introduced the API pm_runtime_get_if_active
specifically to handle the corner case for locking the state during the
event of a deferred suspend.

According to the Linux kernel docs, invoking the API with
ign_usage_count parameter set to true, prevents an incoming suspend
if it has not already suspended.

With this, there is no longer a need to check whether
nvgpu_is_powered_off(). Changed the behavior of gk20a_busy_noresume()
to return bool. It returns true if and only if it managed to prevent
an imminent suspend; otherwise it returns false. For cases where
PM runtime is disabled, the code follows the existing implementation.

Added missing gk20a_busy_noresume() calls to tlb_invalidate.

Also, moved gk20a_pm_deinit to after nvgpu_quiesce() in
the module removal path. This is done to prevent regs access
after registers are locked out at the end of nvgpu_quiesce. This
can happen as some free function calls post quiesce might still
have l2_flush, fb_flush deep inside their stack, hence invoke
gk20a_pm_deinit to disable pm_runtime immediately after quiesce.

Kept the legacy implementation same for VGPU and
older kernels

Jira NVGPU-8487

Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Change-Id: I972f9afe577b670c44fc09e3177a5ce8a44ca338
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2715654
Reviewed-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Debarshi Dutta
2022-05-23 15:19:00 +05:30
committed by mobile promotions
parent a0b0acad05
commit c1ea9e3955
9 changed files with 105 additions and 52 deletions

View File

@@ -658,31 +658,14 @@ static int nvgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl,
static void runlist_select_locked(struct gk20a *g, struct nvgpu_runlist *runlist, static void runlist_select_locked(struct gk20a *g, struct nvgpu_runlist *runlist,
struct nvgpu_runlist_domain *next_domain) struct nvgpu_runlist_domain *next_domain)
{ {
int err;
rl_dbg(g, "Runlist[%u]: switching to domain %s", rl_dbg(g, "Runlist[%u]: switching to domain %s",
runlist->id, next_domain->name); runlist->id, next_domain->name);
runlist->domain = next_domain; runlist->domain = next_domain;
gk20a_busy_noresume(g); if (!gk20a_busy_noresume(g)) {
if (nvgpu_is_powered_off(g)) {
rl_dbg(g, "Runlist[%u]: power is off, skip submit", rl_dbg(g, "Runlist[%u]: power is off, skip submit",
runlist->id); runlist->id);
gk20a_idle_nosuspend(g);
return;
}
err = gk20a_busy(g);
gk20a_idle_nosuspend(g);
if (err != 0) {
nvgpu_err(g, "failed to hold power for runlist submit");
/*
* probably shutting down though, so don't bother propagating
* the error. Power is already on when the domain scheduler is
* actually in use.
*/
return; return;
} }
@@ -695,7 +678,7 @@ static void runlist_select_locked(struct gk20a *g, struct nvgpu_runlist *runlist
*/ */
g->ops.runlist.hw_submit(g, runlist); g->ops.runlist.hw_submit(g, runlist);
gk20a_idle(g); gk20a_idle_nosuspend(g);
} }
static void runlist_switch_domain_locked(struct gk20a *g, static void runlist_switch_domain_locked(struct gk20a *g,

View File

@@ -413,10 +413,9 @@ int nvgpu_pmu_busy_cycles_norm(struct gk20a *g, u32 *norm)
u64 busy_cycles, total_cycles; u64 busy_cycles, total_cycles;
u32 intr_status; u32 intr_status;
gk20a_busy_noresume(g); if (!gk20a_busy_noresume(g)) {
if (nvgpu_is_powered_off(g)) {
*norm = 0; *norm = 0;
goto exit; return 0;
} }
if (g->ops.pmu.pmu_read_idle_counter == NULL || if (g->ops.pmu.pmu_read_idle_counter == NULL ||

View File

@@ -1,7 +1,7 @@
/* /*
* GA100 FB * GA100 FB
* *
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -63,9 +63,8 @@ bool ga100_fb_is_comptagline_mode_enabled(struct gk20a *g)
u32 reg = 0U; u32 reg = 0U;
bool result = true; bool result = true;
gk20a_busy_noresume(g); if (!gk20a_busy_noresume(g)) {
if (nvgpu_is_powered_off(g)) { return result;
goto done;
} }
reg = nvgpu_readl(g, fb_mmu_hypervisor_ctl_r()); reg = nvgpu_readl(g, fb_mmu_hypervisor_ctl_r());
@@ -73,7 +72,6 @@ bool ga100_fb_is_comptagline_mode_enabled(struct gk20a *g)
result = (fb_mmu_hypervisor_ctl_force_cbc_raw_mode_v(reg) == result = (fb_mmu_hypervisor_ctl_force_cbc_raw_mode_v(reg) ==
fb_mmu_hypervisor_ctl_force_cbc_raw_mode_disable_v()); fb_mmu_hypervisor_ctl_force_cbc_raw_mode_disable_v());
done:
gk20a_idle_nosuspend(g); gk20a_idle_nosuspend(g);
return result; return result;
} }

View File

@@ -88,7 +88,7 @@ int gm20b_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
hw. Use the power_on flag to skip tlb invalidation when gpu hw. Use the power_on flag to skip tlb invalidation when gpu
power is turned off */ power is turned off */
if (nvgpu_is_powered_off(g)) { if (!gk20a_busy_noresume(g)) {
return err; return err;
} }
@@ -150,6 +150,9 @@ int gm20b_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
out: out:
nvgpu_mutex_release(&g->mm.tlb_lock); nvgpu_mutex_release(&g->mm.tlb_lock);
gk20a_idle_nosuspend(g);
return err; return err;
} }

View File

@@ -56,8 +56,8 @@ int fb_tu104_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
* hw. Use the power_on flag to skip tlb invalidation when gpu * hw. Use the power_on flag to skip tlb invalidation when gpu
* power is turned off * power is turned off
*/ */
if (nvgpu_is_powered_off(g)) { if (!gk20a_busy_noresume(g)) {
return 0; return err;
} }
addr_lo = u64_lo32(nvgpu_mem_get_addr(g, pdb) >> 12); addr_lo = u64_lo32(nvgpu_mem_get_addr(g, pdb) >> 12);
@@ -101,6 +101,9 @@ int fb_tu104_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
#endif #endif
nvgpu_mutex_release(&g->mm.tlb_lock); nvgpu_mutex_release(&g->mm.tlb_lock);
gk20a_idle_nosuspend(g);
return err; return err;
} }

View File

@@ -41,9 +41,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
nvgpu_log(g, gpu_dbg_mm, " "); nvgpu_log(g, gpu_dbg_mm, " ");
gk20a_busy_noresume(g); if (!gk20a_busy_noresume(g)) {
if (nvgpu_is_powered_off(g)) {
gk20a_idle_nosuspend(g);
return 0; return 0;
} }
@@ -151,13 +149,12 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
void gk20a_mm_l2_invalidate(struct gk20a *g) void gk20a_mm_l2_invalidate(struct gk20a *g)
{ {
struct mm_gk20a *mm = &g->mm; struct mm_gk20a *mm = &g->mm;
gk20a_busy_noresume(g); if (gk20a_busy_noresume(g)) {
if (!nvgpu_is_powered_off(g)) {
nvgpu_mutex_acquire(&mm->l2_op_lock); nvgpu_mutex_acquire(&mm->l2_op_lock);
gk20a_mm_l2_invalidate_locked(g); gk20a_mm_l2_invalidate_locked(g);
nvgpu_mutex_release(&mm->l2_op_lock); nvgpu_mutex_release(&mm->l2_op_lock);
}
gk20a_idle_nosuspend(g); gk20a_idle_nosuspend(g);
}
} }
int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate) int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
@@ -170,9 +167,7 @@ int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
nvgpu_log(g, gpu_dbg_mm, " "); nvgpu_log(g, gpu_dbg_mm, " ");
gk20a_busy_noresume(g); if (!gk20a_busy_noresume(g)) {
if (nvgpu_is_powered_off(g)) {
gk20a_idle_nosuspend(g);
return 0; return 0;
} }
@@ -234,9 +229,8 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
gk20a_busy_noresume(g); if (!gk20a_busy_noresume(g)) {
if (nvgpu_is_powered_off(g)) { return;
goto hw_was_off;
} }
if (g->ops.mm.get_flush_retries != NULL) { if (g->ops.mm.get_flush_retries != NULL) {
@@ -268,7 +262,6 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
nvgpu_mutex_release(&mm->l2_op_lock); nvgpu_mutex_release(&mm->l2_op_lock);
hw_was_off:
gk20a_idle_nosuspend(g); gk20a_idle_nosuspend(g);
} }
#endif #endif

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -353,7 +353,7 @@ int nvgpu_init_gpu_characteristics(struct gk20a *g);
* *
* @param g [in] The GPU * @param g [in] The GPU
*/ */
void gk20a_busy_noresume(struct gk20a *g); bool gk20a_busy_noresume(struct gk20a *g);
/** /**
* @brief Drops a reference for gpu. Does nothing for safety. * @brief Drops a reference for gpu. Does nothing for safety.

View File

@@ -168,9 +168,63 @@ struct device_node *nvgpu_get_node(struct gk20a *g)
return dev->of_node; return dev->of_node;
} }
void gk20a_busy_noresume(struct gk20a *g) #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
static bool gk20a_pm_runtime_get_if_in_use(struct gk20a *g)
{ {
pm_runtime_get_noresume(dev_from_gk20a(g)); int ret = pm_runtime_get_if_active(dev_from_gk20a(g), true);
if (ret == 1) {
return true;
} else {
return false;
}
}
#endif
static bool gk20a_busy_noresume_legacy(struct gk20a *g)
{
struct device *dev = dev_from_gk20a(g);;
pm_runtime_get_noresume(dev);
if (nvgpu_is_powered_off(g)) {
pm_runtime_put_noidle(dev);
return false;
} else {
return true;
}
}
/*
* Cases:
* 1) For older than Kernel 5.8, use legacy
* i.e. gk20a_busy_noresume_legacy()
* 2) else if pm_runtime_disabled (e.g. VGPU, DGPU)
* use legacy.
* 3) Else use gk20a_pm_runtime_get_if_in_use()
*/
bool gk20a_busy_noresume(struct gk20a *g)
{
struct device *dev;
if (!g)
return false;
dev = dev_from_gk20a(g);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
if (pm_runtime_enabled(dev)) {
if (gk20a_pm_runtime_get_if_in_use(g)) {
atomic_inc(&g->usage_count.atomic_var);
return true;
} else {
return false;
}
} else {
/* VGPU, DGPU */
return gk20a_busy_noresume_legacy(g);
}
#else
return gk20a_busy_noresume_legacy(g);
#endif
} }
int gk20a_busy(struct gk20a *g) int gk20a_busy(struct gk20a *g)
@@ -222,7 +276,17 @@ fail:
void gk20a_idle_nosuspend(struct gk20a *g) void gk20a_idle_nosuspend(struct gk20a *g)
{ {
pm_runtime_put_noidle(dev_from_gk20a(g)); struct device *dev = dev_from_gk20a(g);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
if (pm_runtime_enabled(dev)) {
gk20a_idle(g);
} else {
pm_runtime_put_noidle(dev);
}
#else
pm_runtime_put_noidle(dev);
#endif
} }
void gk20a_idle(struct gk20a *g) void gk20a_idle(struct gk20a *g)
@@ -1941,6 +2005,14 @@ int nvgpu_remove(struct device *dev)
err = nvgpu_quiesce(g); err = nvgpu_quiesce(g);
WARN(err, "gpu failed to idle during driver removal"); WARN(err, "gpu failed to idle during driver removal");
/**
* nvgpu_quiesce has been invoked already, disable pm runtime.
* Informs PM domain that its safe to power down the h/w now.
* Anything after this is just software deinit. Any cache/tlb
* flush/invalidate must have already happened before this.
*/
gk20a_pm_deinit(dev);
if (nvgpu_mem_is_valid(&g->syncpt_mem)) if (nvgpu_mem_is_valid(&g->syncpt_mem))
nvgpu_dma_free(g, &g->syncpt_mem); nvgpu_dma_free(g, &g->syncpt_mem);
@@ -2001,8 +2073,6 @@ static int __exit gk20a_remove(struct platform_device *pdev)
nvgpu_put(g); nvgpu_put(g);
gk20a_pm_deinit(dev);
return err; return err;
} }

View File

@@ -116,9 +116,13 @@ void nvgpu_disable_irqs(struct gk20a *g)
/* /*
* We have no runtime PM stuff in userspace so these are really just noops. * We have no runtime PM stuff in userspace so these are really just noops.
*/ */
void gk20a_busy_noresume(struct gk20a *g) bool gk20a_busy_noresume(struct gk20a *g)
{ {
(void)g; if (nvgpu_is_powered_off(g)) {
return false;
} else {
return true;
}
} }
void gk20a_idle_nosuspend(struct gk20a *g) void gk20a_idle_nosuspend(struct gk20a *g)