Revert "gpu: nvgpu: correct usage for gk20a_busy_noresume"

This reverts commit c1ea9e3955.

Reason for revert: ap_vulkan, ap_opengles, ap_mods test failures
Bug 3661058
Bug 3661080 
Bug 3659004 

Change-Id: I929b5675a4fb0ddc8cbf3eeefc982b4ba04ddc59
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2718996
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Author: Krishna Reddy <vdumpa@nvidia.com>
Date:   2022-05-26 16:24:46 -07:00
Parent: a80c445a5d
Commit: 961925be02

9 changed files with 52 additions and 105 deletions


@@ -658,14 +658,31 @@ static int nvgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl,
 static void runlist_select_locked(struct gk20a *g, struct nvgpu_runlist *runlist,
 		struct nvgpu_runlist_domain *next_domain)
 {
+	int err;
 	rl_dbg(g, "Runlist[%u]: switching to domain %s",
 		runlist->id, next_domain->name);
 	runlist->domain = next_domain;
-	if (!gk20a_busy_noresume(g)) {
+	gk20a_busy_noresume(g);
+	if (nvgpu_is_powered_off(g)) {
 		rl_dbg(g, "Runlist[%u]: power is off, skip submit",
 			runlist->id);
+		gk20a_idle_nosuspend(g);
 		return;
 	}
+	err = gk20a_busy(g);
+	gk20a_idle_nosuspend(g);
+	if (err != 0) {
+		nvgpu_err(g, "failed to hold power for runlist submit");
+		/*
+		 * probably shutting down though, so don't bother propagating
+		 * the error. Power is already on when the domain scheduler is
+		 * actually in use.
+		 */
+		return;
+	}
@@ -678,7 +695,7 @@ static void runlist_select_locked(struct gk20a *g, struct nvgpu_runlist *runlist
 	 */
 	g->ops.runlist.hw_submit(g, runlist);
-	gk20a_idle_nosuspend(g);
+	gk20a_idle(g);
 }
 static void runlist_switch_domain_locked(struct gk20a *g,


@@ -413,9 +413,10 @@ int nvgpu_pmu_busy_cycles_norm(struct gk20a *g, u32 *norm)
 	u64 busy_cycles, total_cycles;
 	u32 intr_status;
-	if (!gk20a_busy_noresume(g)) {
+	gk20a_busy_noresume(g);
+	if (nvgpu_is_powered_off(g)) {
 		*norm = 0;
-		return 0;
+		goto exit;
 	}
 	if (g->ops.pmu.pmu_read_idle_counter == NULL ||


@@ -1,7 +1,7 @@
 /*
  * GA100 FB
  *
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -63,8 +63,9 @@ bool ga100_fb_is_comptagline_mode_enabled(struct gk20a *g)
 	u32 reg = 0U;
 	bool result = true;
-	if (!gk20a_busy_noresume(g)) {
-		return result;
+	gk20a_busy_noresume(g);
+	if (nvgpu_is_powered_off(g)) {
+		goto done;
 	}
 	reg = nvgpu_readl(g, fb_mmu_hypervisor_ctl_r());
@@ -72,6 +73,7 @@ bool ga100_fb_is_comptagline_mode_enabled(struct gk20a *g)
 	result = (fb_mmu_hypervisor_ctl_force_cbc_raw_mode_v(reg) ==
 			fb_mmu_hypervisor_ctl_force_cbc_raw_mode_disable_v());
 done:
+	gk20a_idle_nosuspend(g);
 	return result;
 }


@@ -88,7 +88,7 @@ int gm20b_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 	   hw. Use the power_on flag to skip tlb invalidation when gpu
 	   power is turned off */
-	if (!gk20a_busy_noresume(g)) {
+	if (nvgpu_is_powered_off(g)) {
 		return err;
 	}
@@ -150,9 +150,6 @@ int gm20b_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 out:
 	nvgpu_mutex_release(&g->mm.tlb_lock);
-	gk20a_idle_nosuspend(g);
 	return err;
 }


@@ -56,8 +56,8 @@ int fb_tu104_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 	 * hw. Use the power_on flag to skip tlb invalidation when gpu
 	 * power is turned off
 	 */
-	if (!gk20a_busy_noresume(g)) {
-		return err;
+	if (nvgpu_is_powered_off(g)) {
+		return 0;
 	}
 	addr_lo = u64_lo32(nvgpu_mem_get_addr(g, pdb) >> 12);
@@ -101,9 +101,6 @@ int fb_tu104_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 #endif
 	nvgpu_mutex_release(&g->mm.tlb_lock);
-	gk20a_idle_nosuspend(g);
 	return err;
 }


@@ -41,7 +41,9 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 	nvgpu_log(g, gpu_dbg_mm, " ");
-	if (!gk20a_busy_noresume(g)) {
+	gk20a_busy_noresume(g);
+	if (nvgpu_is_powered_off(g)) {
+		gk20a_idle_nosuspend(g);
 		return 0;
 	}
@@ -149,12 +151,13 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
 void gk20a_mm_l2_invalidate(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
-	if (gk20a_busy_noresume(g)) {
+	gk20a_busy_noresume(g);
+	if (!nvgpu_is_powered_off(g)) {
 		nvgpu_mutex_acquire(&mm->l2_op_lock);
 		gk20a_mm_l2_invalidate_locked(g);
 		nvgpu_mutex_release(&mm->l2_op_lock);
-		gk20a_idle_nosuspend(g);
 	}
+	gk20a_idle_nosuspend(g);
 }
 int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
@@ -167,7 +170,9 @@ int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 	nvgpu_log(g, gpu_dbg_mm, " ");
-	if (!gk20a_busy_noresume(g)) {
+	gk20a_busy_noresume(g);
+	if (nvgpu_is_powered_off(g)) {
+		gk20a_idle_nosuspend(g);
 		return 0;
 	}
@@ -229,8 +234,9 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
-	if (!gk20a_busy_noresume(g)) {
-		return;
+	gk20a_busy_noresume(g);
+	if (nvgpu_is_powered_off(g)) {
+		goto hw_was_off;
 	}
 	if (g->ops.mm.get_flush_retries != NULL) {
@@ -262,6 +268,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
 	nvgpu_mutex_release(&mm->l2_op_lock);
+hw_was_off:
 	gk20a_idle_nosuspend(g);
 }
 #endif


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -353,7 +353,7 @@ int nvgpu_init_gpu_characteristics(struct gk20a *g);
  *
  * @param g [in] The GPU
  */
-bool gk20a_busy_noresume(struct gk20a *g);
+void gk20a_busy_noresume(struct gk20a *g);
 /**
  * @brief Drops a reference for gpu. Does nothing for safety.


@@ -168,63 +168,9 @@ struct device_node *nvgpu_get_node(struct gk20a *g)
 	return dev->of_node;
 }
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
-static bool gk20a_pm_runtime_get_if_in_use(struct gk20a *g)
+void gk20a_busy_noresume(struct gk20a *g)
 {
-	int ret = pm_runtime_get_if_active(dev_from_gk20a(g), true);
-	if (ret == 1) {
-		return true;
-	} else {
-		return false;
-	}
-}
-#endif
-static bool gk20a_busy_noresume_legacy(struct gk20a *g)
-{
-	struct device *dev = dev_from_gk20a(g);;
-	pm_runtime_get_noresume(dev);
-	if (nvgpu_is_powered_off(g)) {
-		pm_runtime_put_noidle(dev);
-		return false;
-	} else {
-		return true;
-	}
-}
-/*
- * Cases:
- * 1) For older than Kernel 5.8, use legacy
- *    i.e. gk20a_busy_noresume_legacy()
- * 2) else if pm_runtime_disabled (e.g. VGPU, DGPU)
- *    use legacy.
- * 3) Else use gk20a_pm_runtime_get_if_in_use()
- */
-bool gk20a_busy_noresume(struct gk20a *g)
-{
-	struct device *dev;
-	if (!g)
-		return false;
-	dev = dev_from_gk20a(g);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
-	if (pm_runtime_enabled(dev)) {
-		if (gk20a_pm_runtime_get_if_in_use(g)) {
-			atomic_inc(&g->usage_count.atomic_var);
-			return true;
-		} else {
-			return false;
-		}
-	} else {
-		/* VGPU, DGPU */
-		return gk20a_busy_noresume_legacy(g);
-	}
-#else
-	return gk20a_busy_noresume_legacy(g);
-#endif
+	pm_runtime_get_noresume(dev_from_gk20a(g));
 }
 int gk20a_busy(struct gk20a *g)
@@ -276,17 +222,7 @@ fail:
 void gk20a_idle_nosuspend(struct gk20a *g)
 {
-	struct device *dev = dev_from_gk20a(g);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
-	if (pm_runtime_enabled(dev)) {
-		gk20a_idle(g);
-	} else {
-		pm_runtime_put_noidle(dev);
-	}
-#else
-	pm_runtime_put_noidle(dev);
-#endif
+	pm_runtime_put_noidle(dev_from_gk20a(g));
 }
 void gk20a_idle(struct gk20a *g)
@@ -2005,14 +1941,6 @@ int nvgpu_remove(struct device *dev)
 	err = nvgpu_quiesce(g);
 	WARN(err, "gpu failed to idle during driver removal");
-	/**
-	 * nvgpu_quiesce has been invoked already, disable pm runtime.
-	 * Informs PM domain that its safe to power down the h/w now.
-	 * Anything after this is just software deinit. Any cache/tlb
-	 * flush/invalidate must have already happened before this.
-	 */
-	gk20a_pm_deinit(dev);
 	if (nvgpu_mem_is_valid(&g->syncpt_mem))
 		nvgpu_dma_free(g, &g->syncpt_mem);
@@ -2073,6 +2001,8 @@ static int __exit gk20a_remove(struct platform_device *pdev)
 	nvgpu_put(g);
+	gk20a_pm_deinit(dev);
 	return err;
 }


@@ -116,13 +116,9 @@ void nvgpu_disable_irqs(struct gk20a *g)
 /*
  * We have no runtime PM stuff in userspace so these are really just noops.
  */
-bool gk20a_busy_noresume(struct gk20a *g)
+void gk20a_busy_noresume(struct gk20a *g)
 {
-	if (nvgpu_is_powered_off(g)) {
-		return false;
-	} else {
-		return true;
-	}
+	(void)g;
 }
 void gk20a_idle_nosuspend(struct gk20a *g)