Open source GPL/LGPL release

svcmobrel-release
2022-07-21 16:03:29 -07:00
commit f338182221
2260 changed files with 576813 additions and 0 deletions


@@ -0,0 +1,82 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/trace.h>
#include <nvgpu/mm.h>
#include <nvgpu/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/nvgpu_init.h>
#include <nvgpu/timers.h>
#include <nvgpu/hw/gk20a/hw_flush_gk20a.h>
#include "flush_gk20a.h"
#ifdef CONFIG_NVGPU_COMPRESSION
void gk20a_mm_cbc_clean(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	u32 data;
	struct nvgpu_timeout timeout;
	u32 retries = 200;

	nvgpu_log_fn(g, " ");

	gk20a_busy_noresume(g);
	if (nvgpu_is_powered_off(g)) {
		goto hw_was_off;
	}

	if (g->ops.mm.get_flush_retries != NULL) {
		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_CBC_CLEAN);
	}

	nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
					NVGPU_TIMER_RETRY_TIMER) == 0);

	nvgpu_mutex_acquire(&mm->l2_op_lock);

	/* Flush all dirty lines from the CBC to L2 */
	nvgpu_writel(g, flush_l2_clean_comptags_r(),
		flush_l2_clean_comptags_pending_busy_f());

	do {
		data = nvgpu_readl(g, flush_l2_clean_comptags_r());

		if (flush_l2_clean_comptags_outstanding_v(data) ==
			flush_l2_clean_comptags_outstanding_true_v() ||
		    flush_l2_clean_comptags_pending_v(data) ==
			flush_l2_clean_comptags_pending_busy_v()) {
			nvgpu_log_info(g, "l2_clean_comptags 0x%x", data);
			nvgpu_udelay(5);
		} else {
			break;
		}
	} while (nvgpu_timeout_expired_msg(&timeout,
			"l2_clean_comptags too many retries") == 0);

	nvgpu_mutex_release(&mm->l2_op_lock);

hw_was_off:
	gk20a_idle_nosuspend(g);
}
#endif
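
Every flush entry point in this change follows the same hardware handshake: write the pending_busy trigger value to the flush register, then poll that same register until both the outstanding and pending fields read idle, sleeping 5 us between reads under a retry-counted timeout. A minimal standalone sketch of that pattern, assuming illustrative mmio_read32/mmio_write32/udelay_us stand-ins (none of these names, nor the helper itself, are nvgpu APIs):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the real nvgpu register accessors and
 * delay; none of these names exist in nvgpu. */
extern uint32_t mmio_read32(uintptr_t reg);
extern void mmio_write32(uintptr_t reg, uint32_t val);
extern void udelay_us(unsigned int usec);

/*
 * Write the "pending busy" trigger to a flush register, then poll until
 * the bits in busy_mask clear. Returns true on completion and false
 * after max_retries polls -- the same shape as the loops in this commit.
 */
static bool flush_trigger_and_poll(uintptr_t reg, uint32_t trigger,
				   uint32_t busy_mask,
				   unsigned int max_retries)
{
	unsigned int i;

	mmio_write32(reg, trigger);

	for (i = 0U; i < max_retries; i++) {
		if ((mmio_read32(reg) & busy_mask) == 0U) {
			return true;	/* flush completed */
		}
		udelay_us(5);	/* 5 us backoff, as in the driver code */
	}

	return false;	/* "too many retries" */
}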


@@ -0,0 +1,37 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef HAL_MM_FLUSH_FLUSH_GK20A_H
#define HAL_MM_FLUSH_FLUSH_GK20A_H

#include <nvgpu/types.h>

struct gk20a;

int gk20a_mm_fb_flush(struct gk20a *g);
int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate);
#ifdef CONFIG_NVGPU_COMPRESSION
void gk20a_mm_cbc_clean(struct gk20a *g);
#endif
void gk20a_mm_l2_invalidate(struct gk20a *g);

#endif /* HAL_MM_FLUSH_FLUSH_GK20A_H */
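
These gk20a entry points are reached through the per-GPU HAL rather than called directly; the gv11b code further down, for example, dispatches via g->ops.mm.cache.fb_flush. A hedged sketch of how a chip's HAL setup might wire this header into that ops struct (only the fb_flush field is confirmed by this commit; the other field names and the init function are assumptions for illustration):

/* Hypothetical wiring sketch: only g->ops.mm.cache.fb_flush is
 * confirmed by the gv11b caller below; the remaining field names and
 * this init function are assumed for illustration. */
static void example_init_mm_cache_hal(struct gk20a *g)
{
	g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;
	g->ops.mm.cache.l2_flush = gk20a_mm_l2_flush;
	g->ops.mm.cache.l2_invalidate = gk20a_mm_l2_invalidate;
#ifdef CONFIG_NVGPU_COMPRESSION
	g->ops.mm.cache.cbc_clean = gk20a_mm_cbc_clean;
#endif
}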


@@ -0,0 +1,227 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/trace.h>
#include <nvgpu/mm.h>
#include <nvgpu/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/nvgpu_init.h>
#include <nvgpu/timers.h>
#include <nvgpu/hw/gk20a/hw_flush_gk20a.h>
#include "flush_gk20a.h"
int gk20a_mm_fb_flush(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	u32 data;
	int ret = 0;
	struct nvgpu_timeout timeout;
	u32 retries;

	nvgpu_log_fn(g, " ");

	gk20a_busy_noresume(g);
	if (nvgpu_is_powered_off(g)) {
		gk20a_idle_nosuspend(g);
		return 0;
	}

	retries = 100;

	if (g->ops.mm.get_flush_retries != NULL) {
		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_FB);
	}

	nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
					NVGPU_TIMER_RETRY_TIMER) == 0);

	nvgpu_mutex_acquire(&mm->l2_op_lock);

	/* Make sure all previous writes are committed to the L2. There's no
	   guarantee that writes are to DRAM. This will be a sysmembar internal
	   to the L2. */
#ifdef CONFIG_NVGPU_TRACE
	trace_gk20a_mm_fb_flush(g->name);
#endif

	nvgpu_writel(g, flush_fb_flush_r(),
		flush_fb_flush_pending_busy_f());

	do {
		data = nvgpu_readl(g, flush_fb_flush_r());

		if ((flush_fb_flush_outstanding_v(data) ==
			flush_fb_flush_outstanding_true_v()) ||
		    (flush_fb_flush_pending_v(data) ==
			flush_fb_flush_pending_busy_v())) {
			nvgpu_log_info(g, "fb_flush 0x%x", data);
			nvgpu_udelay(5);
		} else {
			break;
		}
	} while (nvgpu_timeout_expired(&timeout) == 0);

	if (nvgpu_timeout_peek_expired(&timeout)) {
		if (g->ops.fb.dump_vpr_info != NULL) {
			g->ops.fb.dump_vpr_info(g);
		}
		if (g->ops.fb.dump_wpr_info != NULL) {
			g->ops.fb.dump_wpr_info(g);
		}
		ret = -EBUSY;
	}

#ifdef CONFIG_NVGPU_TRACE
	trace_gk20a_mm_fb_flush_done(g->name);
#endif

	nvgpu_mutex_release(&mm->l2_op_lock);

	gk20a_idle_nosuspend(g);

	return ret;
}
static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
{
	u32 data;
	struct nvgpu_timeout timeout;
	u32 retries = 200;

#ifdef CONFIG_NVGPU_TRACE
	trace_gk20a_mm_l2_invalidate(g->name);
#endif

	if (g->ops.mm.get_flush_retries != NULL) {
		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_INV);
	}

	nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
					NVGPU_TIMER_RETRY_TIMER) == 0);

	/* Invalidate any clean lines from the L2 so subsequent reads go to
	   DRAM. Dirty lines are not affected by this operation. */
	nvgpu_writel(g, flush_l2_system_invalidate_r(),
		flush_l2_system_invalidate_pending_busy_f());

	do {
		data = nvgpu_readl(g, flush_l2_system_invalidate_r());

		if ((flush_l2_system_invalidate_outstanding_v(data) ==
			flush_l2_system_invalidate_outstanding_true_v()) ||
		    (flush_l2_system_invalidate_pending_v(data) ==
			flush_l2_system_invalidate_pending_busy_v())) {
			nvgpu_log_info(g, "l2_system_invalidate 0x%x", data);
			nvgpu_udelay(5);
		} else {
			break;
		}
	} while (nvgpu_timeout_expired(&timeout) == 0);

	if (nvgpu_timeout_peek_expired(&timeout)) {
		nvgpu_warn(g, "l2_system_invalidate too many retries");
	}

#ifdef CONFIG_NVGPU_TRACE
	trace_gk20a_mm_l2_invalidate_done(g->name);
#endif
}
void gk20a_mm_l2_invalidate(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;

	gk20a_busy_noresume(g);
	if (!nvgpu_is_powered_off(g)) {
		nvgpu_mutex_acquire(&mm->l2_op_lock);
		gk20a_mm_l2_invalidate_locked(g);
		nvgpu_mutex_release(&mm->l2_op_lock);
	}
	gk20a_idle_nosuspend(g);
}
int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
{
	struct mm_gk20a *mm = &g->mm;
	u32 data;
	struct nvgpu_timeout timeout;
	u32 retries = 2000;
	int err = -ETIMEDOUT;

	nvgpu_log_fn(g, " ");

	gk20a_busy_noresume(g);
	if (nvgpu_is_powered_off(g)) {
		gk20a_idle_nosuspend(g);
		return 0;
	}

	if (g->ops.mm.get_flush_retries != NULL) {
		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_FLUSH);
	}

	nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
					NVGPU_TIMER_RETRY_TIMER) == 0);

	nvgpu_mutex_acquire(&mm->l2_op_lock);

#ifdef CONFIG_NVGPU_TRACE
	trace_gk20a_mm_l2_flush(g->name);
#endif

	/* Flush all dirty lines from the L2 to DRAM. Lines are left in the L2
	   as clean, so subsequent reads might hit in the L2. */
	nvgpu_writel(g, flush_l2_flush_dirty_r(),
		flush_l2_flush_dirty_pending_busy_f());

	do {
		data = nvgpu_readl(g, flush_l2_flush_dirty_r());

		if ((flush_l2_flush_dirty_outstanding_v(data) ==
			flush_l2_flush_dirty_outstanding_true_v()) ||
		    (flush_l2_flush_dirty_pending_v(data) ==
			flush_l2_flush_dirty_pending_busy_v())) {
			nvgpu_log_info(g, "l2_flush_dirty 0x%x", data);
			nvgpu_udelay(5);
		} else {
			err = 0;
			break;
		}
	} while (nvgpu_timeout_expired_msg(&timeout,
			"l2_flush_dirty too many retries") == 0);

#ifdef CONFIG_NVGPU_TRACE
	trace_gk20a_mm_l2_flush_done(g->name);
#endif

	if (invalidate) {
		gk20a_mm_l2_invalidate_locked(g);
	}

	nvgpu_mutex_release(&mm->l2_op_lock);

	gk20a_idle_nosuspend(g);

	return err;
}
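
A caller that needs CPU-written data visible to the GPU (or vice versa) typically flushes dirty lines and drops clean ones in a single call by passing invalidate=true. A minimal usage sketch, assuming a powered gk20a pointer g is already in scope:

	/* Push dirty L2 lines to DRAM and invalidate clean lines so the
	 * next access misses to memory; returns 0 on success or
	 * -ETIMEDOUT if the poll loop above runs out of retries. */
	int err = gk20a_mm_l2_flush(g, true);

	if (err != 0) {
		nvgpu_err(g, "L2 flush + invalidate failed: %d", err);
	}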


@@ -0,0 +1,32 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef HAL_MM_FLUSH_FLUSH_GV11B_H
#define HAL_MM_FLUSH_FLUSH_GV11B_H

#include <nvgpu/types.h>

struct gk20a;

int gv11b_mm_l2_flush(struct gk20a *g, bool invalidate);

#endif /* HAL_MM_FLUSH_FLUSH_GV11B_H */


@@ -0,0 +1,62 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/hw/gv11b/hw_flush_gv11b.h>
#include "flush_gk20a.h"
#include "flush_gv11b.h"
int gv11b_mm_l2_flush(struct gk20a *g, bool invalidate)
{
	int err = 0;

	nvgpu_log(g, gpu_dbg_fn, "gv11b_mm_l2_flush");

	/* First commit any outstanding writes to the L2. */
	err = g->ops.mm.cache.fb_flush(g);
	if (err != 0) {
		nvgpu_err(g, "mm.cache.fb_flush()[1] failed err=%d", err);
		return err;
	}

	/* Then run the common gk20a L2 dirty flush (plus the optional
	   clean-line invalidate). */
	err = gk20a_mm_l2_flush(g, invalidate);
	if (err != 0) {
		nvgpu_err(g, "gk20a_mm_l2_flush failed");
		return err;
	}

	if (g->ops.bus.bar1_bind != NULL) {
		err = g->ops.fb.tlb_invalidate(g, g->mm.bar1.vm->pdb.mem);
		if (err != 0) {
			nvgpu_err(g, "fb.tlb_invalidate() failed err=%d", err);
			return err;
		}
	} else {
		/* Without BAR1, fall back to a second FB flush instead of
		   the TLB invalidate. */
		err = g->ops.mm.cache.fb_flush(g);
		if (err != 0) {
			nvgpu_err(g, "mm.cache.fb_flush()[2] failed err=%d",
				err);
			return err;
		}
	}

	return err;
}
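
On gv11b the L2 flush is thus a three-step sequence: an FB flush to commit outstanding L2 writes, the common gk20a L2 flush, then either a BAR1 TLB invalidate (when bar1_bind is wired) or a second FB flush. Chip-independent code would invoke it through the HAL; a sketch, assuming gv11b_mm_l2_flush has been installed as an l2_flush op (that field name is an assumption, only fb_flush is confirmed by this commit):

	/* Assumed HAL dispatch; the l2_flush field name is illustrative. */
	int err = g->ops.mm.cache.l2_flush(g, false);

	if (err != 0) {
		nvgpu_err(g, "gv11b L2 flush sequence failed: %d", err);
	}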