gpu: nvgpu: Add VIDMEM debugging

Add some VIDMEM debugging to help track the background free
thread and allocs/frees.

JIRA NVGPU-30
JIRA NVGPU-138

Change-Id: I88471b29d2a42c104666b111d0d3014110c9d56c
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1576330
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
This commit is contained in:
Alex Waterman
2017-10-09 17:45:02 -07:00
committed by mobile promotions
parent e26ce10cc6
commit 8aacfb1da4
4 changed files with 50 additions and 3 deletions

View File

@@ -77,7 +77,8 @@ static void gk20a_vidbuf_release(struct dma_buf *dmabuf)
struct nvgpu_vidmem_linux *linux_buf = buf->priv;
struct gk20a *g = buf->g;
gk20a_dbg_fn("");
vidmem_dbg(g, "Releasing Linux VIDMEM buf: dmabuf=0x%p size=%zuKB",
dmabuf, buf->mem->size >> 10);
if (linux_buf && linux_buf->dmabuf_priv_delete)
linux_buf->dmabuf_priv_delete(linux_buf->dmabuf_priv);
@@ -202,6 +203,9 @@ int nvgpu_vidmem_export_linux(struct gk20a *g, size_t bytes)
/* fclose() on this drops one ref, freeing the dma buf */
fd_install(fd, priv->dmabuf->file);
vidmem_dbg(g, "Alloced Linux VIDMEM buf: dmabuf=0x%p size=%zuKB",
priv->dmabuf, buf->mem->size >> 10);
return fd;
fail:
@@ -209,6 +213,7 @@ fail:
nvgpu_kfree(g, priv);
gk20a_put(g);
vidmem_dbg(g, "Failed to alloc Linux VIDMEM buf: %d", err);
return err;
}

View File

@@ -85,6 +85,8 @@ static int __nvgpu_vidmem_do_clear_all(struct gk20a *g)
if (mm->vidmem.ce_ctx_id == (u32)~0)
return -EINVAL;
vidmem_dbg(g, "Clearing all VIDMEM:");
err = gk20a_ce_execute_ops(g,
mm->vidmem.ce_ctx_id,
0,
@@ -144,6 +146,8 @@ static int __nvgpu_vidmem_do_clear_all(struct gk20a *g)
mm->vidmem.cleared = true;
vidmem_dbg(g, "Done!");
return 0;
}
@@ -163,16 +167,24 @@ void nvgpu_vidmem_thread_pause_sync(struct mm_gk20a *mm)
*/
if (nvgpu_atomic_inc_return(&mm->vidmem.pause_count) == 1)
nvgpu_mutex_acquire(&mm->vidmem.clearing_thread_lock);
vidmem_dbg(mm->g, "Clearing thread paused; new count=%d",
nvgpu_atomic_read(&mm->vidmem.pause_count));
}
/*
 * Undo one nvgpu_vidmem_thread_pause_sync() call. Pause/unpause calls nest
 * via vidmem.pause_count; only the final decrement (1 -> 0) actually drops
 * clearing_thread_lock and lets the background clearing thread run again.
 */
void nvgpu_vidmem_thread_unpause(struct mm_gk20a *mm)
{
	vidmem_dbg(mm->g, "Unpausing clearing thread; current count=%d",
		   nvgpu_atomic_read(&mm->vidmem.pause_count));

	/*
	 * And on the last decrement (1 -> 0) release the pause lock and let
	 * the vidmem clearing thread continue.
	 */
	if (nvgpu_atomic_dec_return(&mm->vidmem.pause_count) == 0) {
		nvgpu_mutex_release(&mm->vidmem.clearing_thread_lock);
		vidmem_dbg(mm->g, " > Clearing thread really unpaused!");
	}
}
int nvgpu_vidmem_clear_list_enqueue(struct gk20a *g, struct nvgpu_mem *mem)
@@ -222,6 +234,8 @@ static void nvgpu_vidmem_clear_pending_allocs(struct mm_gk20a *mm)
struct gk20a *g = mm->g;
struct nvgpu_mem *mem;
vidmem_dbg(g, "Running VIDMEM clearing thread:");
while ((mem = nvgpu_vidmem_clear_list_dequeue(mm)) != NULL) {
nvgpu_vidmem_clear(g, mem);
@@ -233,6 +247,8 @@ static void nvgpu_vidmem_clear_pending_allocs(struct mm_gk20a *mm)
__nvgpu_mem_free_vidmem_alloc(g, mem);
nvgpu_kfree(g, mem);
}
vidmem_dbg(g, "Done!");
}
static int nvgpu_vidmem_clear_pending_allocs_thr(void *mm_ptr)
@@ -295,6 +311,8 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
if (!size)
return 0;
vidmem_dbg(g, "init begin");
wpr_co.base = size - SZ_256M;
bootstrap_base = wpr_co.base;
bootstrap_size = SZ_16M;
@@ -354,7 +372,16 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
if (err)
goto fail;
gk20a_dbg_info("registered vidmem: %zu MB", size / SZ_1M);
vidmem_dbg(g, "VIDMEM Total: %zu MB", size >> 20);
vidmem_dbg(g, "VIDMEM Ranges:");
vidmem_dbg(g, " 0x%-10llx -> 0x%-10llx Primary",
mm->vidmem.base, mm->vidmem.base + mm->vidmem.size);
vidmem_dbg(g, " 0x%-10llx -> 0x%-10llx Bootstrap",
mm->vidmem.bootstrap_base,
mm->vidmem.bootstrap_base + mm->vidmem.bootstrap_size);
vidmem_dbg(g, "VIDMEM carveouts:");
vidmem_dbg(g, " 0x%-10llx -> 0x%-10llx %s",
wpr_co.base, wpr_co.base + wpr_co.length, wpr_co.name);
return 0;
@@ -393,6 +420,8 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
alloc = mem->vidmem_alloc;
vidmem_dbg(g, "Clearing VIDMEM buf:");
nvgpu_sgt_for_each_sgl(sgl, &alloc->sgt) {
if (gk20a_last_fence)
gk20a_fence_put(gk20a_last_fence);
@@ -415,6 +444,10 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
return err;
}
vidmem_dbg(g, " > [0x%llx +0x%llx]",
nvgpu_sgt_get_phys(&alloc->sgt, sgl),
nvgpu_sgt_get_length(&alloc->sgt, sgl));
gk20a_last_fence = gk20a_fence_out;
}
@@ -437,6 +470,8 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
"fence wait failed for CE execute ops");
}
vidmem_dbg(g, " Done");
return err;
}

View File

@@ -78,6 +78,7 @@ enum nvgpu_log_categories {
gpu_dbg_alloc = BIT(21), /* Allocator debugging. */
gpu_dbg_dma = BIT(22), /* DMA allocation prints. */
gpu_dbg_sgl = BIT(23), /* SGL related traces. */
gpu_dbg_vidmem = BIT(24), /* VIDMEM tracing. */
gpu_dbg_mem = BIT(31), /* memory accesses; very verbose. */
};

View File

@@ -142,4 +142,10 @@ static inline void nvgpu_vidmem_thread_unpause(struct mm_gk20a *mm)
#endif /* !defined(CONFIG_GK20A_VIDMEM) */
/*
 * Simple macro for VIDMEM debugging: routes through nvgpu_log() under the
 * gpu_dbg_vidmem category.
 *
 * NOTE: no trailing semicolon or line continuation in the expansion —
 * callers terminate the statement themselves, and a dangling backslash
 * here would splice the following preprocessor line into the macro body.
 */
#define vidmem_dbg(g, fmt, args...)				\
	nvgpu_log(g, gpu_dbg_vidmem, fmt, ##args)
#endif /* __NVGPU_VIDMEM_H__ */