gpu: nvgpu: remove broken force_pramin feature

The forced PRAMIN reads and writes for sysmem buffers haven't worked
since the PRAMIN access code was refactored to support vidmem-only sgt
allocs. The feature was only ever meant for testing and debugging PRAMIN
access during early dGPU bringup; that support is stable enough now, so
delete the broken feature instead of fixing it.

Change-Id: Ib31dae4550f3b6fea3c426a2e4ad126864bf85d2
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1723725
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 2788943d38 (parent d914e662ae)
Author:    Konsta Holtta
Committer: mobile promotions
Date:      2018-05-18 16:13:35 +03:00

6 changed files with 14 additions and 32 deletions
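With the override gone, every accessor in nvgpu_mem.c collapses to a plain
aperture dispatch, as the hunks below show. For orientation, here is a minimal
sketch of the resulting read path; it elides the WARN_ON and logging details,
and pramin_access_batch_rd_n is assumed to be the read counterpart of the
pramin_access_batch_wr_n visible in the diff:

u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
{
	u32 data = 0;

	if (mem->aperture == APERTURE_SYSMEM) {
		/* sysmem: plain CPU load through the kernel mapping */
		u32 *ptr = mem->cpu_va;

		data = ptr[w];
	} else if (mem->aperture == APERTURE_VIDMEM) {
		u32 *p = &data;

		/* vidmem: batched access through the 1 MB PRAMIN window */
		nvgpu_pramin_access_batched(g, mem, w * sizeof(u32),
				sizeof(u32), pramin_access_batch_rd_n, &p);
	}

	return data;
}

The point of the change is visible in this shape: the "|| g->mm.force_pramin"
and "&& !g->mm.force_pramin" terms were the only thing letting a sysmem buffer
take the vidmem branch, and once the refactored PRAMIN code could no longer
handle sysmem sgt allocs they were dead weight. Removing them leaves the two
branches keyed purely on the buffer's aperture.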


@@ -692,10 +692,6 @@ static int gk20a_submit_append_gpfifo(struct channel_gk20a *c,
	 * This path (from userspace to sysmem) is special in order to
	 * avoid two copies unnecessarily (from user to pipe, then from
	 * pipe to gpu sysmem buffer).
-	 *
-	 * As a special case, the pipe buffer exists if PRAMIN writes
-	 * are forced, although the buffers may not be in vidmem in
-	 * that case.
	 */
	if (end > gpfifo_size) {
		/* wrap-around */
@@ -723,8 +719,7 @@ static int gk20a_submit_append_gpfifo(struct channel_gk20a *c,
				0, num_entries);
		goto out;
	} else if (user_gpfifo) {
-		/* from userspace to vidmem or sysmem when pramin forced, use
-		 * the common copy path below */
+		/* from userspace to vidmem, use the common copy path below */
		err = copy_from_user(c->gpfifo.pipe, user_gpfifo, len);
		if (err)
			return err;


@@ -19,8 +19,4 @@

 void gk20a_mm_debugfs_init(struct gk20a *g)
 {
-	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-
-	debugfs_create_bool("force_pramin", 0664, l->debugfs,
-			    &g->mm.force_pramin);
 }


@@ -54,7 +54,7 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
			PAGE_KERNEL :
			pgprot_writecombine(PAGE_KERNEL);

-	if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
+	if (mem->aperture != APERTURE_SYSMEM)
		return 0;

	/*
@@ -91,7 +91,7 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)

 void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
 {
-	if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
+	if (mem->aperture != APERTURE_SYSMEM)
		return;

	/*
@@ -134,7 +134,7 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 {
	u32 data = 0;

-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+	if (mem->aperture == APERTURE_SYSMEM) {
		u32 *ptr = mem->cpu_va;

		WARN_ON(!ptr);
@@ -142,7 +142,7 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 #ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
		nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
 #endif
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+	} else if (mem->aperture == APERTURE_VIDMEM) {
		u32 value;
		u32 *p = &value;

@@ -170,7 +170,7 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
	WARN_ON(offset & 3);
	WARN_ON(size & 3);

-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+	if (mem->aperture == APERTURE_SYSMEM) {
		u8 *src = (u8 *)mem->cpu_va + offset;

		WARN_ON(!mem->cpu_va);
@@ -180,7 +180,7 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
		nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
			src, *dest, size);
 #endif
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+	} else if (mem->aperture == APERTURE_VIDMEM) {
		u32 *dest_u32 = dest;

		nvgpu_pramin_access_batched(g, mem, offset, size,
@@ -210,7 +210,7 @@ static void pramin_access_batch_wr_n(struct gk20a *g, u32 start, u32 words, u32

 void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
 {
-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+	if (mem->aperture == APERTURE_SYSMEM) {
		u32 *ptr = mem->cpu_va;

		WARN_ON(!ptr);
@@ -218,7 +218,7 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
		nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
 #endif
		ptr[w] = data;
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+	} else if (mem->aperture == APERTURE_VIDMEM) {
		u32 value = data;
		u32 *p = &value;

@@ -243,7 +243,7 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
	WARN_ON(offset & 3);
	WARN_ON(size & 3);

-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+	if (mem->aperture == APERTURE_SYSMEM) {
		u8 *dest = (u8 *)mem->cpu_va + offset;

		WARN_ON(!mem->cpu_va);
@@ -253,7 +253,7 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
			dest, *src, size);
 #endif
		memcpy(dest, src, size);
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+	} else if (mem->aperture == APERTURE_VIDMEM) {
		u32 *src_u32 = src;

		nvgpu_pramin_access_batched(g, mem, offset, size,
@@ -290,7 +290,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,

	c &= 0xff;

-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+	if (mem->aperture == APERTURE_SYSMEM) {
		u8 *dest = (u8 *)mem->cpu_va + offset;

		WARN_ON(!mem->cpu_va);
@@ -300,7 +300,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
			dest, c, size);
 #endif
		memset(dest, c, size);
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+	} else if (mem->aperture == APERTURE_VIDMEM) {
		u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);
		u32 *p = &repeat_value;


@@ -27,13 +27,6 @@

 #include "gk20a/gk20a.h"

-/*
- * Flip this to force all gk20a_mem* accesses via PRAMIN from the start of the
- * boot, even for buffers that would work via cpu_va. In runtime, the flag is
- * in debugfs, called "force_pramin".
- */
-#define GK20A_FORCE_PRAMIN_DEFAULT	false
-
 /*
  * The PRAMIN range is 1 MB, must change base addr if a buffer crosses that.
  * This same loop is used for read/write/memset. Offset and size in bytes.
@@ -96,5 +89,4 @@ void nvgpu_init_pramin(struct mm_gk20a *mm)
 {
	mm->pramin_window = 0;
	nvgpu_spinlock_init(&mm->pramin_window_lock);
-	mm->force_pramin = GK20A_FORCE_PRAMIN_DEFAULT;
 }


@@ -1124,7 +1124,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
		goto clean_up_usermode;
	}

-	if (c->gpfifo.mem.aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+	if (c->gpfifo.mem.aperture == APERTURE_VIDMEM) {
		c->gpfifo.pipe = nvgpu_big_malloc(g,
				gpfifo_size * gpfifo_entry_size);
		if (!c->gpfifo.pipe) {


@@ -151,7 +151,6 @@ struct mm_gk20a {

	u32 pramin_window;
	struct nvgpu_spinlock pramin_window_lock;
-	bool force_pramin; /* via debugfs */

	struct {
		size_t size;