Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-23 09:57:08 +03:00)
gpu: nvgpu: remove broken force_pramin feature
The forced PRAMIN reads and writes for sysmem buffers haven't worked in a
while since the PRAMIN access code was refactored to work with vidmem-only
sgt allocs. This feature was only ever meant for testing and debugging
PRAMIN access and early dGPU support, but that is stable enough now, so
just delete the broken feature instead of fixing it.

Change-Id: Ib31dae4550f3b6fea3c426a2e4ad126864bf85d2
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1723725
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by: mobile promotions
parent: d914e662ae
commit: 2788943d38
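For context on what the deleted knob did: when force_pramin was set, even APERTURE_SYSMEM buffers were routed through the PRAMIN (BAR0 window) path instead of being touched through their CPU mapping, which is the dispatch the hunks below simplify. A minimal illustrative sketch of that pattern follows; read_via_cpu_va() and read_via_pramin() are hypothetical placeholders, not nvgpu functions, and the sketch is userspace C, not kernel code.

/*
 * Illustrative sketch only (not nvgpu code): models the aperture dispatch
 * that this commit simplifies.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum aperture { APERTURE_SYSMEM, APERTURE_VIDMEM };

struct mem {
	enum aperture aperture;
	uint32_t *cpu_va;	/* valid only for sysmem buffers */
};

static uint32_t read_via_cpu_va(const struct mem *mem, uint32_t w)
{
	return mem->cpu_va[w];	/* direct CPU access through the mapping */
}

static uint32_t read_via_pramin(const struct mem *mem, uint32_t w)
{
	(void)mem; (void)w;
	return 0xdeadbeef;	/* stand-in for a read through the PRAMIN/BAR0 window */
}

/*
 * Before this commit, force_pramin could push even sysmem buffers through
 * the PRAMIN path; afterwards, sysmem always goes through cpu_va.
 */
static uint32_t mem_rd32(const struct mem *mem, uint32_t w, bool force_pramin)
{
	if (mem->aperture == APERTURE_SYSMEM && !force_pramin)
		return read_via_cpu_va(mem, w);
	return read_via_pramin(mem, w);
}

int main(void)
{
	uint32_t backing[4] = { 1, 2, 3, 4 };
	struct mem sysmem = { APERTURE_SYSMEM, backing };

	printf("normal:       0x%x\n", mem_rd32(&sysmem, 1, false));
	printf("force_pramin: 0x%x\n", mem_rd32(&sysmem, 1, true));
	return 0;
}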
@@ -692,10 +692,6 @@ static int gk20a_submit_append_gpfifo(struct channel_gk20a *c,
          * This path (from userspace to sysmem) is special in order to
          * avoid two copies unnecessarily (from user to pipe, then from
          * pipe to gpu sysmem buffer).
-         *
-         * As a special case, the pipe buffer exists if PRAMIN writes
-         * are forced, although the buffers may not be in vidmem in
-         * that case.
          */
         if (end > gpfifo_size) {
                 /* wrap-around */
@@ -723,8 +719,7 @@ static int gk20a_submit_append_gpfifo(struct channel_gk20a *c,
                                 0, num_entries);
                 goto out;
         } else if (user_gpfifo) {
-                /* from userspace to vidmem or sysmem when pramin forced, use
-                 * the common copy path below */
+                /* from userspace to vidmem, use the common copy path below */
                 err = copy_from_user(c->gpfifo.pipe, user_gpfifo, len);
                 if (err)
                         return err;

@@ -19,8 +19,4 @@
 
 void gk20a_mm_debugfs_init(struct gk20a *g)
 {
-        struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-
-        debugfs_create_bool("force_pramin", 0664, l->debugfs,
-                        &g->mm.force_pramin);
 }

@@ -54,7 +54,7 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
                 PAGE_KERNEL :
                 pgprot_writecombine(PAGE_KERNEL);
 
-        if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
+        if (mem->aperture != APERTURE_SYSMEM)
                 return 0;
 
         /*
@@ -91,7 +91,7 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
 
 void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
 {
-        if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
+        if (mem->aperture != APERTURE_SYSMEM)
                 return;
 
         /*
@@ -134,7 +134,7 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 {
         u32 data = 0;
 
-        if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+        if (mem->aperture == APERTURE_SYSMEM) {
                 u32 *ptr = mem->cpu_va;
 
                 WARN_ON(!ptr);
@@ -142,7 +142,7 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 #ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
                 nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
 #endif
-        } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+        } else if (mem->aperture == APERTURE_VIDMEM) {
                 u32 value;
                 u32 *p = &value;
 
@@ -170,7 +170,7 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
         WARN_ON(offset & 3);
         WARN_ON(size & 3);
 
-        if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+        if (mem->aperture == APERTURE_SYSMEM) {
                 u8 *src = (u8 *)mem->cpu_va + offset;
 
                 WARN_ON(!mem->cpu_va);
@@ -180,7 +180,7 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
                 nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
                                 src, *dest, size);
 #endif
-        } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+        } else if (mem->aperture == APERTURE_VIDMEM) {
                 u32 *dest_u32 = dest;
 
                 nvgpu_pramin_access_batched(g, mem, offset, size,
@@ -210,7 +210,7 @@ static void pramin_access_batch_wr_n(struct gk20a *g, u32 start, u32 words, u32
 
 void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
 {
-        if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+        if (mem->aperture == APERTURE_SYSMEM) {
                 u32 *ptr = mem->cpu_va;
 
                 WARN_ON(!ptr);
@@ -218,7 +218,7 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
                 nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
 #endif
                 ptr[w] = data;
-        } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+        } else if (mem->aperture == APERTURE_VIDMEM) {
                 u32 value = data;
                 u32 *p = &value;
 
@@ -243,7 +243,7 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
         WARN_ON(offset & 3);
         WARN_ON(size & 3);
 
-        if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+        if (mem->aperture == APERTURE_SYSMEM) {
                 u8 *dest = (u8 *)mem->cpu_va + offset;
 
                 WARN_ON(!mem->cpu_va);
@@ -253,7 +253,7 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
                                 dest, *src, size);
 #endif
                 memcpy(dest, src, size);
-        } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+        } else if (mem->aperture == APERTURE_VIDMEM) {
                 u32 *src_u32 = src;
 
                 nvgpu_pramin_access_batched(g, mem, offset, size,
@@ -290,7 +290,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 
         c &= 0xff;
 
-        if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+        if (mem->aperture == APERTURE_SYSMEM) {
                 u8 *dest = (u8 *)mem->cpu_va + offset;
 
                 WARN_ON(!mem->cpu_va);
@@ -300,7 +300,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
                                 dest, c, size);
 #endif
                 memset(dest, c, size);
-        } else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+        } else if (mem->aperture == APERTURE_VIDMEM) {
                 u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);
                 u32 *p = &repeat_value;
 

@@ -27,13 +27,6 @@
 
 #include "gk20a/gk20a.h"
 
-/*
- * Flip this to force all gk20a_mem* accesses via PRAMIN from the start of the
- * boot, even for buffers that would work via cpu_va. In runtime, the flag is
- * in debugfs, called "force_pramin".
- */
-#define GK20A_FORCE_PRAMIN_DEFAULT false
-
 /*
  * The PRAMIN range is 1 MB, must change base addr if a buffer crosses that.
  * This same loop is used for read/write/memset. Offset and size in bytes.
@@ -96,5 +89,4 @@ void nvgpu_init_pramin(struct mm_gk20a *mm)
 {
         mm->pramin_window = 0;
         nvgpu_spinlock_init(&mm->pramin_window_lock);
-        mm->force_pramin = GK20A_FORCE_PRAMIN_DEFAULT;
 }

@@ -1124,7 +1124,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
                 goto clean_up_usermode;
         }
 
-        if (c->gpfifo.mem.aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+        if (c->gpfifo.mem.aperture == APERTURE_VIDMEM) {
                 c->gpfifo.pipe = nvgpu_big_malloc(g,
                                 gpfifo_size * gpfifo_entry_size);
                 if (!c->gpfifo.pipe) {

@@ -151,7 +151,6 @@ struct mm_gk20a {
 
         u32 pramin_window;
         struct nvgpu_spinlock pramin_window_lock;
-        bool force_pramin; /* via debugfs */
 
         struct {
                 size_t size;
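One detail worth spelling out from the context lines kept in pramin.c above: the PRAMIN window is 1 MB, so a batched read/write/memset must split any access that crosses a 1 MB boundary and re-program the window base. Below is a self-contained sketch of that windowing arithmetic; it is illustrative only, the addresses are made up, and the loop shape is not nvgpu's nvgpu_pramin_access_batched().

/*
 * Illustrative sketch only (not the nvgpu implementation): shows why an
 * access that crosses a 1 MB boundary needs the window base moved.
 */
#include <stdint.h>
#include <stdio.h>

#define PRAMIN_WINDOW_SIZE (1024u * 1024u)	/* 1 MB aperture */

int main(void)
{
	uint64_t buf_pa = 0x180000000ull;	/* hypothetical vidmem physical base */
	uint32_t offset = 0xffff8;		/* byte offset into the buffer */
	uint32_t size = 16;			/* this access crosses a 1 MB boundary */

	while (size) {
		uint64_t pa = buf_pa + offset;
		/* base of the 1 MB window containing this address */
		uint64_t win_base = pa & ~(uint64_t)(PRAMIN_WINDOW_SIZE - 1);
		/* bytes left in this window before the base must be moved */
		uint32_t in_win = PRAMIN_WINDOW_SIZE - (uint32_t)(pa - win_base);
		uint32_t n = size < in_win ? size : in_win;

		printf("window base 0x%llx, window offset 0x%x, copy %u bytes\n",
		       (unsigned long long)win_base,
		       (uint32_t)(pa - win_base), n);

		offset += n;
		size -= n;
	}
	return 0;
}

Run as written, the 16-byte access is split into two 8-byte chunks, the second in a new window, which is exactly the case the "must change base addr if a buffer crosses that" comment is about.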