Revert "gpu: nvgpu: fix patch buf count update for vidmem"

This reverts commit de399ccb00.

Bug 2012077

Change-Id: Ie608c3b41aa91f9aaed3fad240ed882a0c6f1ea2
Signed-off-by: Timo Alho <talho@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1591423
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    Timo Alho <talho@nvidia.com>
Date:      2017-11-03 05:29:12 -07:00
Committer: Jani Uusi-Rantala
Parent:    de399ccb00
Commit:    fe1e09d473
3 changed files with 8 additions and 25 deletions

View File

@@ -60,14 +60,6 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
{
void *cpu_va;
if (WARN_ON(mem->cpu_accessible)) {
nvgpu_warn(g, "nested");
return -EBUSY;
}
/* flag that the intent is to allow CPU access to the memory. */
mem->cpu_accessible = true;
if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
return 0;
@@ -79,14 +71,17 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
if (!(mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING))
return 0;
if (WARN_ON(mem->cpu_va)) {
nvgpu_warn(g, "nested");
return -EBUSY;
}
cpu_va = vmap(mem->priv.pages,
PAGE_ALIGN(mem->size) >> PAGE_SHIFT,
0, pgprot_writecombine(PAGE_KERNEL));
if (WARN_ON(!cpu_va)) {
mem->cpu_accessible = false;
if (WARN_ON(!cpu_va))
return -ENOMEM;
}
mem->cpu_va = cpu_va;
return 0;
@@ -94,8 +89,6 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
{
mem->cpu_accessible = false;
if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
return;

View File

@@ -682,7 +682,7 @@ int gr_gk20a_ctx_patch_write_begin(struct gk20a *g,
if (err)
return err;
if (nvgpu_mem_cpu_accessible(&ch_ctx->gr_ctx->mem)) {
if (ch_ctx->gr_ctx->mem.cpu_va) {
/* reset patch count if ucode has already processed it */
ch_ctx->patch_ctx.data_count = nvgpu_mem_rd(g,
&ch_ctx->gr_ctx->mem,
@@ -699,7 +699,7 @@ void gr_gk20a_ctx_patch_write_end(struct gk20a *g,
nvgpu_mem_end(g, &ch_ctx->patch_ctx.mem);
/* Write context count to context image if it is mapped */
if (nvgpu_mem_cpu_accessible(&ch_ctx->gr_ctx->mem)) {
if (ch_ctx->gr_ctx->mem.cpu_va) {
nvgpu_mem_wr(g, &ch_ctx->gr_ctx->mem,
ctxsw_prog_main_image_patch_count_o(),
ch_ctx->patch_ctx.data_count);

View File

@@ -122,7 +122,6 @@ struct nvgpu_mem {
size_t aligned_size;
u64 gpu_va;
bool skip_wmb;
bool cpu_accessible;
/*
* Set when a nvgpu_mem struct is not a "real" nvgpu_mem struct. Instead
@@ -211,15 +210,6 @@ static inline bool nvgpu_mem_is_valid(struct nvgpu_mem *mem)
}
/*
 * nvgpu_mem_cpu_accessible - query whether CPU access to @mem is permitted.
 *
 * Returns true if the passed nvgpu_mem can be accessed by the CPU by virtue
 * of a successful prior call to nvgpu_mem_begin(), which sets the
 * cpu_accessible flag read here; nvgpu_mem_end() clears it again.
 */
static inline bool nvgpu_mem_cpu_accessible(struct nvgpu_mem *mem)
{
return mem->cpu_accessible;
}
/*
* Create a nvgpu_sgt of the default implementation
*/