Revert "gpu: nvgpu: cache cde compbits buf mappings"

This reverts commit 9968badd26490a9d399f526fc57a9defd161dd6c. The commit
accidentally introduced some memory leaks.

Change-Id: I00d8d4452a152a8a2fe2d90fb949cdfee0de4c69
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/714288
Reviewed-by: Juha Tukkinen <jtukkinen@nvidia.com>
Author:    Konsta Holtta <kholtta@nvidia.com>
Date:      2015-03-05 13:18:30 +02:00
Committer: Dan Willemsen
Parent:    325e0587d9
Commit:    5f6cc1289e

3 changed files with 15 additions and 91 deletions


@@ -1,7 +1,7 @@
 /*
  * Color decompression engine support
  *
- * Copyright (c) 2014-2015, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2014, NVIDIA Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -979,23 +979,17 @@ __releases(&cde_app->mutex)
 		map_size = compbits_buf->size - map_offset;
 	}
 
-	/* map the destination buffer, if not cached yet */
-	/* races protected by the cde app mutex above */
-	map_vaddr = gk20a_vm_cde_mapped(cde_ctx->vm, compbits_buf);
+	/* map the destination buffer */
+	get_dma_buf(compbits_buf); /* a ref for gk20a_vm_map */
+	map_vaddr = gk20a_vm_map(cde_ctx->vm, compbits_buf, 0,
+				 NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+				 compbits_kind, NULL, true,
+				 gk20a_mem_flag_none,
+				 map_offset, map_size);
 	if (!map_vaddr) {
-		/* take a ref for gk20a_vm_map, pair is in (cached) unmap */
-		get_dma_buf(compbits_buf);
-		map_vaddr = gk20a_vm_map(cde_ctx->vm, compbits_buf, 0,
-					 NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
-					 compbits_kind, NULL, true,
-					 gk20a_mem_flag_none,
-					 map_offset, map_size);
-		if (!map_vaddr) {
-			dma_buf_put(compbits_buf);
-			err = -EINVAL;
-			goto exit_unlock;
-		}
-		gk20a_vm_mark_cde_mapped(cde_ctx->vm, compbits_buf, map_vaddr);
+		dma_buf_put(compbits_buf);
+		err = -EINVAL;
+		goto exit_unlock;
 	}
 
 	/* store source buffer compression tags */
@@ -1058,7 +1052,9 @@ __releases(&cde_app->mutex)
 
 exit_unlock:
 
-	/* leave map_vaddr mapped - released when unmapped from userspace */
+	/* unmap the buffers - channel holds references to them now */
+	if (map_vaddr)
+		gk20a_vm_unmap(cde_ctx->vm, map_vaddr);
 
 	mutex_unlock(&g->cde_app.mutex);
 	return err;
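Note: the two hunks above restore cde_convert()'s original buffer lifetime: take a dma_buf reference, map, submit the conversion job, then unmap again at exit_unlock, with the error path dropping the reference. Below is a minimal sketch of that balanced-reference pattern, not the driver's actual code; sketch_vm_map() and sketch_vm_unmap() are hypothetical stand-ins for gk20a_vm_map() and gk20a_vm_unmap(), whose real signatures appear in the hunks above.

#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/types.h>

struct vm_gk20a;	/* opaque here; defined in mm_gk20a.h */

/* hypothetical stand-ins for gk20a_vm_map()/gk20a_vm_unmap() */
u64 sketch_vm_map(struct vm_gk20a *vm, struct dma_buf *buf,
		  u64 offset, u64 size);
void sketch_vm_unmap(struct vm_gk20a *vm, u64 vaddr);

static int cde_convert_sketch(struct vm_gk20a *vm, struct dma_buf *buf,
			      u64 offset, u64 size)
{
	u64 vaddr;

	get_dma_buf(buf);		/* ref owned by the new mapping */
	vaddr = sketch_vm_map(vm, buf, offset, size);
	if (!vaddr) {
		dma_buf_put(buf);	/* error path balances the get */
		return -EINVAL;
	}

	/* ... set up and submit the compbits conversion job;
	 * the channel takes its own references to the buffers ... */

	sketch_vm_unmap(vm, vaddr);	/* unmap drops the mapping's ref */
	return 0;
}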


@@ -117,11 +117,6 @@ struct gk20a_dmabuf_priv {
 	int pin_count;
 
 	struct list_head states;
-
-	/* cached cde compbits buf */
-	struct vm_gk20a *cde_vm;
-	u64 cde_map_vaddr;
-	int map_count;
 };
 
 static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm);
@@ -203,60 +198,6 @@ void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
 	mutex_unlock(&priv->lock);
 }
 
-/* CDE compbits buf caching: keep compbit buffer mapped during user mappings.
- * Call these four only after dma_buf has a drvdata allocated */
-
-u64 gk20a_vm_cde_mapped(struct vm_gk20a *vm, struct dma_buf *dmabuf)
-{
-	struct device *dev = dev_from_vm(vm);
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-	u64 map_vaddr;
-
-	mutex_lock(&priv->lock);
-	map_vaddr = priv->cde_map_vaddr;
-	mutex_unlock(&priv->lock);
-
-	return map_vaddr;
-}
-
-void gk20a_vm_mark_cde_mapped(struct vm_gk20a *vm, struct dma_buf *dmabuf,
-		u64 map_vaddr)
-{
-	struct device *dev = dev_from_vm(vm);
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-
-	mutex_lock(&priv->lock);
-	priv->cde_vm = vm;
-	priv->cde_map_vaddr = map_vaddr;
-	mutex_unlock(&priv->lock);
-}
-
-static void gk20a_vm_inc_maps(struct vm_gk20a *vm, struct dma_buf *dmabuf)
-{
-	struct device *dev = dev_from_vm(vm);
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-
-	mutex_lock(&priv->lock);
-	priv->map_count++;
-	mutex_unlock(&priv->lock);
-}
-
-static void gk20a_vm_dec_maps(struct vm_gk20a *vm, struct dma_buf *dmabuf,
-		struct vm_gk20a **cde_vm, u64 *cde_map_vaddr)
-{
-	struct device *dev = dev_from_vm(vm);
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-
-	mutex_lock(&priv->lock);
-	if (--priv->map_count == 0) {
-		*cde_vm = priv->cde_vm;
-		*cde_map_vaddr = priv->cde_map_vaddr;
-		priv->cde_vm = NULL;
-		priv->cde_map_vaddr = 0;
-	}
-	mutex_unlock(&priv->lock);
-}
-
 void gk20a_get_comptags(struct device *dev, struct dma_buf *dmabuf,
 		struct gk20a_comptags *comptags)
 {
@@ -809,8 +750,6 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset)
 	struct device *d = dev_from_vm(vm);
 	int retries;
 	struct mapped_buffer_node *mapped_buffer;
-	struct vm_gk20a *cde_vm = NULL;
-	u64 cde_map_vaddr = 0;
 
 	mutex_lock(&vm->update_gmmu_lock);
 
@@ -843,15 +782,9 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset)
 	mapped_buffer->user_mapped--;
 	if (mapped_buffer->user_mapped == 0)
 		vm->num_user_mapped_buffers--;
-
-	gk20a_vm_dec_maps(vm, mapped_buffer->dmabuf, &cde_vm, &cde_map_vaddr);
-
 	kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref);
 
 	mutex_unlock(&vm->update_gmmu_lock);
-
-	if (cde_map_vaddr)
-		gk20a_vm_unmap(cde_vm, cde_map_vaddr);
 }
 
 u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
@@ -2665,9 +2598,7 @@ int gk20a_vm_map_buffer(struct vm_gk20a *vm,
 			mapping_size);
 
 	*offset_align = ret_va;
-	if (ret_va) {
-		gk20a_vm_inc_maps(vm, dmabuf);
-	} else {
+	if (!ret_va) {
 		dma_buf_put(dmabuf);
 		err = -EINVAL;
 	}
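Note: the helpers removed above were the whole caching scheme being reverted: gk20a_vm_inc_maps() ran on every successful user map, and gk20a_vm_unmap_user() used gk20a_vm_dec_maps() to tear down the cached CDE mapping once the last user mapping went away. The commit message does not pinpoint the exact leak; the following condensed model (hypothetical names, simplified fields) only illustrates how such bookkeeping can strand resources: any teardown path that skips the dec side leaves the cached mapping, and the dma_buf reference it holds, unreleased.

#include <linux/mutex.h>
#include <linux/types.h>

/* simplified stand-in for the fields removed from gk20a_dmabuf_priv */
struct priv_sketch {
	struct mutex lock;
	int map_count;		/* live user mappings of this buffer */
	u64 cde_map_vaddr;	/* cached CDE mapping, 0 if none */
};

static void inc_maps(struct priv_sketch *p)
{
	mutex_lock(&p->lock);
	p->map_count++;
	mutex_unlock(&p->lock);
}

/* returns the cached vaddr the caller must now unmap, or 0 to keep it */
static u64 dec_maps(struct priv_sketch *p)
{
	u64 vaddr = 0;

	mutex_lock(&p->lock);
	if (--p->map_count == 0) {
		vaddr = p->cde_map_vaddr;
		p->cde_map_vaddr = 0;
	}
	mutex_unlock(&p->lock);
	return vaddr;
}

/*
 * Hazard: the cached mapping is released only through dec_maps().  If a
 * buffer is freed through a path that never calls dec_maps(), or if the
 * count never returns to zero, cde_map_vaddr (and the dma_buf reference
 * behind it) is never released.
 */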


@@ -557,9 +557,6 @@ void gk20a_deinit_vm(struct vm_gk20a *vm);
 int gk20a_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset);
 void gk20a_get_comptags(struct device *dev, struct dma_buf *dmabuf,
 			struct gk20a_comptags *comptags);
-u64 gk20a_vm_cde_mapped(struct vm_gk20a *vm, struct dma_buf *dmabuf);
-void gk20a_vm_mark_cde_mapped(struct vm_gk20a *vm, struct dma_buf *dmabuf,
-		u64 map_vaddr);
 dma_addr_t gk20a_mm_gpuva_to_iova_base(struct vm_gk20a *vm, u64 gpu_vaddr);
 
 int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev);