gpu: nvgpu: Remove user_mapped from mapped_buf

Remove the always-true field 'user_mapped' from the mapped_buf
struct. All mapped_bufs are mapped by a user request, since they
always originate from a dma_buf (for Linux, that is). As such,
a fair amount of logic can be deleted.

Linux specific: the own_mem_ref field was also deleted. The
logic for storing only one dma_buf ref per mapped buffer is
simple: when the mapped buffer is found in the map cache,
release the outstanding dma_buf ref taken earlier in the map
path. If the map cache does not have the buffer, simply let the
higher level map code keep the dma_buf ref.

The dma_buf ref is released when the nvgpu_vm_unmap_system()
callback is invoked by the unmap path.
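
To illustrate the resulting ref ownership rule, here is a
minimal sketch in plain C. The map_cache_find() and
map_new_buffer() helpers and the stripped-down mapped_buf model
are hypothetical stand-ins for __nvgpu_vm_find_mapping() and the
nvgpu_vm_map_linux() slow path, not the driver's actual code:

    struct dma_buf;

    struct mapped_buf {
            struct dma_buf *dmabuf;   /* buffer backing this mapping */
            unsigned long long addr;  /* GPU virtual address */
            int refcount;             /* mapping refcount, not dma_buf's */
    };

    /* Stubs standing in for the real Linux/nvgpu APIs. */
    extern void dma_buf_put(struct dma_buf *dmabuf);
    extern struct mapped_buf *map_cache_find(struct dma_buf *dmabuf);
    extern unsigned long long map_new_buffer(struct dma_buf *dmabuf);

    /* Caller has already taken one dma_buf ref on the way in. */
    static unsigned long long map_buffer(struct dma_buf *dmabuf)
    {
            struct mapped_buf *mb = map_cache_find(dmabuf);

            if (mb) {
                    /* Cache hit: the first mapping already owns a
                     * dma_buf ref, so release the one taken earlier
                     * in the map path. */
                    dma_buf_put(dmabuf);
                    mb->refcount++;
                    return mb->addr;
            }

            /* Cache miss: the caller's ref transfers to the new
             * mapping and is dropped later by the unmap path in
             * nvgpu_vm_unmap_system(). */
            return map_new_buffer(dmabuf);
    }

Either way exactly one dma_buf ref is held per mapped_buf, which
is what makes user_mapped and own_mem_ref redundant.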

JIRA NVGPU-30
JIRA NVGPU-71

Change-Id: I229d136713812a7332bdadd5ebacd85d983bbbf0
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1583983
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -1050,7 +1050,6 @@ __releases(&l->cde_app->mutex)
 				 NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL,
 				 NV_KIND_INVALID,
 				 compbits_kind, /* incompressible kind */
-				 true,
 				 gk20a_mem_flag_none,
 				 map_offset, map_size,
 				 NULL);


@@ -227,7 +227,6 @@ static u64 __nvgpu_vm_find_mapping(struct vm_gk20a *vm,
 			       u64 offset_align,
 			       u32 flags,
 			       int kind,
-			       bool user_mapped,
 			       int rw_flag)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
@@ -251,22 +250,13 @@ static u64 __nvgpu_vm_find_mapping(struct vm_gk20a *vm,
 	if (mapped_buffer->flags != flags)
 		return 0;
 
-	/* mark the buffer as used */
-	if (user_mapped) {
-		if (mapped_buffer->user_mapped == 0)
-			vm->num_user_mapped_buffers++;
-		mapped_buffer->user_mapped++;
-
-		/* If the mapping comes from user space, we own
-		 * the handle ref. Since we reuse an
-		 * existing mapping here, we need to give back those
-		 * refs once in order not to leak.
-		 */
-		if (mapped_buffer->own_mem_ref)
-			dma_buf_put(mapped_buffer->dmabuf);
-		else
-			mapped_buffer->own_mem_ref = true;
-	}
+	/*
+	 * If we find the mapping here then that means we have mapped it already
+	 * and already have a dma_buf ref to the underlying buffer. As such
+	 * release the ref taken earlier in the map path.
+	 */
+	dma_buf_put(mapped_buffer->dmabuf);
+
 	nvgpu_ref_get(&mapped_buffer->ref);
 
 	nvgpu_log(g, gpu_dbg_map,
@@ -329,7 +319,6 @@ u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		       u32 flags,
 		       s16 compr_kind,
 		       s16 incompr_kind,
-		       bool user_mapped,
 		       int rw_flag,
 		       u64 buffer_offset,
 		       u64 mapping_size,
@@ -367,7 +356,7 @@ u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		map_key_kind = compr_kind;
 	}
 
-	if (user_mapped && vm->userspace_managed &&
+	if (vm->userspace_managed &&
 	    !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) {
 		nvgpu_err(g, "non-fixed-offset mapping not available on "
 			  "userspace managed address spaces");
@@ -380,8 +369,7 @@ u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
 	if (!vm->userspace_managed) {
 		map_offset = __nvgpu_vm_find_mapping(
 			vm, dmabuf, offset_align,
-			flags, map_key_kind,
-			user_mapped, rw_flag);
+			flags, map_key_kind, rw_flag);
 		if (map_offset) {
 			nvgpu_mutex_release(&vm->update_gmmu_lock);
 			return map_offset;
@@ -545,8 +533,6 @@ u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
 	mapped_buffer->flags = flags;
 	mapped_buffer->kind = map_key_kind;
 	mapped_buffer->va_allocated = va_allocated;
-	mapped_buffer->user_mapped = user_mapped ? 1 : 0;
-	mapped_buffer->own_mem_ref = user_mapped;
 
 	nvgpu_init_list_node(&mapped_buffer->buffer_list);
 	nvgpu_ref_init(&mapped_buffer->ref);
@@ -555,8 +541,8 @@ u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		nvgpu_err(g, "failed to insert into mapped buffer tree");
 		goto clean_up;
 	}
-	if (user_mapped)
-		vm->num_user_mapped_buffers++;
+
+	vm->num_user_mapped_buffers++;
 
 	if (vm_area) {
 		nvgpu_list_add_tail(&mapped_buffer->buffer_list,
@@ -626,7 +612,7 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 	}
 
 	ret_va = nvgpu_vm_map_linux(vm, dmabuf, *offset_align,
-				    flags, compr_kind, incompr_kind, true,
+				    flags, compr_kind, incompr_kind,
 				    gk20a_mem_flag_none,
 				    buffer_offset,
 				    mapping_size,
@@ -655,6 +641,5 @@ void nvgpu_vm_unmap_system(struct nvgpu_mapped_buf *mapped_buffer)
 	gk20a_mm_unpin(dev_from_vm(vm), mapped_buffer->dmabuf,
 		       mapped_buffer->sgt);
 
-	if (mapped_buffer->own_mem_ref)
-		dma_buf_put(mapped_buffer->dmabuf);
+	dma_buf_put(mapped_buffer->dmabuf);
 }


@@ -672,11 +672,9 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
 	nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers);
 	while (node) {
 		mapped_buffer = mapped_buffer_from_rbtree_node(node);
-		if (mapped_buffer->user_mapped) {
-			buffer_list[i] = mapped_buffer;
-			nvgpu_ref_get(&mapped_buffer->ref);
-			i++;
-		}
+		buffer_list[i] = mapped_buffer;
+		nvgpu_ref_get(&mapped_buffer->ref);
+		i++;
 		nvgpu_rbtree_enum_next(&node, node);
 	}
 


@@ -72,7 +72,6 @@ u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
 			*/
 		       s16 incompr_kind,
-		       bool user_mapped,
 		       int rw_flag,
 		       u64 buffer_offset,
 		       u64 mapping_size,


@@ -96,8 +96,6 @@ struct nvgpu_mapped_buf {
 	struct dma_buf *dmabuf;
 	struct sg_table *sgt;
 	struct nvgpu_ref ref;
-	u32 user_mapped;
-	bool own_mem_ref;
 	u32 pgsz_idx;
 	u32 ctag_offset;
 	u32 ctag_lines;