gpu: nvgpu: VM unmap refactoring

Re-organize the unmap code so that it is better split between OS-specific
requirements and common core requirements. The new code flow works
as follows:

  nvgpu_vm_unmap()

Is the primary entry point into the unmap path. It takes a VM and a GPU
virtual address to unmap; an optional mapping batch struct may also be
passed.

This function is responsible for making sure a real buffer exists at the
given address and that, if it is called on a fixed mapping, the mapping
will definitely be freed (since mapped buffers are ref-counted). It then
decrements the ref-count and returns.
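
A minimal sketch of what this entry point could look like under the flow
described here. The lock, lookup, flag, and ref-put identifiers all appear
in the diff below; nvgpu_vm_unmap_sync_buffer() is a hypothetical name for
the fixed-mapping wait step:

  void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset,
  		    struct vm_gk20a_mapping_batch *batch)
  {
  	struct nvgpu_mapped_buf *mapped_buffer;

  	nvgpu_mutex_acquire(&vm->update_gmmu_lock);

  	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
  	if (!mapped_buffer)
  		goto done;

  	/*
  	 * For fixed mappings, wait until the ref-count drops to 1 so the
  	 * put below is guaranteed to actually free the mapping.
  	 * (Hypothetical helper name.)
  	 */
  	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)
  		if (nvgpu_vm_unmap_sync_buffer(vm, mapped_buffer))
  			goto done;

  	/* Stash the batch so __nvgpu_vm_unmap_ref() can pass it along. */
  	vm->kref_put_batch = batch;
  	nvgpu_ref_put(&mapped_buffer->ref, __nvgpu_vm_unmap_ref);
  	vm->kref_put_batch = NULL;

  done:
  	nvgpu_mutex_release(&vm->update_gmmu_lock);
  }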

If the ref-count hits zero then __nvgpu_vm_unmap_ref() is called, which
simply calls __nvgpu_vm_unmap() with the relevant batch struct if one is
present. This is where the real work is done: __nvgpu_vm_unmap() clears
the GMMU mapping, removes the mapped buffer from the various lists and
trees it may be in, and then calls nvgpu_vm_unmap_system(). That
function handles any OS-specific teardown and must be defined by all VM
OS implementations.
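
A rough sketch of that pair, assuming the usual container_of() plumbing
for the ref callback; the identifiers in the body come from the diff
below, but the exact placement of the final free is an assumption:

  static void __nvgpu_vm_unmap_ref(struct nvgpu_ref *ref)
  {
  	struct nvgpu_mapped_buf *mapped_buffer =
  		container_of(ref, struct nvgpu_mapped_buf, ref);

  	__nvgpu_vm_unmap(mapped_buffer, mapped_buffer->vm->kref_put_batch);
  }

  static void __nvgpu_vm_unmap(struct nvgpu_mapped_buf *mapped_buffer,
  			     struct vm_gk20a_mapping_batch *batch)
  {
  	struct vm_gk20a *vm = mapped_buffer->vm;
  	struct gk20a *g = vm->mm->g;

  	/* Clear the GMMU mapping; this is now common core code. */
  	g->ops.mm.gmmu_unmap(vm, mapped_buffer->addr, mapped_buffer->size,
  			     mapped_buffer->pgsz_idx,
  			     mapped_buffer->va_allocated,
  			     gk20a_mem_flag_none,
  			     mapped_buffer->vm_area ?
  			     mapped_buffer->vm_area->sparse : false,
  			     batch);

  	/* Pull the buffer out of the VM's tree and any lists. */
  	nvgpu_remove_mapped_buf(vm, mapped_buffer);
  	if (!nvgpu_list_empty(&mapped_buffer->buffer_list))
  		nvgpu_list_del(&mapped_buffer->buffer_list);

  	/* Hand off to the OS layer for its part of the teardown. */
  	nvgpu_vm_unmap_system(mapped_buffer);

  	nvgpu_kfree(g, mapped_buffer);
  }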

There's a shortcut used by some other core VM code to free mappings
without going through nvgpu_vm_unmap(). Mostly these callers just
directly decrement the mapping ref-count, which then calls
__nvgpu_vm_unmap_ref() if the ref-count hits zero.
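
For illustration, such a caller (already holding the VM lock and with a
mapped_buffer in hand) would do something like:

  	/* Hypothetical direct ref-count decrement by core VM code. */
  	nvgpu_ref_put(&mapped_buffer->ref, __nvgpu_vm_unmap_ref);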

JIRA NVGPU-30
JIRA NVGPU-71

Change-Id: Ic626d37ab936819841bab45214f027b40ffa4e5a
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1583982
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -641,88 +641,20 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 	return err;
 }
 
-int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
-			  struct vm_gk20a_mapping_batch *batch)
-{
-	struct gk20a *g = vm->mm->g;
-	struct nvgpu_mapped_buf *mapped_buffer;
-
-	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-
-	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
-	if (!mapped_buffer) {
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
-		return 0;
-	}
-
-	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
-		struct nvgpu_timeout timeout;
-
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-
-		nvgpu_timeout_init(vm->mm->g, &timeout, 10000,
-				   NVGPU_TIMER_RETRY_TIMER);
-		do {
-			if (nvgpu_atomic_read(
-				&mapped_buffer->ref.refcount) == 1)
-				break;
-			nvgpu_udelay(5);
-		} while (!nvgpu_timeout_expired_msg(&timeout,
-				"sync-unmap failed on 0x%llx"));
-
-		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-	}
-
-	if (mapped_buffer->user_mapped == 0) {
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
-		return 0;
-	}
-
-	mapped_buffer->user_mapped--;
-	if (mapped_buffer->user_mapped == 0)
-		vm->num_user_mapped_buffers--;
-
-	vm->kref_put_batch = batch;
-	nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
-	vm->kref_put_batch = NULL;
-
-	nvgpu_mutex_release(&vm->update_gmmu_lock);
-
-	return 0;
-}
-
-/* NOTE! mapped_buffers lock must be held */
-void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
-			   struct vm_gk20a_mapping_batch *batch)
+/*
+ * This is the function call-back for freeing OS specific components of an
+ * nvgpu_mapped_buf. This should most likely never be called outside of the
+ * core MM framework!
+ *
+ * Note: the VM lock will be held.
+ */
+void nvgpu_vm_unmap_system(struct nvgpu_mapped_buf *mapped_buffer)
 {
 	struct vm_gk20a *vm = mapped_buffer->vm;
-	struct gk20a *g = vm->mm->g;
-
-	g->ops.mm.gmmu_unmap(vm,
-			     mapped_buffer->addr,
-			     mapped_buffer->size,
-			     mapped_buffer->pgsz_idx,
-			     mapped_buffer->va_allocated,
-			     gk20a_mem_flag_none,
-			     mapped_buffer->vm_area ?
-			     mapped_buffer->vm_area->sparse : false,
-			     batch);
 
 	gk20a_mm_unpin(dev_from_vm(vm), mapped_buffer->dmabuf,
 		       mapped_buffer->sgt);
 
-	/* remove from mapped buffer tree and remove list, free */
-	nvgpu_remove_mapped_buf(vm, mapped_buffer);
-	if (!nvgpu_list_empty(&mapped_buffer->buffer_list))
-		nvgpu_list_del(&mapped_buffer->buffer_list);
-
-	/* keep track of mapped buffers */
-	if (mapped_buffer->user_mapped)
-		vm->num_user_mapped_buffers--;
-
 	if (mapped_buffer->own_mem_ref)
 		dma_buf_put(mapped_buffer->dmabuf);
-
-	nvgpu_kfree(g, mapped_buffer);
 }