Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
Synced: 2025-12-25 02:52:51 +03:00
gpu: nvgpu: Begin reorganizing VM mapping/unmapping
Move vm_priv.h to <nvgpu/linux/vm.h> and rename nvgpu_vm_map() to
nvgpu_vm_map_linux(). Also remove a redundant unmap function from the
unmap path. These changes are the beginning of reworking the nvgpu
Linux mapping and unmapping code. The rest of this patch consists of
the changes needed to use the new map function name and the new path
to the Linux vm header.

Patch Series Goal
-----------------

There are two major goals for this patch series. Note that these goals
are not achieved in this patch; there will be subsequent patches.

1. Remove all last vestiges of Linux code from common/mm/vm.c
2. Implement map caching in the common/mm/vm.c code

To accomplish this, the struct nvgpu_mapped_buf used by the VM mapping
code first needs to be completely Linux-free. That means implementing
an abstraction to hold the Linux state that mapped buffers carry around
(SGT, dma_buf). This is why the vm_priv.h code has been moved: it will
need to be included by the <nvgpu/vm.h> header so that the OS-specific
struct can be pulled into struct nvgpu_mapped_buf.

Next, renaming nvgpu_vm_map() to nvgpu_vm_map_linux() prepares for a
new nvgpu_vm_map() that handles map caching with nvgpu_mapped_buf. The
mapping code is fairly straightforward: nvgpu_vm_map() does the
OS-generic work; each OS then calls this function from an
nvgpu_vm_map_<OS>() (or similar) that does any OS-specific adjustments
and management.

Freeing buffers is much trickier, however. The maps are all reference
counted, since userspace does not track buffers and expects the driver
to handle this instead. Because of the ref-counts, the free code
requires a callback into the OS-specific code: the OS-specific code
cannot free a buffer directly. This makes the path for freeing a
buffer quite convoluted.

JIRA NVGPU-30
JIRA NVGPU-71

Change-Id: I5e0975f60663a0d6cf0a6bd90e099f51e02c2395
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1578896
GVS: Gerrit_Virtual_Submit
Reviewed-by: David Martinez Nieto <dmartineznie@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Committed by: mobile promotions
Parent: 2a285d0607
Commit: 0c5d0c6a9e
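To make the direction of the series concrete before the diff itself, here is a
minimal, self-contained sketch of the map-caching split the commit message
describes. It is illustrative only: the types and helper names (os_buffer,
mapped_buf, vm_map_common, os_map, os_release) are invented stand-ins, not
nvgpu interfaces, and the real driver uses a mapped-buffer tree and nvgpu_ref
rather than a linked list and a bare counter.

/*
 * Sketch only -- NOT nvgpu code. All names below are hypothetical.
 */
#include <stdint.h>

struct os_buffer;                      /* opaque OS handle (dma_buf on Linux) */

struct mapped_buf {
	uint64_t gpu_va;               /* GPU virtual address of the mapping */
	int refcount;                  /* userspace maps are ref-counted */
	struct os_buffer *os_priv;     /* OS-specific state (SGT, dma_buf) */
	struct mapped_buf *next;       /* stand-in for the VM's buffer tree */
};

/*
 * OS-generic map: consult the cache first and only build a new GPU mapping
 * when the buffer is not already mapped into this VM. Each OS would call
 * this from its own wrapper (nvgpu_vm_map_linux() on Linux).
 */
static struct mapped_buf *
vm_map_common(struct mapped_buf **cache, struct os_buffer *buf,
	      struct mapped_buf *(*os_map)(struct os_buffer *buf))
{
	struct mapped_buf *m;

	for (m = *cache; m; m = m->next) {
		if (m->os_priv == buf) {
			m->refcount++;  /* cache hit: just take a reference */
			return m;
		}
	}

	m = os_map(buf);                /* OS code builds the new mapping */
	if (m) {
		m->refcount = 1;
		m->next = *cache;
		*cache = m;
	}
	return m;
}

/*
 * Unmap goes the other way: common code drops the reference and, on the
 * last put, calls back into OS code to release the SGT/dma_buf state --
 * the callback indirection the commit message calls "quite convoluted".
 */
static void vm_unmap_common(struct mapped_buf **cache, struct mapped_buf *m,
			    void (*os_release)(struct mapped_buf *m))
{
	struct mapped_buf **p;

	if (--m->refcount > 0)
		return;

	for (p = cache; *p; p = &(*p)->next) {
		if (*p == m) {
			*p = m->next;   /* unlink from the cache */
			break;
		}
	}
	os_release(m);                  /* OS-specific teardown, then free */
}

The sketch leaves locking out to keep the shape of the common/OS split
visible; the real code takes the VM's GMMU update lock around both paths.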
@@ -31,6 +31,8 @@
 #include <nvgpu/bug.h>
 #include <nvgpu/firmware.h>

+#include <nvgpu/linux/vm.h>
+
 #include "gk20a/gk20a.h"
 #include "gk20a/channel_gk20a.h"
 #include "gk20a/mm_gk20a.h"
@@ -44,12 +46,6 @@
 #include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h>
 #include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>

-/*
- * Currently this code uses nvgpu_vm_map() since it takes dmabuf FDs from the
- * CDE ioctls. That has to change - instead this needs to take an nvgpu_mem.
- */
-#include "common/linux/vm_priv.h"
-
 static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx);
 static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct nvgpu_os_linux *l);

@@ -1052,8 +1048,8 @@ __releases(&l->cde_app->mutex)

 	/* map the destination buffer */
-	get_dma_buf(compbits_scatter_buf); /* a ref for nvgpu_vm_map */
-	map_vaddr = nvgpu_vm_map(cde_ctx->vm, compbits_scatter_buf, 0,
+	get_dma_buf(compbits_scatter_buf); /* a ref for nvgpu_vm_map_linux */
+	map_vaddr = nvgpu_vm_map_linux(cde_ctx->vm, compbits_scatter_buf, 0,
 				 NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE |
 				 NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL,
 				 NV_KIND_INVALID,
@@ -21,13 +21,13 @@
 #include <nvgpu/comptags.h>
 #include <nvgpu/enabled.h>

+#include <nvgpu/linux/vm.h>
 #include <nvgpu/linux/vidmem.h>

 #include "gk20a/gk20a.h"
 #include "gk20a/platform_gk20a.h"

 #include "dmabuf.h"
-#include "vm_priv.h"
 #include "os_linux.h"

 static void gk20a_mm_delete_priv(void *_priv)
@@ -17,19 +17,19 @@
 #include <linux/uaccess.h>
 #include <linux/fs.h>

-#include <nvgpu/log2.h>
-
 #include <trace/events/gk20a.h>

 #include <uapi/linux/nvgpu.h>

 #include <nvgpu/gmmu.h>
 #include <nvgpu/vm_area.h>
+#include <nvgpu/log2.h>

+#include <nvgpu/linux/vm.h>
+
 #include "gk20a/gk20a.h"
 #include "gk20a/platform_gk20a.h"
 #include "ioctl_as.h"
-#include "vm_priv.h"
 #include "os_linux.h"

 static int gk20a_as_ioctl_bind_channel(
@@ -28,7 +28,9 @@
 #include <nvgpu/vm.h>
 #include <nvgpu/atomic.h>
 #include <nvgpu/cond.h>

+#include <nvgpu/linux/vidmem.h>
+#include <nvgpu/linux/vm.h>

 #include "gk20a/gk20a.h"
 #include "gk20a/platform_gk20a.h"
@@ -38,7 +40,6 @@
 #include "os_linux.h"
 #include "ioctl_dbg.h"

-#include "vm_priv.h"

 /* silly allocator - just increment id */
 static nvgpu_atomic_t unique_id = NVGPU_ATOMIC_INIT(0);
@@ -24,14 +24,13 @@
 #include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/page_allocator.h>

+#include <nvgpu/linux/vm.h>
 #include <nvgpu/linux/dma.h>
 #include <nvgpu/linux/vidmem.h>

 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"

-#include "vm_priv.h"
-
 bool nvgpu_addr_is_vidmem_page_alloc(u64 addr)
 {
 	return !!(addr & 1ULL);
@@ -25,6 +25,7 @@
 #include <nvgpu/page_allocator.h>
 #include <nvgpu/vidmem.h>

+#include <nvgpu/linux/vm.h>
 #include <nvgpu/linux/vidmem.h>
 #include <nvgpu/linux/nvgpu_mem.h>

@@ -33,7 +34,6 @@
 #include "gk20a/kind_gk20a.h"
 #include "gk20a/platform_gk20a.h"

-#include "vm_priv.h"
 #include "os_linux.h"
 #include "dmabuf.h"

@@ -323,17 +323,17 @@ static int setup_bfr_kind_fields(struct buffer_attrs *bfr, s16 compr_kind,
 	return 0;
 }

-u64 nvgpu_vm_map(struct vm_gk20a *vm,
-		 struct dma_buf *dmabuf,
-		 u64 offset_align,
-		 u32 flags,
-		 s16 compr_kind,
-		 s16 incompr_kind,
-		 bool user_mapped,
-		 int rw_flag,
-		 u64 buffer_offset,
-		 u64 mapping_size,
-		 struct vm_gk20a_mapping_batch *batch)
+u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
+		       struct dma_buf *dmabuf,
+		       u64 offset_align,
+		       u32 flags,
+		       s16 compr_kind,
+		       s16 incompr_kind,
+		       bool user_mapped,
+		       int rw_flag,
+		       u64 buffer_offset,
+		       u64 mapping_size,
+		       struct vm_gk20a_mapping_batch *batch)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct device *dev = dev_from_gk20a(g);
@@ -625,12 +625,12 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 		return err;
 	}

-	ret_va = nvgpu_vm_map(vm, dmabuf, *offset_align,
-			      flags, compr_kind, incompr_kind, true,
-			      gk20a_mem_flag_none,
-			      buffer_offset,
-			      mapping_size,
-			      batch);
+	ret_va = nvgpu_vm_map_linux(vm, dmabuf, *offset_align,
+				    flags, compr_kind, incompr_kind, true,
+				    gk20a_mem_flag_none,
+				    buffer_offset,
+				    mapping_size,
+				    batch);

 	*offset_align = ret_va;
 	if (!ret_va) {
@@ -641,21 +641,55 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 	return err;
 }

-void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset)
+int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
+			  struct vm_gk20a_mapping_batch *batch)
 {
 	struct gk20a *g = vm->mm->g;
 	struct nvgpu_mapped_buf *mapped_buffer;

 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);

 	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
 	if (!mapped_buffer) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
 		nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
-		return;
+		return 0;
 	}

+	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
+		struct nvgpu_timeout timeout;
+
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+		nvgpu_timeout_init(vm->mm->g, &timeout, 10000,
+				   NVGPU_TIMER_RETRY_TIMER);
+		do {
+			if (nvgpu_atomic_read(
+				&mapped_buffer->ref.refcount) == 1)
+				break;
+			nvgpu_udelay(5);
+		} while (!nvgpu_timeout_expired_msg(&timeout,
+					    "sync-unmap failed on 0x%llx"));
+
+		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+	}
+
+	if (mapped_buffer->user_mapped == 0) {
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+		nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
+		return 0;
+	}
+
+	mapped_buffer->user_mapped--;
+	if (mapped_buffer->user_mapped == 0)
+		vm->num_user_mapped_buffers--;
+
+	vm->kref_put_batch = batch;
+	nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
+	vm->kref_put_batch = NULL;
+
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
+	return 0;
 }

 /* NOTE! mapped_buffers lock must be held */
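The hunk above moves the sync-unmap wait into nvgpu_vm_unmap_buffer(): for
fixed-offset mappings it polls the buffer's refcount until only the caller's
reference remains, bounded by a 10-second budget with a 5-microsecond backoff.
Below is a stand-alone sketch of that bounded-wait pattern in plain C, with
the nvgpu timeout/atomic helpers replaced by standard calls; wait_for_last_ref
is a hypothetical name, not an nvgpu function.

/* Sketch only -- mirrors the 10000 ms budget and 5 us poll in the diff. */
#include <stdbool.h>
#include <time.h>

static bool wait_for_last_ref(volatile const int *refcount)
{
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 5 * 1000 };
	long polls = 10000L * 1000L / 5L;	/* 10000 ms / 5 us per poll */

	while (polls-- > 0) {
		if (*refcount == 1)
			return true;		/* safe to tear down the map */
		nanosleep(&delay, NULL);	/* back off, then re-check */
	}
	return false;				/* the "sync-unmap failed" case */
}

Note that the real code also releases the VM's GMMU update lock before the
wait and re-takes it afterwards, so other holders of the reference can
actually make progress while being waited on.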
@@ -691,6 +725,4 @@ void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
 	dma_buf_put(mapped_buffer->dmabuf);

 	nvgpu_kfree(g, mapped_buffer);
-
-	return;
 }

@@ -687,13 +687,6 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
 	return 0;
 }

-void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref)
-{
-	struct nvgpu_mapped_buf *mapped_buffer =
-		container_of(ref, struct nvgpu_mapped_buf, ref);
-	nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch);
-}
-
 void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
 			  struct nvgpu_mapped_buf **mapped_buffers,
 			  int num_buffers)
@@ -719,14 +712,19 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
 	nvgpu_big_free(vm->mm->g, mapped_buffers);
 }

-static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
-				struct vm_gk20a_mapping_batch *batch)
+void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref)
+{
+	struct nvgpu_mapped_buf *mapped_buffer =
+		container_of(ref, struct nvgpu_mapped_buf, ref);
+	nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch);
+}
+
+void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset)
 {
 	struct gk20a *g = vm->mm->g;
 	struct nvgpu_mapped_buf *mapped_buffer;

 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);

 	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
 	if (!mapped_buffer) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
@@ -734,44 +732,6 @@ static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 		return;
 	}

-	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
-		struct nvgpu_timeout timeout;
-
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-
-		nvgpu_timeout_init(vm->mm->g, &timeout, 10000,
-				   NVGPU_TIMER_RETRY_TIMER);
-		do {
-			if (nvgpu_atomic_read(
-				&mapped_buffer->ref.refcount) == 1)
-				break;
-			nvgpu_udelay(5);
-		} while (!nvgpu_timeout_expired_msg(&timeout,
-					    "sync-unmap failed on 0x%llx"));
-
-		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-	}
-
-	if (mapped_buffer->user_mapped == 0) {
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
-		return;
-	}
-
-	mapped_buffer->user_mapped--;
-	if (mapped_buffer->user_mapped == 0)
-		vm->num_user_mapped_buffers--;
-
-	vm->kref_put_batch = batch;
 	nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
-	vm->kref_put_batch = NULL;
-
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
-
-int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
-			  struct vm_gk20a_mapping_batch *batch)
-{
-	nvgpu_vm_unmap_user(vm, offset, batch);
-	return 0;
-}
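Both the removed nvgpu_vm_unmap_user() and its replacement funnel the final
teardown through nvgpu_ref_put() with nvgpu_vm_unmap_locked_ref as the release
callback, which recovers the owning nvgpu_mapped_buf via container_of(). Here
is a self-contained sketch of that idiom; the ref, mapping, and
mapping_release names are simplified stand-ins, not nvgpu's types.

/* Sketch only -- the put-with-release-callback idiom. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ref {
	int count;
	void (*release)(struct ref *r);  /* runs exactly once, on last put */
};

static void ref_put(struct ref *r)
{
	if (--r->count == 0)
		r->release(r);
}

struct mapping {
	unsigned long gpu_va;
	struct ref ref;                  /* embedded, like nvgpu_mapped_buf */
};

static void mapping_release(struct ref *r)
{
	/* Recover the enclosing mapping from its embedded ref member. */
	struct mapping *m = container_of(r, struct mapping, ref);

	printf("tearing down mapping at 0x%lx\n", m->gpu_va);
	/* real code would unmap GPU PTEs and drop the dma_buf here */
}

int main(void)
{
	struct mapping m = {
		.gpu_va = 0x100000,
		.ref = { .count = 2, .release = mapping_release },
	};

	ref_put(&m.ref);   /* still referenced: nothing happens */
	ref_put(&m.ref);   /* last put: mapping_release() fires */
	return 0;
}

This indirection is exactly why the commit message says the common code will
need a callback into OS-specific code to free a buffer: only the release
callback knows how to tear down the OS half of the mapping.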
@@ -43,6 +43,13 @@
 #include <nvgpu/ltc.h>
 #include <nvgpu/barrier.h>

+/*
+ * This is required for nvgpu_vm_find_buf() which is used in the tracing
+ * code. Once we can get and access userspace buffers without requiring
+ * direct dma_buf usage this can be removed.
+ */
+#include <nvgpu/linux/vm.h>
+
 #include "gk20a.h"
 #include "ctxsw_trace_gk20a.h"
 #include "dbg_gpu_gk20a.h"
@@ -57,13 +64,6 @@
  */
 #include <linux/uaccess.h>

-/*
- * This is required for nvgpu_vm_find_buffer() which is used in the tracing
- * code. Once we can get and access userspace buffers without requiring
- * direct dma_buf usage this can be removed.
- */
-#include "common/linux/vm_priv.h"
-
 /*
  * Although channels do have pointers back to the gk20a struct that they were
  * created under in cases where the driver is killed that pointer can be bad.
@@ -24,6 +24,7 @@ struct dma_buf;

 struct vm_gk20a;
 struct vm_gk20a_mapping_batch;
+struct nvgpu_vm_area;

 struct buffer_attrs {
 	struct sg_table *sgt;
@@ -40,30 +41,30 @@ struct buffer_attrs {
 	bool ctag_user_mappable;
 };

-u64 nvgpu_vm_map(struct vm_gk20a *vm,
-		 struct dma_buf *dmabuf,
-		 u64 offset_align,
-		 u32 flags,
+u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
+		       struct dma_buf *dmabuf,
+		       u64 offset_align,
+		       u32 flags,

-		 /*
-		  * compressible kind if
-		  * NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is
-		  * specified, otherwise just the kind
-		  */
-		 s16 compr_kind,
+		       /*
+			* compressible kind if
+			* NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is
+			* specified, otherwise just the kind
+			*/
+		       s16 compr_kind,

-		 /*
-		  * incompressible kind if
-		  * NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is
-		  * specified, otherwise ignored
-		  */
-		 s16 incompr_kind,
+		       /*
+			* incompressible kind if
+			* NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is
+			* specified, otherwise ignored
+			*/
+		       s16 incompr_kind,

-		 bool user_mapped,
-		 int rw_flag,
-		 u64 buffer_offset,
-		 u64 mapping_size,
-		 struct vm_gk20a_mapping_batch *mapping_batch);
+		       bool user_mapped,
+		       int rw_flag,
+		       u64 buffer_offset,
+		       u64 mapping_size,
+		       struct vm_gk20a_mapping_batch *mapping_batch);

 /*
  * Notes:
@@ -85,7 +86,9 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 			u64 mapping_size,
 			struct vm_gk20a_mapping_batch *batch);

-void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset);
+/* Note: batch may be NULL if unmap op is not part of a batch */
+int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
+			  struct vm_gk20a_mapping_batch *batch);

 /* find buffer corresponding to va */
 int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
@@ -207,14 +207,12 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
 			  struct nvgpu_mapped_buf **mapped_buffers,
 			  int num_buffers);

-/* Note: batch may be NULL if unmap op is not part of a batch */
-int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
-			  struct vm_gk20a_mapping_batch *batch);
-
 void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
 			   struct vm_gk20a_mapping_batch *batch);
 void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref);

+void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset);
+
 /*
  * These all require the VM update lock to be held.
  */
@@ -32,6 +32,7 @@

 #include <nvgpu/vgpu/vm.h>

+#include <nvgpu/linux/vm.h>
 #include <nvgpu/linux/nvgpu_mem.h>

 #include "vgpu/vgpu.h"
@@ -39,8 +40,6 @@
 #include "gk20a/mm_gk20a.h"
 #include "gm20b/mm_gm20b.h"

-#include "common/linux/vm_priv.h"
-
 static int vgpu_init_mm_setup_sw(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
