linux-nvgpu/drivers/gpu/nvgpu/common/linux/vm_priv.h
Alex Waterman 29cc82844e gpu: nvgpu: Split vm_area management into vm code
The vm_reserve_va_node struct is essentially a special VM area that
can be used for sparse mappings and fixed mappings. Its name is
somewhat confusing, since "node" usually denotes a list item;
although this struct is part of a list, it is much more than a list
entry. The struct has therefore been renamed to nvgpu_vm_area to
capture its actual use more accurately.
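
For context, the renamed structure carries roughly the following
state. This is an illustrative sketch only; field names are
paraphrased and may not match the new vm_area code exactly:

    struct nvgpu_vm_area {
    	u64 addr;	/* Base GPU VA of the reserved area. */
    	u64 size;	/* Size of the area in bytes. */
    	u32 pgsz_idx;	/* Page size index used by the area. */
    	u32 flags;	/* Allocation flags (e.g. fixed offset, sparse). */
    	bool sparse;	/* True if the area is backed by a sparse mapping. */

    	/* Buffers currently mapped into this area. */
    	struct nvgpu_list_node buffer_list_head;

    	/* Entry in the owning VM's list of areas. */
    	struct nvgpu_list_node vm_area_list;
    };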

This change also moves all of the vm_area management code into a new
file devoted solely to vm_area management.

Also add a brief overview of the VM architecture. This should help
other people follow the hierarchy of ownership and lifetimes in the
rather complex MM code.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: If85e1cf868031d0dc265e7bed50b58a2aed2602e
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1477744
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
2017-05-19 15:34:12 -07:00

/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __COMMON_LINUX_VM_PRIV_H__
#define __COMMON_LINUX_VM_PRIV_H__

#include <nvgpu/types.h>

struct sg_table;
struct dma_buf;

struct vm_gk20a;
struct vm_gk20a_mapping_batch;
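
/*
 * Attributes of a buffer undergoing a map operation: backing scatter list,
 * size/alignment and compression tag (ctag) state, plus the page size index
 * and kind used for the GMMU mapping.
 */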
struct buffer_attrs {
	struct sg_table *sgt;
	u64 size;
	u64 align;
	u32 ctag_offset;
	u32 ctag_lines;
	u32 ctag_allocated_lines;
	int pgsz_idx;
	u8 kind_v;
	u8 uc_kind_v;
	bool ctag_user_mappable;
};
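
/*
 * Map a dma_buf into the GPU virtual address space of @vm and return the
 * resulting GPU virtual address.
 */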
u64 nvgpu_vm_map(struct vm_gk20a *vm,
		 struct dma_buf *dmabuf,
		 u64 offset_align,
		 u32 flags,
		 int kind,
		 bool user_mapped,
		 int rw_flag,
		 u64 buffer_offset,
		 u64 mapping_size,
		 struct vm_gk20a_mapping_batch *mapping_batch);
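
/*
 * Map the compression-bit (comptags) backing window for the buffer mapped at
 * @mapping_gva; the window's GPU VA is returned through @compbits_win_gva.
 */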
int nvgpu_vm_map_compbits(struct vm_gk20a *vm,
			  u64 mapping_gva,
			  u64 *compbits_win_gva,
			  u64 *mapping_iova,
			  u32 flags);
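
/*
 * Map the dma_buf referred to by @dmabuf_fd into @vm; the resulting GPU VA
 * is returned through @offset_align.
 */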
/* Note: batch may be NULL if map op is not part of a batch */
int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
			int dmabuf_fd,
			u64 *offset_align,
			u32 flags, /* NVGPU_AS_MAP_BUFFER_FLAGS_ */
			int kind,
			u64 buffer_offset,
			u64 mapping_size,
			struct vm_gk20a_mapping_batch *batch);
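
/* Unmap whatever buffer is currently mapped at GPU VA @offset. */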
void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset);

/* find buffer corresponding to va */
int nvgpu_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
			 struct dma_buf **dmabuf,
			 u64 *offset);
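
/* Determine which aperture (sysmem or vidmem) backs the given dma_buf. */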
enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
					  struct dma_buf *dmabuf);
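
/*
 * Check that a fixed-offset mapping of @map_size bytes at @map_offset lands
 * inside a previously reserved VM area; the matching area is returned
 * through @pva_node.
 */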
int validate_fixed_buffer(struct vm_gk20a *vm,
			  struct buffer_attrs *bfr,
			  u64 map_offset, u64 map_size,
			  struct nvgpu_vm_area **pva_node);
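
/*
 * Resolve the kind for a buffer and, if that kind is compressible, fill in
 * the compression tag fields of @bfr.
 */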
int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
				      u32 flags,
				      struct buffer_attrs *bfr,
				      enum gmmu_pgsz_gk20a pgsz_idx);
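
/*
 * Allocate @lines comptag lines for @dmabuf from @allocator, optionally
 * reporting the comptags window size and starting ctag line through the out
 * parameters.
 */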
int gk20a_alloc_comptags(struct gk20a *g,
			 struct device *dev,
			 struct dma_buf *dmabuf,
			 struct gk20a_comptag_allocator *allocator,
			 u32 lines, bool user_mappable,
			 u64 *ctag_map_win_size,
			 u32 *ctag_map_win_ctagline);

#endif