Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
Synced 2025-12-22 17:36:20 +03:00
This patch begins splitting the VM implementation out of mm_gk20a.c and moves it to common/linux/vm.c and common/mm/vm.c. This split is necessary because the VM code has two portions: first, an interface for the OS-specific code to use (i.e., userspace mappings), and second, a set of APIs for the driver to use (init, cleanup, etc.) which are not OS specific.

This is only the beginning of the split - there are still many things that need to be carefully moved around.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: I3b57cba245d7daf9e4326a143b9c6217e0f28c96
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1477743
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
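To make the intended layering concrete, here is a minimal sketch of the split the commit message describes. All names and bodies below are hypothetical illustrations, not the actual nvgpu code:

/*
 * Hypothetical illustration of the layering described above; the
 * sketch_* names are invented for this example.
 */

/* common/mm/vm.c: OS-agnostic driver APIs (init, cleanup, ...). */
struct sketch_vm {
	unsigned long va_start;
	unsigned long va_limit;
	int ref;
};

static int sketch_vm_init(struct sketch_vm *vm,
			  unsigned long start, unsigned long limit)
{
	/* Pure bookkeeping: nothing here touches Linux kernel types. */
	vm->va_start = start;
	vm->va_limit = limit;
	vm->ref = 1;
	return 0;
}

/*
 * common/linux/vm.c: the OS-specific interface (userspace mappings).
 * Only this layer handles Linux constructs such as dma-buf fds; it
 * translates them, then calls into the common layer.
 */
static int sketch_vm_map_userspace(struct sketch_vm *vm, int dmabuf_fd)
{
	/* dma_buf_get(fd), dma_buf_attach(), etc. would live here so
	 * that common/mm/ stays free of Linux dependencies. */
	(void)vm;
	(void)dmabuf_fd;
	return 0;
}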
95 lines
2.6 KiB
C
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __COMMON_LINUX_VM_PRIV_H__
#define __COMMON_LINUX_VM_PRIV_H__

#include <nvgpu/types.h>

struct sg_table;
struct dma_buf;

struct vm_gk20a;
struct vm_gk20a_mapping_batch;

struct buffer_attrs {
	struct sg_table *sgt;
	u64 size;
	u64 align;
	u32 ctag_offset;
	u32 ctag_lines;
	u32 ctag_allocated_lines;
	int pgsz_idx;
	u8 kind_v;
	u8 uc_kind_v;
	bool ctag_user_mappable;
};

u64 nvgpu_vm_map(struct vm_gk20a *vm,
		 struct dma_buf *dmabuf,
		 u64 offset_align,
		 u32 flags,
		 int kind,
		 bool user_mapped,
		 int rw_flag,
		 u64 buffer_offset,
		 u64 mapping_size,
		 struct vm_gk20a_mapping_batch *mapping_batch);

int nvgpu_vm_map_compbits(struct vm_gk20a *vm,
			  u64 mapping_gva,
			  u64 *compbits_win_gva,
			  u64 *mapping_iova,
			  u32 flags);

/* Note: batch may be NULL if map op is not part of a batch */
int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
			int dmabuf_fd,
			u64 *offset_align,
			u32 flags, /* NVGPU_AS_MAP_BUFFER_FLAGS_ */
			int kind,
			u64 buffer_offset,
			u64 mapping_size,
			struct vm_gk20a_mapping_batch *batch);

void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset);

/* find buffer corresponding to va */
int nvgpu_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
			 struct dma_buf **dmabuf,
			 u64 *offset);

enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
					  struct dma_buf *dmabuf);
int validate_fixed_buffer(struct vm_gk20a *vm,
			  struct buffer_attrs *bfr,
			  u64 map_offset, u64 map_size,
			  struct vm_reserved_va_node **pva_node);
int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
				      u32 flags,
				      struct buffer_attrs *bfr,
				      enum gmmu_pgsz_gk20a pgsz_idx);
int gk20a_alloc_comptags(struct gk20a *g,
			 struct device *dev,
			 struct dma_buf *dmabuf,
			 struct gk20a_comptag_allocator *allocator,
			 u32 lines, bool user_mappable,
			 u64 *ctag_map_win_size,
			 u32 *ctag_map_win_ctagline);
void gk20a_vm_unmap_locked_kref(struct kref *ref);

#endif
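For context, here is a hedged sketch of how the Linux-specific layer might drive the map/unmap entry points declared above. The surrounding function and all argument values are illustrative assumptions, not taken from the driver:

/* Illustrative caller, assuming mapping_size == 0 requests the whole
 * buffer and that *offset_align returns the chosen GPU VA; both are
 * assumptions about the API, not statements of fact.
 */
static int example_map_and_unmap(struct vm_gk20a *vm, int dmabuf_fd)
{
	u64 offset_align = 0; /* 0: let the allocator choose the GPU VA */
	int err;

	/* batch == NULL is permitted; see the note above
	 * nvgpu_vm_map_buffer(). */
	err = nvgpu_vm_map_buffer(vm, dmabuf_fd, &offset_align,
				  0 /* flags: NVGPU_AS_MAP_BUFFER_FLAGS_ */,
				  0 /* kind */,
				  0 /* buffer_offset */,
				  0 /* mapping_size */,
				  NULL /* batch */);
	if (err)
		return err;

	/* Tear the mapping back down by its GPU virtual address. */
	nvgpu_vm_unmap(vm, offset_align);
	return 0;
}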