linux-nvgpu/drivers/gpu/nvgpu/common/mm/gmmu.c
Alex Waterman c3fa78b1d9 gpu: nvgpu: Separate GMMU out of mm_gk20a.c
Begin moving (and renaming) the GMMU code into common/mm/gmmu.c. This
block of code will be responsible for handling the platform/OS
independent GMMU operations.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: Ide761bab75e5d84be3dcb977c4842ae4b3a7c1b3
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1464083
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
2017-05-11 06:04:12 -07:00

/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <nvgpu/log.h>
#include <nvgpu/gmmu.h>
#include <nvgpu/nvgpu_mem.h>

#include "gk20a/gk20a.h"
#include "gk20a/mm_gk20a.h"
/*
 * Core GMMU map function for the kernel to use. If @addr is 0 then the GPU
 * VA will be allocated for you. If @addr is non-zero then the buffer will be
 * mapped at @addr.
 */
static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
                            struct nvgpu_mem *mem,
                            u64 addr,
                            u64 size,
                            u32 flags,
                            int rw_flag,
                            bool priv,
                            enum nvgpu_aperture aperture)
{
        struct gk20a *g = gk20a_from_vm(vm);
        u64 vaddr;

        struct sg_table *sgt = mem->priv.sgt;

        nvgpu_mutex_acquire(&vm->update_gmmu_lock);
        vaddr = g->ops.mm.gmmu_map(vm, addr,
                                   sgt,    /* sg table */
                                   0,      /* sg offset */
                                   size,
                                   gmmu_page_size_kernel,
                                   0,      /* kind */
                                   0,      /* ctag_offset */
                                   flags, rw_flag,
                                   false,  /* clear_ctags */
                                   false,  /* sparse */
                                   priv,   /* priv */
                                   NULL,   /* mapping_batch handle */
                                   aperture);
        nvgpu_mutex_release(&vm->update_gmmu_lock);

        if (!vaddr) {
                nvgpu_err(g, "failed to allocate va space");
                return 0;
        }

        return vaddr;
}
/*
 * Map @mem into @vm at a GPU virtual address chosen by the allocator.
 */
u64 nvgpu_gmmu_map(struct vm_gk20a *vm,
                   struct nvgpu_mem *mem,
                   u64 size,
                   u32 flags,
                   int rw_flag,
                   bool priv,
                   enum nvgpu_aperture aperture)
{
        return __nvgpu_gmmu_map(vm, mem, 0, size, flags, rw_flag, priv,
                        aperture);
}
/*
 * Like nvgpu_gmmu_map() except the mapping is placed at the fixed GPU
 * virtual address @addr instead of an allocator-chosen one.
 */
u64 nvgpu_gmmu_map_fixed(struct vm_gk20a *vm,
                         struct nvgpu_mem *mem,
                         u64 addr,
                         u64 size,
                         u32 flags,
                         int rw_flag,
                         bool priv,
                         enum nvgpu_aperture aperture)
{
        return __nvgpu_gmmu_map(vm, mem, addr, size, flags, rw_flag, priv,
                        aperture);
}
/*
 * Unmap a GPU virtual address previously mapped by one of the
 * nvgpu_gmmu_map() variants.
 */
void nvgpu_gmmu_unmap(struct vm_gk20a *vm, struct nvgpu_mem *mem, u64 gpu_va)
{
        struct gk20a *g = gk20a_from_vm(vm);

        nvgpu_mutex_acquire(&vm->update_gmmu_lock);
        g->ops.mm.gmmu_unmap(vm,
                             gpu_va,
                             mem->size,
                             gmmu_page_size_kernel,
                             true, /* va_allocated */
                             gk20a_mem_flag_none,
                             false,
                             NULL);
        nvgpu_mutex_release(&vm->update_gmmu_lock);
}