Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: fix REMAP to support small/big pages
Initially, REMAP only worked with big pages, but in some cases only small
pages are supported and REMAP functionality is still needed. This change
cleans up some page-size assumptions. In particular, on a remap request the
nvgpu_vm_area is found from the passed-in VA, which can only be computed
from virt_offset_in_pages if we are also told the page size. The page size
now comes from the _PAGESIZE_ flags, which are required by both map and
unmap operations.

Jira NVGPU-6804

Change-Id: I311980a1b5e0e5e1840bdc1123479350a5c9d469
Signed-off-by: Chris Johnson <cwj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2566087
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 359e83b45a
Commit: 14ed75e857
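
The change is easiest to see from the caller's side. Below is a minimal
caller-side sketch (not part of this commit): the struct is a stand-in that
mirrors the nvgpu_as_remap_op fields visible in this diff, the kind fields
and the nvgpu_as_remap_args ioctl plumbing are elided, and the mem_handle
value is hypothetical. The diff itself follows; note that it spans five
files whose names this mirror view does not preserve.

/*
 * Caller-side sketch only; stand-in struct mirroring the fields of
 * struct nvgpu_as_remap_op that appear in this diff.
 */
#include <stdint.h>
#include <stdio.h>

#define NVGPU_AS_REMAP_OP_FLAGS_CACHEABLE	(1 << 2)
#define NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_4K	(1 << 15)
#define NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_64K	(1 << 16)

struct remap_op_sketch {	/* stand-in for struct nvgpu_as_remap_op */
        uint32_t flags;
        uint32_t mem_handle;
        uint64_t mem_offset_in_pages;
        uint64_t virt_offset_in_pages;
        uint64_t num_pages;
};

int main(void)
{
        /* Map 16 small (4K) pages; before this change REMAP assumed big pages. */
        struct remap_op_sketch map_op = {
                .flags = NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_4K |
                         NVGPU_AS_REMAP_OP_FLAGS_CACHEABLE,
                .mem_handle = 42,	/* hypothetical buffer handle */
                .virt_offset_in_pages = 0x100,
                .num_pages = 16,
        };

        /* Unmap: mem_handle == 0 selects unmap, and exactly one _PAGESIZE_
         * flag is now required there too (no other flags allowed). */
        struct remap_op_sketch unmap_op = {
                .flags = NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_4K,
                .virt_offset_in_pages = 0x100,
                .num_pages = 16,
        };

        printf("map flags=0x%x unmap flags=0x%x\n",
               (unsigned)map_op.flags, (unsigned)unmap_op.flags);
        return 0;
}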
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -245,8 +245,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u64 pages, u32 page_size,
         nvgpu_init_list_node(&vm_area->vm_area_list);
 
 #ifdef CONFIG_NVGPU_REMAP
-        if (((flags & NVGPU_VM_AREA_ALLOC_SPARSE) != 0U) &&
-            (vm_area->pgsz_idx == GMMU_PAGE_SIZE_BIG)) {
+        if ((flags & NVGPU_VM_AREA_ALLOC_SPARSE) != 0U) {
                 err = nvgpu_vm_remap_vpool_create(vm, vm_area, pages);
                 if (err != 0) {
                         goto free_vaddr;

[next file]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -28,23 +28,6 @@
 #include <nvgpu/string.h>
 #include <nvgpu/power_features/pg.h>
 
-/*
- * Return page size index of page size for VM areas that can be used with
- * remap.
- */
-static inline u32 nvgpu_vm_remap_pgsz_idx(struct vm_gk20a *vm)
-{
-        return GMMU_PAGE_SIZE_BIG;
-}
-
-/*
- * Return page size for VM areas that can be used with remap.
- */
-static inline u64 nvgpu_vm_remap_page_size(struct vm_gk20a *vm)
-{
-        return vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG];
-}
-
 /*
  * Return a pointer to the os-specific structure for the specified physical
  * memory pool.

@@ -184,6 +167,11 @@ static int nvgpu_vm_remap_validate_vpool(struct nvgpu_vm_remap_vpool *vpool,
 {
         u64 first_page = op->virt_offset_in_pages;
         u64 last_page = op->virt_offset_in_pages + op->num_pages - 1ULL;
+        u64 page_size = nvgpu_vm_remap_page_size(op);
+
+        if (page_size == 0) {
+                return -EINVAL;
+        }
 
         if (first_page < vpool->base_offset_in_pages ||
             last_page >= vpool->base_offset_in_pages + vpool->num_pages ||

@@ -212,7 +200,7 @@ static int nvgpu_vm_remap_validate_map(struct vm_gk20a *vm,
         struct nvgpu_vm_remap_op *op,
         struct nvgpu_vm_remap_os_buffer *remap_os_buf)
 {
-        u64 page_size = nvgpu_vm_remap_page_size(vm);
+        u64 page_size = nvgpu_vm_remap_page_size(op);
         u64 map_offset;
         u64 map_size;
         u64 os_buf_size;
@@ -278,11 +266,19 @@ static struct nvgpu_vm_remap_vpool *nvgpu_vm_remap_get_vpool_locked(
         struct vm_gk20a *vm, struct nvgpu_vm_remap_op *op)
 {
         struct gk20a *g = gk20a_from_vm(vm);
-        u64 page_size = nvgpu_vm_remap_page_size(vm);
-        u64 offset = nvgpu_safe_mult_u64(op->virt_offset_in_pages, page_size);
-        struct nvgpu_vm_area *vm_area = nvgpu_vm_area_find(vm, offset);
+        u64 page_size = nvgpu_vm_remap_page_size(op);
+        u64 offset;
+        struct nvgpu_vm_area *vm_area;
 
-        if ((vm_area == NULL) || (vm_area->vpool == NULL)) {
+        if (page_size == 0) {
+                return NULL;
+        }
+
+        offset = nvgpu_safe_mult_u64(op->virt_offset_in_pages, page_size);
+        vm_area = nvgpu_vm_area_find(vm, offset);
+
+        if ((vm_area == NULL) || (vm_area->vpool == NULL) ||
+            (vm->gmmu_page_sizes[vm_area->pgsz_idx] != page_size)) {
                 return NULL;
         }
 
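
The lookup above is the heart of the fix described in the commit message: a
remap op names its VA only in pages, so the byte offset handed to
nvgpu_vm_area_find() depends entirely on which _PAGESIZE_ flag was set. A
self-contained sketch with illustrative numbers (not driver code):

/* The same virt_offset_in_pages denotes different byte addresses
 * depending on the page size, so the vm_area lookup cannot work
 * without knowing it. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t virt_offset_in_pages = 0x100;
        uint64_t sizes[] = { 0x1000, 0x10000, 0x20000 };	/* 4K, 64K, 128K */

        for (int i = 0; i < 3; i++)
                printf("page size 0x%llx -> VA 0x%llx\n",
                       (unsigned long long)sizes[i],
                       (unsigned long long)(virt_offset_in_pages * sizes[i]));
        /* prints VA 0x100000, 0x1000000, 0x2000000 respectively */
        return 0;
}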
@@ -358,7 +354,7 @@ static u64 nvgpu_vm_remap_get_ctag_offset(struct vm_gk20a *vm,
         struct gk20a_comptags comptags;
         u64 ctag = 0;
         u64 ctag_offset = 0;
-        u64 page_size = nvgpu_vm_remap_page_size(vm);
+        u64 page_size = nvgpu_vm_remap_page_size(op);
         u64 phys_offset = nvgpu_safe_mult_u64(op->mem_offset_in_pages,
                 page_size);
 

@@ -423,8 +419,8 @@ static int nvgpu_vm_remap_execute_remaps(struct vm_gk20a *vm,
         struct gk20a *g = gk20a_from_vm(vm);
         struct nvgpu_vm_remap_op *op;
         struct nvgpu_vm_remap_os_buffer *remap_os_buf;
-        u32 pgsz_idx = nvgpu_vm_remap_pgsz_idx(vm);
-        u64 page_size = nvgpu_vm_remap_page_size(vm);
+        u32 pgsz_idx = vpool->vm_area->pgsz_idx;
+        u64 page_size = vm->gmmu_page_sizes[pgsz_idx];
         u64 map_addr = 0;
         u64 phys_offset = 0;
         u64 map_size;

@@ -733,7 +729,6 @@ int nvgpu_vm_remap_vpool_create(struct vm_gk20a *vm,
         u64 start_page_nr = 0;
 
         if ((num_pages == 0ULL) ||
-            (vm_area->pgsz_idx != GMMU_PAGE_SIZE_BIG) ||
             ((vm_area->flags & NVGPU_VM_AREA_ALLOC_SPARSE) == 0U)) {
                 return -EINVAL;
         }

@@ -750,6 +745,7 @@
         vp->num_pages = num_pages;
         vp->vm = vm;
 
+        vp->vm_area = vm_area;
         vm_area->vpool = vp;
 
         return 0;

[next file]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -37,14 +37,17 @@
  */
 #define NVGPU_VM_REMAP_OP_FLAGS_CACHEABLE		BIT32(1)
 #define NVGPU_VM_REMAP_OP_FLAGS_ACCESS_NO_WRITE	BIT32(7)
+#define NVGPU_VM_REMAP_OP_FLAGS_PAGESIZE_4K		BIT32(12)
+#define NVGPU_VM_REMAP_OP_FLAGS_PAGESIZE_64K		BIT32(13)
+#define NVGPU_VM_REMAP_OP_FLAGS_PAGESIZE_128K		BIT32(14)
 /**
  * This structure describes a single remap operation (either a map or unmap).
  */
 struct nvgpu_vm_remap_op {
         /**
-         * When a map operation is specified this field contains any flags
-         * to use when setting up the mapping. When an unmap operation is
-         * specified this field must be zero.
+         * When a map/unmap operation is specified this field contains flags
+         * needed to determine the page size used to generate the map/unmap
+         * mem and virt offsets and/or flags used when setting up the mapping.
         */
         u32 flags;
 

@@ -148,11 +151,27 @@ nvgpu_vm_remap_mpool_from_ref(struct nvgpu_ref *ref)
                 offsetof(struct nvgpu_vm_remap_mpool, ref));
 }
 
+static inline u64 nvgpu_vm_remap_page_size(struct nvgpu_vm_remap_op *op)
+{
+        u64 pagesize = 0;
+
+        /* validate_map/unmap_op ensures a single pagesize flag */
+        if (op->flags & NVGPU_VM_REMAP_OP_FLAGS_PAGESIZE_4K)
+                pagesize = SZ_4K;
+        if (op->flags & NVGPU_VM_REMAP_OP_FLAGS_PAGESIZE_64K)
+                pagesize = SZ_64K;
+        if (op->flags & NVGPU_VM_REMAP_OP_FLAGS_PAGESIZE_128K)
+                pagesize = SZ_128K;
+
+        nvgpu_assert(pagesize);
+        return pagesize;
+}
+
 /**
  * This structure describes a virtual memory pool.
- * There is one virtual memory pool for each sparse VM area allocation that
- * uses big pages. A virtual memory pool tracks the association between
- * each mapped big page in the pool and the corresponding physical memory.
+ * There is one virtual memory pool for each sparse VM area allocation.
+ * A virtual memory pool tracks the association between each mapped page
+ * in the pool and the corresponding physical memory.
  */
 struct nvgpu_vm_remap_vpool {
         /**
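
For reference (not part of the commit), here is the round trip the new
helper completes, as a self-contained sketch: a UAPI _PAGESIZE_ bit is
translated to the core BIT32(12..14) flag, and the helper above maps the
core flag back to a byte page size. Only the 64K case is shown; the bit
positions and sizes are taken from this diff.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define AS_PAGESIZE_64K (1u << 16)	/* NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_64K */
#define VM_PAGESIZE_64K (1u << 13)	/* NVGPU_VM_REMAP_OP_FLAGS_PAGESIZE_64K */
#define SZ_64K          0x10000u

static uint32_t translate_as_flags(uint32_t as_flags)
{
        uint32_t core_flags = 0;

        if ((as_flags & AS_PAGESIZE_64K) != 0)
                core_flags |= VM_PAGESIZE_64K;
        /* the 4K and 128K bits translate the same way */
        return core_flags;
}

static uint64_t remap_page_size(uint32_t core_flags)
{
        return ((core_flags & VM_PAGESIZE_64K) != 0) ? SZ_64K : 0;
}

int main(void)
{
        uint32_t core = translate_as_flags(AS_PAGESIZE_64K);

        assert(remap_page_size(core) == SZ_64K);
        printf("UAPI bit 16 -> core bit 13 -> page size 0x%llx\n",
               (unsigned long long)remap_page_size(core));
        return 0;
}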
@@ -160,6 +179,11 @@ struct nvgpu_vm_remap_vpool {
         */
         struct vm_gk20a *vm;
 
+        /**
+         * Pointer to associated VM area.
+         */
+        struct nvgpu_vm_area *vm_area;
+
         /**
         * Tree of physical memory pools that are currently mapped to this
         * virtual pool.

@@ -243,7 +267,6 @@ int nvgpu_vm_remap(struct vm_gk20a *vm, struct nvgpu_vm_remap_op *ops,
  * @param num_pages [in] Number of pages in virtual memory pool.
  *
  * - Check that #num_pages is non-zero.
- * - Check that VM area is using big pages.
  * - Check that VM area is configured as sparse.
  * - Allocate memory for internal virtual pool management structures.
  * - Initialize virtual pool management structures including storing #vm

@@ -253,7 +276,6 @@
  * @return Zero if the virtual pool create succeeds.
  * Suitable errors, for failures.
  * @retval -EINVAL if a value of zero is specified for #num_pages.
- * @retval -EINVAL if the VM area is not using big pages.
  * @retval -EINVAL if the VM area is not configured as sparse.
  * @retval -ENOMEM if memory allocation for internal resources fails.
  */

[next file]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -157,23 +157,50 @@ void nvgpu_vm_remap_os_buf_put(struct vm_gk20a *vm,
 static int nvgpu_vm_remap_validate_map_op(struct nvgpu_as_remap_op *op)
 {
         int err = 0;
-        u32 valid_flags = (NVGPU_AS_REMAP_OP_FLAGS_CACHEABLE |
+        const u32 pagesize_flags = (NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_4K |
+                        NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_64K |
+                        NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_128K);
+        const u32 valid_flags = (pagesize_flags |
+                        NVGPU_AS_REMAP_OP_FLAGS_CACHEABLE |
                         NVGPU_AS_REMAP_OP_FLAGS_ACCESS_NO_WRITE);
+        const u32 pagesize = op->flags & pagesize_flags;
 
         if ((op->flags & ~valid_flags) != 0) {
                 err = -EINVAL;
         }
 
+        /* must be set and to a single pagesize */
+        if ((pagesize != NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_4K) &&
+            (pagesize != NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_64K) &&
+            (pagesize != NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_128K)) {
+                err = -EINVAL;
+        }
+
         return err;
 }
 
 static int nvgpu_vm_remap_validate_unmap_op(struct nvgpu_as_remap_op *op)
 {
         int err = 0;
+        const u32 pagesize_flags = (NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_4K |
+                        NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_64K |
+                        NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_128K);
+        const u32 valid_flags = pagesize_flags;
+        const u32 pagesize = op->flags & pagesize_flags;
 
+        if ((op->flags & ~valid_flags) != 0) {
+                err = -EINVAL;
+        }
+
+        /* must be set and to a single pagesize */
+        if ((pagesize != NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_4K) &&
+            (pagesize != NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_64K) &&
+            (pagesize != NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_128K)) {
+                err = -EINVAL;
+        }
+
         if ((op->compr_kind != NVGPU_KIND_INVALID) ||
             (op->incompr_kind != NVGPU_KIND_INVALID) ||
-            (op->flags != 0) ||
             (op->mem_offset_in_pages != 0)) {
                 err = -EINVAL;
         }
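
The mask-and-compare pattern above is worth calling out: because each
_PAGESIZE_ constant is a single bit, comparing the masked value against each
constant rejects both "no page size set" and "more than one set" in one
test. A standalone demonstration (illustrative harness, not driver code):

#include <stdint.h>
#include <stdio.h>

#define PAGESIZE_4K   (1u << 15)
#define PAGESIZE_64K  (1u << 16)
#define PAGESIZE_128K (1u << 17)
#define PAGESIZE_MASK (PAGESIZE_4K | PAGESIZE_64K | PAGESIZE_128K)

static int pagesize_flag_valid(uint32_t flags)
{
        uint32_t pagesize = flags & PAGESIZE_MASK;

        return (pagesize == PAGESIZE_4K) ||
               (pagesize == PAGESIZE_64K) ||
               (pagesize == PAGESIZE_128K);
}

int main(void)
{
        printf("%d\n", pagesize_flag_valid(0));                          /* 0: none set */
        printf("%d\n", pagesize_flag_valid(PAGESIZE_64K));               /* 1: exactly one */
        printf("%d\n", pagesize_flag_valid(PAGESIZE_4K | PAGESIZE_64K)); /* 0: two set */
        return 0;
}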
@@ -191,7 +218,15 @@ static u32 nvgpu_vm_remap_translate_as_flags(u32 flags)
         if ((flags & NVGPU_AS_REMAP_OP_FLAGS_ACCESS_NO_WRITE) != 0) {
                 core_flags |= NVGPU_VM_REMAP_OP_FLAGS_ACCESS_NO_WRITE;
         }
+        if ((flags & NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_4K) != 0) {
+                core_flags |= NVGPU_VM_REMAP_OP_FLAGS_PAGESIZE_4K;
+        }
+        if ((flags & NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_64K) != 0) {
+                core_flags |= NVGPU_VM_REMAP_OP_FLAGS_PAGESIZE_64K;
+        }
+        if ((flags & NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_128K) != 0) {
+                core_flags |= NVGPU_VM_REMAP_OP_FLAGS_PAGESIZE_128K;
+        }
         return core_flags;
 }
 

@@ -201,7 +236,6 @@ int nvgpu_vm_remap_translate_as_op(struct vm_gk20a *vm,
 {
         int err = 0;
         u64 page_size;
-        u64 max_num_pages;
 
         if (as_op->mem_handle == 0) {
                 err = nvgpu_vm_remap_validate_unmap_op(as_op);

@@ -212,18 +246,17 @@
         if (err != 0)
                 goto clean_up;
 
-        page_size = vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG];
-        max_num_pages = (ULONG_MAX / page_size);
+        vm_op->flags = nvgpu_vm_remap_translate_as_flags(as_op->flags);
+        page_size = nvgpu_vm_remap_page_size(vm_op);
 
-        if ((as_op->num_pages == 0) ||
-            (as_op->num_pages > max_num_pages) ||
-            (as_op->mem_offset_in_pages > max_num_pages) ||
-            (as_op->virt_offset_in_pages > max_num_pages)) {
+        if ((as_op->num_pages == 0) || (page_size == 0) ||
+            (as_op->num_pages > (vm->va_limit / page_size)) ||
+            (as_op->mem_offset_in_pages > (vm->va_limit / page_size)) ||
+            (as_op->virt_offset_in_pages > (vm->va_limit / page_size))) {
                 err = -EINVAL;
                 goto clean_up;
         }
 
-        vm_op->flags = nvgpu_vm_remap_translate_as_flags(as_op->flags);
         vm_op->compr_kind = as_op->compr_kind;
         vm_op->incompr_kind = as_op->incompr_kind;
         vm_op->mem_handle = as_op->mem_handle;
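
Note the reworked bounds above: the old limit was a fixed
ULONG_MAX / big_page_size, while the new one ties page counts to the VM's
addressable range at the op's own page size, vm->va_limit / page_size. As a
worked example with a hypothetical 40-bit va_limit: a 64K-page op may span
at most 2^40 / 2^16 = 2^24 pages, while the same check admits
2^40 / 2^12 = 2^28 pages for a 4K op.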

[next file]
@@ -454,7 +454,14 @@ struct nvgpu_as_mapping_modify_args {
  * must be set if the physical memory buffer represented by @mem_handle
  * is mapped read-only.
  *
- * This field must be zero for unmap operations.
+ * %NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_4K
+ * %NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_64K
+ * %NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_128K
+ *
+ * One, and only one, of these flags must be set for both map/unmap
+ * ops and indicates the assumed page size of the mem_offset_in_pages
+ * and virt_offset_in_pages. This value is also verified against the
+ * page size of the address space.
  *
  * @compr_kind [IN/OUT]
  * @incompr_kind [IN/OUT]

@@ -479,16 +486,14 @@
  *
  * @mem_offset_in_pages [IN]
  *
- * Specify an offset into the physical buffer associated with mem_handle at
- * which to start the mapping. This value is in pages and the page size
- * is the big page size in the associated sparse address space. This value
- * must be zero for unmap operations.
+ * Specify an offset (in pages) into the physical buffer associated with
+ * mem_handle at which to start the mapping. This value must be zero for
+ * unmap operations.
  *
  * @virt_offset_in_pages [IN]
  *
- * Specify the virtual memory start offset of the region to map or unmap.
- * This value is in pages and the page size is the big page size in the
- * associated sparse address space.
+ * Specify the virtual memory start offset (in pages) of the region to map
+ * or unmap.
  *
  * @num_pages [IN]
  * Specify the number of pages to map or unmap.

@@ -496,10 +501,13 @@
 struct nvgpu_as_remap_op {
 #define NVGPU_AS_REMAP_OP_FLAGS_CACHEABLE		(1 << 2)
 #define NVGPU_AS_REMAP_OP_FLAGS_ACCESS_NO_WRITE	(1 << 10)
-        /* in: For map operations, this field specifies the mask of
-         * NVGPU_AS_REMAP flags to use for the mapping. For unmap operations
-         * this field must be zero */
+#define NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_4K		(1 << 15)
+#define NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_64K		(1 << 16)
+#define NVGPU_AS_REMAP_OP_FLAGS_PAGESIZE_128K		(1 << 17)
+
+        /* in: For map and unmap (one and only one) of the _PAGESIZE_ flags is
+         * required to interpret the mem_offset_in_pages and virt_offset_in_pages
+         * correctly. The other flags are used only with map operations. */
         __u32 flags;
 
         /* in: For map operations, this field specifies the desired

@@ -550,7 +558,7 @@ struct nvgpu_as_remap_op {
  * that have been allocated with NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE.
  * Validation of remap operations is performed before any changes are made
  * to the associated sparse address space so either all map and/or unmap
- * operations are performed or none of them area.
+ * operations are performed or none of them are.
  */
 struct nvgpu_as_remap_args {
         /* in: This field specifies a pointer into the caller's address space