Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: add linux MAPPING_MODIFY ioctl
Add new MAPPING_MODIFY ioctl to the linux nvgpu driver. This ioctl is used
(for example) by the NvRmGpuMappingModify API to change the kind of an
existing mapping.

For compressed mappings the ioctl can be used to do the following:
 * switch between two different compressed kinds
 * switch between compressed and incompressed kinds

For incompressed mappings the ioctl can be used to do the following:
 * switch between two different incompressed kinds

In order to properly update an existing mapping the nvgpu_mapped_buf
structure has been extended to cache the following state when the mapping
is first created:
 * the compression tag offset (if applicable)
 * the GMMU read/write flags
 * the memory aperture

The unused ctag_lines field in the nvgpu_ctag_buffer_info structure has
been replaced with a new ctag_offset field.

Jira NVGPU-6374

Change-Id: I647ab9c2c272e3f9b52f1ccefc5e0de4577c14f1
Signed-off-by: scottl <scottl@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2468100
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 47f2e68013
commit 456a814db5
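For context, a minimal user-space sketch of driving the new ioctl follows; it is not part of this change. The device node handling, the example kind value, and the helper name are illustrative assumptions. Only NVGPU_AS_IOCTL_MAPPING_MODIFY, struct nvgpu_as_mapping_modify_args, and NV_KIND_INVALID come from the nvgpu uapi header touched by this commit.

/*
 * Hypothetical usage sketch (not part of this commit). Assumes "as_fd" is an
 * open nvgpu address-space fd and that a fixed-address mapping of buf_size
 * bytes already exists at GPU VA map_va.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>	/* NVGPU_AS_IOCTL_MAPPING_MODIFY, NV_KIND_INVALID */

#define EXAMPLE_INCOMPR_KIND	0x00	/* hypothetical incompressible kind value */

static int switch_mapping_to_incompressible(int as_fd, uint64_t map_va,
					     uint64_t buf_size)
{
	struct nvgpu_as_mapping_modify_args args = {
		.compr_kind    = NV_KIND_INVALID,	/* do not use compression */
		.incompr_kind  = EXAMPLE_INCOMPR_KIND,	/* new incompressible kind */
		.map_address   = map_va,	/* base GPU VA of existing mapping */
		.buffer_offset = 0,		/* whole buffer; must be page aligned */
		.buffer_size   = buf_size,	/* must be page aligned as well */
	};

	if (ioctl(as_fd, NVGPU_AS_IOCTL_MAPPING_MODIFY, &args) != 0) {
		perror("NVGPU_AS_IOCTL_MAPPING_MODIFY");
		return -1;
	}
	return 0;
}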
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -51,7 +51,7 @@ struct nvgpu_ctag_buffer_info {
 #endif
 	s16 incompr_kind;
 
-	u32 ctag_lines;
+	u32 ctag_offset;
 };
 
 #ifdef CONFIG_NVGPU_COMPRESSION
@@ -1292,6 +1292,7 @@ static int nvgpu_vm_do_map(struct vm_gk20a *vm,
 		nvgpu_assert((binfo_ptr->compr_kind >= 0) &&
 			(binfo_ptr->compr_kind <= (s16)U8_MAX));
 		pte_kind = (u8)binfo_ptr->compr_kind;
+		binfo_ptr->ctag_offset = ctag_offset;
 	} else
 #endif
 	if (binfo_ptr->incompr_kind != NVGPU_KIND_INVALID) {
@@ -1540,6 +1541,9 @@ int nvgpu_vm_map(struct vm_gk20a *vm,
 	mapped_buffer->kind = map_key_kind;
 	mapped_buffer->va_allocated = va_allocated;
 	mapped_buffer->vm_area = vm_area;
+	mapped_buffer->ctag_offset = binfo.ctag_offset;
+	mapped_buffer->rw_flag = rw;
+	mapped_buffer->aperture = aperture;
 
 	nvgpu_insert_mapped_buf(vm, mapped_buffer);
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -86,6 +86,11 @@ int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
 			struct dma_buf **dmabuf,
 			u64 *offset);
 
+/* modify existing mapping attributes (i.e. kind) */
+int nvgpu_vm_mapping_modify(struct vm_gk20a *vm,
+			s16 compr_kind, s16 incompr_kind,
+			u64 map_address, u64 buffer_offset, u64 buffer_size);
+
 enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
 			struct dma_buf *dmabuf);
 
@@ -182,6 +182,19 @@ struct nvgpu_mapped_buf {
 	 * by user space or not.
 	 */
 	bool va_allocated;
+	/**
+	 * Offset into compression tags pool if compression enabled.
+	 */
+	u32 ctag_offset;
+	/**
+	 * GMMU read/write flags specified when mapping was created.
+	 */
+	enum gk20a_mem_rw_flag rw_flag;
+	/**
+	 * Aperture specified when mapping was created.
+	 */
+	enum nvgpu_aperture aperture;
+
 	/**
 	 * Os specific buffer structure.
 	 * Separate from the nvgpu_os_buffer struct to clearly distinguish
@@ -1,7 +1,7 @@
 /*
  * GK20A Address Spaces
  *
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -302,6 +302,22 @@ static int nvgpu_as_ioctl_get_sync_ro_map(
 #endif
 }
 
+static int nvgpu_as_ioctl_mapping_modify(
+		struct gk20a_as_share *as_share,
+		struct nvgpu_as_mapping_modify_args *args)
+{
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
+
+	nvgpu_log_fn(g, " ");
+
+	return nvgpu_vm_mapping_modify(as_share->vm,
+			args->compr_kind,
+			args->incompr_kind,
+			args->map_address,
+			args->buffer_offset,
+			args->buffer_size);
+}
+
 int gk20a_as_dev_open(struct inode *inode, struct file *filp)
 {
 	struct gk20a_as_share *as_share;
@@ -439,6 +455,10 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		err = nvgpu_as_ioctl_get_sync_ro_map(as_share,
 			(struct nvgpu_as_get_sync_ro_map_args *)buf);
 		break;
+	case NVGPU_AS_IOCTL_MAPPING_MODIFY:
+		err = nvgpu_as_ioctl_mapping_modify(as_share,
+			(struct nvgpu_as_mapping_modify_args *)buf);
+		break;
 	default:
 		err = -ENOTTY;
 		break;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -350,6 +350,113 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 	return err;
 }
 
+int nvgpu_vm_mapping_modify(struct vm_gk20a *vm,
+			s16 compr_kind,
+			s16 incompr_kind,
+			u64 map_address,
+			u64 buffer_offset,
+			u64 buffer_size)
+{
+	struct gk20a *g = gk20a_from_vm(vm);
+	int ret = -EINVAL;
+	struct nvgpu_mapped_buf *mapped_buffer;
+	struct nvgpu_sgt *nvgpu_sgt = NULL;
+	u32 pgsz_idx;
+	u32 page_size;
+	u64 ctag_offset;
+	s16 kind = NV_KIND_INVALID;
+
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
+	mapped_buffer = nvgpu_vm_find_mapped_buf(vm, map_address);
+	if (mapped_buffer == NULL) {
+		nvgpu_err(g, "no buffer at map_address 0x%llx", map_address);
+		goto out;
+	}
+
+	nvgpu_assert(mapped_buffer->addr == map_address);
+
+	pgsz_idx = mapped_buffer->pgsz_idx;
+	page_size = vm->gmmu_page_sizes[pgsz_idx];
+
+	if (buffer_offset & (page_size - 1)) {
+		nvgpu_err(g, "buffer_offset 0x%llx not page aligned",
+			buffer_offset);
+		goto out;
+	}
+
+	if (buffer_size & (page_size - 1)) {
+		nvgpu_err(g, "buffer_size 0x%llx not page aligned",
+			buffer_size);
+		goto out;
+	}
+
+	if (buffer_offset >= mapped_buffer->size) {
+		nvgpu_err(g, "buffer_offset 0x%llx exceeds buffer size 0x%llx",
+			buffer_offset, mapped_buffer->size);
+		goto out;
+	}
+
+	if (buffer_offset + buffer_size > mapped_buffer->size) {
+		nvgpu_err(g, "buffer end 0x%llx exceeds buffer size 0x%llx",
+			buffer_offset + buffer_size, mapped_buffer->size);
+		goto out;
+	}
+
+	if (compr_kind == NV_KIND_INVALID && incompr_kind == NV_KIND_INVALID) {
+		nvgpu_err(g, "both compr_kind and incompr_kind are invalid\n");
+		goto out;
+	}
+
+	if (mapped_buffer->ctag_offset != 0) {
+		if (compr_kind == NV_KIND_INVALID) {
+			kind = incompr_kind;
+		} else {
+			kind = compr_kind;
+		}
+	} else {
+		if (incompr_kind == NV_KIND_INVALID) {
+			nvgpu_err(g, "invalid incompr_kind specified");
+			goto out;
+		}
+		kind = incompr_kind;
+	}
+
+	nvgpu_sgt = nvgpu_linux_sgt_create(g, mapped_buffer->os_priv.sgt);
+	if (!nvgpu_sgt) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ctag_offset = mapped_buffer->ctag_offset;
+	ctag_offset += (u32)(buffer_offset >>
+			ilog2(g->ops.fb.compression_page_size(g)));
+
+	if (g->ops.mm.gmmu.map(vm,
+			map_address + buffer_offset,
+			nvgpu_sgt,
+			buffer_offset,
+			buffer_size,
+			pgsz_idx,
+			kind,
+			ctag_offset,
+			mapped_buffer->flags,
+			mapped_buffer->rw_flag,
+			false /* not clear_ctags */,
+			false /* not sparse */,
+			false /* not priv */,
+			NULL /* no mapping_batch handle */,
+			mapped_buffer->aperture) != 0ULL) {
+		ret = 0;
+	}
+
+	nvgpu_sgt_free(g, nvgpu_sgt);
+
+out:
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
+	return ret;
+}
+
 /*
  * This is the function call-back for freeing OS specific components of an
  * nvgpu_mapped_buf. This should most likely never be called outside of the
@@ -1208,9 +1208,9 @@ struct nvgpu_as_bind_channel_args {
  *
  * @buffer_offset [IN]
  *
- * Specify an offset into the physical buffer to being the mapping at. For
- * example imagine a DMA buffer 32KB long. However, you wish to only this
- * buffer starting at 8KB. In such a case you would pass 8KB as the
+ * Specify an offset into the physical buffer to begin the mapping at. For
+ * example imagine a DMA buffer 32KB long. However, you wish to only map
+ * this buffer starting at 8KB. In such a case you would pass 8KB as the
  * @buffer_offset. This is only available with fixed address mappings. All
  * regular (non-fixed) mappings require this field to be set to 0. This field
  * is in bytes.
@@ -1397,6 +1397,53 @@ struct nvgpu_as_get_sync_ro_map_args {
 	__u32 padding;
 };
 
+/*
+ * VM mapping modify IOCTL
+ *
+ * This ioctl changes the kind of an existing mapped buffer region.
+ *
+ * Usage of this API is as follows.
+ *
+ * @compr_kind [IN]
+ *
+ * Specify the new compressed kind to be used for the mapping. This
+ * parameter is only valid if compression resources are allocated to the
+ * underlying physical buffer. If NV_KIND_INVALID is specified then the
+ * fallback incompr_kind parameter is used.
+ *
+ * @incompr_kind [IN]
+ *
+ * Specify the new kind to be used for the mapping if compression is not
+ * to be used. If NV_KIND_INVALID is specified then incompressible fallback
+ * is not allowed.
+ *
+ * @buffer_offset [IN]
+ *
+ * Specifies the beginning offset of the region within the existing buffer
+ * for which the kind should be modified. This field is in bytes.
+ *
+ * @buffer_size [IN]
+ *
+ * Specifies the size of the region within the existing buffer for which the
+ * kind should be updated. This field is in bytes. Note that the region
+ * described by <buffer_offset, buffer_offset + buffer_size> must reside
+ * entirely within the existing buffer.
+ *
+ * @map_address [IN]
+ *
+ * The address of the existing buffer in the GPU virtual address space
+ * specified in bytes.
+ */
+struct nvgpu_as_mapping_modify_args {
+	__s16 compr_kind;	/* in */
+	__s16 incompr_kind;	/* in */
+
+	__u64 buffer_offset;	/* in, offset of mapped buffer region */
+	__u64 buffer_size;	/* in, size of mapped buffer region */
+
+	__u64 map_address;	/* in, base virtual address of mapped buffer */
+};
+
 #define NVGPU_AS_IOCTL_BIND_CHANNEL \
 	_IOWR(NVGPU_AS_IOCTL_MAGIC, 1, struct nvgpu_as_bind_channel_args)
 #define NVGPU32_AS_IOCTL_ALLOC_SPACE \
@@ -1419,9 +1466,11 @@ struct nvgpu_as_get_sync_ro_map_args {
 	_IOWR(NVGPU_AS_IOCTL_MAGIC, 11, struct nvgpu_as_map_buffer_batch_args)
 #define NVGPU_AS_IOCTL_GET_SYNC_RO_MAP \
 	_IOR(NVGPU_AS_IOCTL_MAGIC, 12, struct nvgpu_as_get_sync_ro_map_args)
+#define NVGPU_AS_IOCTL_MAPPING_MODIFY \
+	_IOWR(NVGPU_AS_IOCTL_MAGIC, 13, struct nvgpu_as_mapping_modify_args)
 
 #define NVGPU_AS_IOCTL_LAST \
-	_IOC_NR(NVGPU_AS_IOCTL_GET_SYNC_RO_MAP)
+	_IOC_NR(NVGPU_AS_IOCTL_MAPPING_MODIFY)
 #define NVGPU_AS_IOCTL_MAX_ARG_SIZE \
 	sizeof(struct nvgpu_as_map_buffer_ex_args)
 
|
|||||||
Reference in New Issue
Block a user
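To tie the documented fields above together, here is a second hedged sketch, again not part of the commit: it re-kinds only a sub-region of an existing mapping while checking the documented constraints (page-aligned offset and size, region entirely within the buffer) before issuing the ioctl. PAGE_SZ, both kind values, and the helper name are placeholders.

/*
 * Hypothetical sketch (not from this commit): modify the kind of a sub-region
 * of an existing mapping. PAGE_SZ and the kind values are placeholders; the
 * real mapping page size depends on how the buffer was originally mapped.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

#define PAGE_SZ			(64u * 1024u)	/* assumed mapping page size */
#define EXAMPLE_COMPR_KIND	0xfe		/* hypothetical compressible kind */
#define EXAMPLE_INCOMPR_KIND	0x00		/* hypothetical fallback kind */

static int rekind_region(int as_fd, uint64_t map_va, uint64_t map_size,
			 uint64_t offset, uint64_t len)
{
	struct nvgpu_as_mapping_modify_args args = { 0 };

	/* buffer_offset/buffer_size must be page aligned and the region must
	 * lie entirely within the existing buffer (see the docs above). */
	if (((offset | len) & (PAGE_SZ - 1)) != 0 || offset + len > map_size)
		return -1;

	args.compr_kind    = EXAMPLE_COMPR_KIND;	/* used if the buffer has ctags */
	args.incompr_kind  = EXAMPLE_INCOMPR_KIND;	/* fallback otherwise */
	args.map_address   = map_va;
	args.buffer_offset = offset;
	args.buffer_size   = len;

	return ioctl(as_fd, NVGPU_AS_IOCTL_MAPPING_MODIFY, &args);
}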