tegra: nvmap: dmabuf fd for range from list of handles

Add support for returning a dmabuf fd built from a list of nvmap
handles, covering a range described by an offset and a size.

The range must fall within the total size of all of the handles, and it
may span only a subset of them. For example, given three handles of
64 KiB, 128 KiB and 32 KiB, an offset of 96 KiB with a size of 48 KiB
lies entirely inside the second handle.

Bug 3494980

Change-Id: I5c688f832a7a3bb0b6e3713ec6462224bb6fbfc5
Signed-off-by: Puneet Saxena <puneets@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2789431
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Puneet Saxena
Date:      2022-10-14 16:13:14 +05:30
Committer: Laxman Dewangan
Parent:    f1e2cd8dea
Commit:    c4a88d6515

9 changed files with 335 additions and 3 deletions


@@ -1045,6 +1045,9 @@ void _nvmap_handle_free(struct nvmap_handle *h)
for (i = page_index; i < nr_page; i++) {
if (h->from_va)
put_page(h->pgalloc.pages[i]);
/* Intentionally a separate "else if": subhandle pages are put, not freed */
else if (h->is_subhandle)
put_page(h->pgalloc.pages[i]);
else
__free_page(h->pgalloc.pages[i]);
}
@@ -1156,3 +1159,98 @@ void nvmap_free_handle_from_fd(struct nvmap_client *client,
trace_refcount_free_handle(handle, dmabuf, handle_ref, dmabuf_ref,
is_ro ? "RO" : "RW");
}
static int nvmap_assign_pages_per_handle(struct nvmap_handle *src_h,
struct nvmap_handle *dest_h, u64 src_h_start,
u64 src_h_end, u32 *pg_cnt)
{
/* Increment the ref count of the source handle, as its pages
* are referenced here to create the new nvmap handle.
* Holding this reference ensures the source handle's pages are
* not freed until the new handle's fd is closed.
* Note: nvmap_dmabuf_release() must decrement the source handle's ref count.
*/
src_h = nvmap_handle_get(src_h);
if (!src_h)
return -EINVAL;
while (src_h_start < src_h_end) {
unsigned long next;
struct page *src_page;
struct page *dest_page;
dest_h->pgalloc.pages[*pg_cnt] =
src_h->pgalloc.pages[src_h_start >> PAGE_SHIFT];
src_page = nvmap_to_page(src_h->pgalloc.pages
[src_h_start >> PAGE_SHIFT]);
dest_page = nvmap_to_page(dest_h->pgalloc.pages[*pg_cnt]);
get_page(dest_page);
nvmap_clean_cache_page(src_page);
next = min(((src_h_start + PAGE_SIZE) & PAGE_MASK),
src_h_end);
src_h_start = next;
*pg_cnt = *pg_cnt + 1;
}
mutex_lock(&dest_h->pg_ref_h_lock);
list_add_tail(&src_h->pg_ref, &dest_h->pg_ref_h);
mutex_unlock(&dest_h->pg_ref_h_lock);
return 0;
}
int nvmap_assign_pages_to_handle(struct nvmap_client *client,
struct nvmap_handle **hs, struct nvmap_handle *h,
struct handles_range *rng)
{
size_t nr_page = h->size >> PAGE_SHIFT;
struct page **pages;
u64 end_cur = 0;
u64 start = 0;
u64 end = 0;
u32 pg_cnt = 0;
u32 i;
int err = 0;
h = nvmap_handle_get(h);
if (!h)
return -EINVAL;
if (h->alloc) {
nvmap_handle_put(h);
return -EEXIST;
}
pages = nvmap_altalloc(nr_page * sizeof(*pages));
if (!pages) {
nvmap_handle_put(h);
return -ENOMEM;
}
h->pgalloc.pages = pages;
start = rng->offs_start;
end = rng->sz;
for (i = rng->start; i <= rng->end; i++) {
end_cur = (end >= hs[i]->size) ? (hs[i]->size - start) : end;
err = nvmap_assign_pages_per_handle(hs[i], h, start, start + end_cur, &pg_cnt);
if (err) {
nvmap_altfree(pages, nr_page * sizeof(*pages));
goto err_h;
}
end -= (hs[i]->size - start);
start = 0;
}
h->heap_type = NVMAP_HEAP_IOVMM;
h->heap_pgalloc = true;
h->alloc = true;
h->is_subhandle = true;
atomic_set(&h->pgalloc.ndirty, 0);
mb();
return err;
err_h:
nvmap_handle_put(h);
return err;
}
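
To make the splitting arithmetic concrete, the following is a standalone
sketch (plain C, made-up handle sizes, not kernel code) of how start/end
evolve across the loop above, for a 96 KiB range that begins 32 KiB into
the first of two handles:

/* Illustrative walk of the range-splitting loop above; not kernel code. */
#include <stdio.h>

int main(void)
{
	unsigned long long sizes[] = { 64 * 1024, 128 * 1024 };
	unsigned long long start = 32 * 1024; /* offs_start within the first handle */
	unsigned long long end = 96 * 1024;   /* bytes still to cover */
	unsigned int i;

	for (i = 0; i < 2; i++) {
		/* bytes taken from handle i, mirroring end_cur above */
		unsigned long long end_cur =
			(end >= sizes[i]) ? (sizes[i] - start) : end;

		printf("handle %u: %llu bytes from offset %llu\n",
		       i, end_cur, start);
		if (end_cur == end)
			break; /* the range ends inside this handle */
		end -= (sizes[i] - start);
		start = 0;
	}
	return 0;
}

This prints 32 KiB taken from offset 32 KiB of handle 0, then 64 KiB from
offset 0 of handle 1.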


@@ -483,7 +483,9 @@ static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case NVMAP_IOC_QUERY_HEAP_PARAMS:
err = nvmap_ioctl_query_heap_params(filp, uarg);
break;
case NVMAP_IOC_GET_FD_FOR_RANGE_FROM_LIST:
err = nvmap_ioctl_get_fd_from_list(filp, uarg);
break;
default:
pr_warn("Unknown NVMAP_IOC = 0x%x\n", cmd);
}


@@ -223,6 +223,24 @@ static void nvmap_dmabuf_release(struct dma_buf *dmabuf)
}
mutex_unlock(&info->handle->lock);
if (!list_empty(&info->handle->pg_ref_h)) {
struct nvmap_handle *tmp, *src;
mutex_lock(&info->handle->pg_ref_h_lock);
/* On closing the dmabuf fd:
* 1. Remove every handle from the pg_ref_h list
* 2. Decrement the handle ref count of each handle removed from the list,
* leaving pg_ref_h empty
*/
list_for_each_entry_safe(src, tmp, &info->handle->pg_ref_h,
pg_ref) {
list_del(&src->pg_ref);
nvmap_handle_put(src);
}
mutex_unlock(&info->handle->pg_ref_h_lock);
}
nvmap_handle_put(info->handle);
kfree(info);
}


@@ -229,9 +229,15 @@ static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (PageAnon(page) && (vma->vm_flags & VM_SHARED))
return VM_FAULT_SIGSEGV;
if (priv->handle->is_subhandle) {
pr_err("sub_handle page:%p\n", page);
kaddr = kmap(page);
BUG_ON(!kaddr);
inner_cache_maint(NVMAP_CACHE_OP_WB_INV, kaddr, PAGE_SIZE);
kunmap(page);
}
if (!nvmap_handle_track_dirty(priv->handle))
goto finish;
mutex_lock(&priv->handle->lock);
if (nvmap_page_dirty(priv->handle->pgalloc.pages[offs])) {
mutex_unlock(&priv->handle->lock);


@@ -246,6 +246,7 @@ struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
INIT_LIST_HEAD(&h->lru);
INIT_LIST_HEAD(&h->dmabuf_priv);
INIT_LIST_HEAD(&h->pg_ref_h);
/*
* This takes out 1 ref on the dmabuf. This corresponds to the
* handle_ref that gets automatically made by nvmap_create_handle().


@@ -1432,3 +1432,174 @@ out:
is_ro ? "RO" : "RW");
return ret;
}
static int find_range_of_handles(struct nvmap_handle **hs, u32 nr,
struct handles_range *hrange)
{
u64 tot_sz = 0, rem_sz = 0;
u64 offs = hrange->offs;
u32 start = 0, end = 0;
u64 sz = hrange->sz;
u32 i;
hrange->offs_start = offs;
/* Find start handle */
for (i = 0; i < nr; i++) {
tot_sz += hs[i]->size;
if (offs > tot_sz) {
/* The offset lies past this handle */
hrange->offs_start -= hs[i]->size;
continue;
} else {
rem_sz = tot_sz - offs;
start = i;
/* Check size in current handle */
if (rem_sz >= sz) {
end = i;
hrange->start = start;
hrange->end = end;
return 0;
}
/* Start handle found, but the end lies in a later handle */
i++;
break;
}
}
/* Find the handle where the range ends */
for (; i < nr; i++) {
rem_sz += hs[i]->size;
if (rem_sz >= sz) {
end = i;
hrange->start = start;
hrange->end = end;
return 0;
}
}
return -1;
}
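
A standalone sketch of the same search (plain C, illustrative sizes only,
not kernel code), using the example from the commit message: three handles
of 64 KiB, 128 KiB and 32 KiB, an offset of 96 KiB and a size of 48 KiB:

/* Illustrative version of find_range_of_handles(); not kernel code. */
#include <stdio.h>

int main(void)
{
	unsigned long long sizes[] = { 64 * 1024, 128 * 1024, 32 * 1024 };
	unsigned long long offs = 96 * 1024, sz = 48 * 1024;
	unsigned long long tot = 0, rem = 0, offs_start = offs;
	unsigned int i, start = 0, end;

	/* Find the handle containing the start of the range */
	for (i = 0; i < 3; i++) {
		tot += sizes[i];
		if (offs > tot) {
			offs_start -= sizes[i]; /* offset lies past this handle */
			continue;
		}
		start = i;
		rem = tot - offs; /* bytes of this handle beyond the offset */
		break;
	}
	/* Walk forward until the requested size is covered */
	for (end = start; rem < sz && end + 1 < 3; ) {
		end++;
		rem += sizes[end];
	}
	printf("range spans handles %u..%u, starting %llu bytes into handle %u\n",
	       start, end, offs_start, start);
	return 0;
}

For these values the range starts 32 KiB into handle 1 and also ends
inside it, so start == end == 1.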
int nvmap_ioctl_get_fd_from_list(struct file *filp, void __user *arg)
{
struct nvmap_client *client = filp->private_data;
struct nvmap_fd_for_range_from_list op = {0};
struct nvmap_handle_ref *ref = NULL;
struct nvmap_handle **hs = NULL;
struct dma_buf *dmabuf = NULL;
struct nvmap_handle *h = NULL;
struct handles_range hrange = {0};
size_t tot_hs_size = 0;
u32 i, count = 0;
size_t bytes;
int err = 0;
int fd = -1;
u32 *hndls;
if (!client)
return -ENODEV;
if (copy_from_user(&op, arg, sizeof(op)))
return -EFAULT;
if (!op.handles || !op.num_handles
|| !op.size || op.num_handles > U32_MAX / sizeof(u32))
return -EINVAL;
hrange.offs = op.offset;
hrange.sz = op.size;
/* memory for nvmap_handle pointers */
bytes = op.num_handles * sizeof(*hs);
if (!ACCESS_OK(VERIFY_READ, (const void __user *)op.handles,
op.num_handles * sizeof(u32)))
return -EFAULT;
/* memory for handles passed by userspace */
bytes += op.num_handles * sizeof(u32);
hs = nvmap_altalloc(bytes);
if (!hs) {
pr_err("memory allocation failed\n");
return -ENOMEM;
}
hndls = (u32 *)(hs + op.num_handles);
if (!IS_ALIGNED((ulong)hndls, sizeof(u32))) {
pr_err("handle pointer is not properly aligned!!\n");
err = -EINVAL;
goto free_mem;
}
if (copy_from_user(hndls, (void __user *)op.handles,
op.num_handles * sizeof(u32))) {
pr_err("Can't copy from user pointer op.handles\n");
err = -EFAULT;
goto free_mem;
}
for (i = 0; i < op.num_handles; i++) {
hs[i] = nvmap_handle_get_from_id(client, hndls[i]);
if (IS_ERR_OR_NULL(hs[i])) {
pr_err("invalid handle_ptr[%d] = %u\n",
i, hndls[i]);
while (i--)
nvmap_handle_put(hs[i]);
err = -EINVAL;
goto free_mem;
}
/* Only dereference the handle once it is known to be valid */
tot_hs_size += hs[i]->size;
}
/* The combined size of all the handles must cover offset + size */
if (tot_hs_size < (hrange.offs + hrange.sz)) {
err = -EINVAL;
goto free_hs;
}
/* All of the handles must be allocated from the page-backed heap */
for (i = 0; i < op.num_handles; i++)
if (hs[i]->heap_pgalloc)
count++;
if (count != op.num_handles) {
pr_err("all of the handles should be from the page-backed heap\n");
err = -EINVAL;
goto free_hs;
}
/* Find the actual range of handles in which the offset/size range lies */
if (find_range_of_handles(hs, op.num_handles, &hrange)) {
err = -EINVAL;
goto free_hs;
}
if (hrange.start >= op.num_handles || hrange.end >= op.num_handles) {
err = -EINVAL;
goto free_hs;
}
/* Create new handle for the size */
ref = nvmap_create_handle(client, hrange.sz, false);
if (IS_ERR_OR_NULL(ref)) {
err = IS_ERR(ref) ? PTR_ERR(ref) : -ENOMEM;
goto free_hs;
}
ref->handle->orig_size = hrange.sz;
h = ref->handle;
/* Assign pages from the handles to newly created nvmap handle */
err = nvmap_assign_pages_to_handle(client, hs, h, &hrange);
if (err)
goto free_hs;
dmabuf = h->dmabuf;
/* Create a dmabuf fd for the new handle */
fd = nvmap_get_dmabuf_fd(client, h, false);
op.fd = fd;
err = nvmap_install_fd(client, h, fd,
arg, &op, sizeof(op), 1, dmabuf);
free_hs:
for (i = 0; i < op.num_handles; i++)
nvmap_handle_put(hs[i]);
if (h) {
nvmap_handle_put(h);
nvmap_free_handle(client, h, false);
}
free_mem:
nvmap_altfree(hs, bytes);
return err;
}


@@ -76,4 +76,5 @@ int nvmap_ioctl_query_heap_params(struct file *filp, void __user *arg);
int nvmap_ioctl_dup_handle(struct file *filp, void __user *arg);
int nvmap_ioctl_get_fd_from_list(struct file *filp, void __user *arg);
#endif /* __VIDEO_TEGRA_NVMAP_IOCTL_H */


@@ -263,6 +263,15 @@ struct nvmap_handle {
* read-only.
*/
bool is_ro;
/* list node used when this handle's pages are referenced by another handle */
struct list_head pg_ref;
/* list of all the handles whose
* pages are referenced by this handle
*/
struct list_head pg_ref_h;
struct mutex pg_ref_h_lock;
bool is_subhandle;
};
struct nvmap_handle_info {
@@ -401,6 +410,13 @@ struct nvmap_device {
bool co_cache_flush_at_alloc;
};
struct handles_range {
u32 start; /* index of the handle where the buffer range starts */
u32 end; /* index of the handle where the buffer range ends */
u64 offs_start; /* offset of the range within the start handle */
u64 offs; /* user-passed offset */
u64 sz; /* user-passed size */
};
extern struct nvmap_device *nvmap_dev;
extern ulong nvmap_init_time;
@@ -906,4 +922,8 @@ void nvmap_dma_mark_declared_memory_unoccupied(struct device *dev,
extern void __dma_flush_area(const void *cpu_va, size_t size);
extern void __dma_map_area(const void *cpu_va, size_t size, int dir);
int nvmap_assign_pages_to_handle(struct nvmap_client *client,
struct nvmap_handle **hs, struct nvmap_handle *h,
struct handles_range *rng);
#endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */


@@ -259,6 +259,17 @@ struct nvmap_duplicate_handle {
__u32 dup_handle;
};
/**
 * Struct used to get a dmabuf fd for a range from a list of memory handles
 */
struct nvmap_fd_for_range_from_list {
__u32 *handles; /* Head of the handles list */
__u32 num_handles; /* Number of handles in the list */
__u64 offset; /* Page-size-aligned offset into the combined buffer */
__u64 size; /* Size of the sub-buffer for which an fd is returned */
__s32 fd; /* Sub-range dmabuf fd to be returned */
};
#define NVMAP_IOC_MAGIC 'N'
/* Creates a new memory handle. On input, the argument is the size of the new
@@ -361,6 +372,10 @@ struct nvmap_duplicate_handle {
#define NVMAP_IOC_DUP_HANDLE _IOWR(NVMAP_IOC_MAGIC, 106, \
struct nvmap_duplicate_handle)
#define NVMAP_IOC_MAXNR (_IOC_NR(NVMAP_IOC_DUP_HANDLE))
/* Get a dmabuf fd for a range from a list of NvRmMemHandles */
#define NVMAP_IOC_GET_FD_FOR_RANGE_FROM_LIST _IOR(NVMAP_IOC_MAGIC, 107, \
struct nvmap_fd_for_range_from_list)
#define NVMAP_IOC_MAXNR (_IOC_NR(NVMAP_IOC_GET_FD_FOR_RANGE_FROM_LIST))
#endif /* __UAPI_LINUX_NVMAP_H */
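
For illustration, a minimal userspace sketch of how the new ioctl might be
invoked. The device node path /dev/nvmap, the <linux/nvmap.h> header name
and the helper itself are assumptions for the sketch, not part of this
commit; the handle IDs are expected to come from earlier NVMAP_IOC_* calls.

/* Hypothetical usage sketch; error handling trimmed for brevity. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvmap.h> /* struct nvmap_fd_for_range_from_list (assumed path) */

static int get_range_fd(__u32 *handles, __u32 nr, __u64 offset, __u64 size)
{
	struct nvmap_fd_for_range_from_list op = {
		.handles = handles, /* handle IDs obtained elsewhere */
		.num_handles = nr,
		.offset = offset, /* page-aligned offset into the combined buffer */
		.size = size,     /* size of the requested sub-buffer */
	};
	int dev = open("/dev/nvmap", O_RDWR); /* device node path is an assumption */
	int err;

	if (dev < 0)
		return -1;
	err = ioctl(dev, NVMAP_IOC_GET_FD_FOR_RANGE_FROM_LIST, &op);
	close(dev);
	return err ? -1 : op.fd; /* dmabuf fd backed by the covered handles' pages */
}

On success, op.fd is a dmabuf fd whose pages alias the requested sub-range of
the source handles, and closing it drops the extra references the kernel took
on those handles.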