Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-22 17:25:35 +03:00)
video: tegra: nvmap: Refactor nvmap_handle unit
- Files for nvmap_handle unit: nvmap_handle.c, nvmap_sci_ipc.c, nvmap_id_array.c.
- Define external header for nvmap_handle unit as nvmap_handle.h and move
  declarations of all external APIs of nvmap_handle unit to this header.
- Define internal header for nvmap_handle unit as nvmap_handle_int.h and move
  declarations of all internally called APIs to this header.

JIRA TMM-5651

Change-Id: Ie4922c0839070491f9893f23744eb700cabb9828
Signed-off-by: Ashish Mhetre <amhetre@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3211591
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: Jon Hunter
Parent: ebabca3f65
Commit: d2d52d6786
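The commit applies a simple unit-header convention: each nvmap unit exposes one external header for APIs called from other units, and one internal header for APIs shared only among the unit's own source files. A minimal sketch of that convention, using a hypothetical unit named foo (all identifiers below are illustrative, not taken from this commit):

    /* foo.h -- external header: the unit's public API, included by other units. */
    #ifndef _FOO_H_
    #define _FOO_H_

    int foo_public_op(int arg);     /* may be called from any nvmap unit */

    #endif /* _FOO_H_ */

    /* foo_int.h -- internal header: shared only by the unit's own sources
     * (e.g. foo_a.c and foo_b.c); other units must not include it. */
    #ifndef _FOO_INT_H_
    #define _FOO_INT_H_

    int foo_internal_op(int arg);   /* intra-unit use only */

    #endif /* _FOO_INT_H_ */

In the diff below, nvmap_handle.h plays the role of the external header (nvmap_free_handle(), nvmap_assign_pages_to_handle(), is_nvmap_id_ro(), the SCI-IPC and ID-array declarations), nvmap_handle_int.h is the internal one (nvmap_duplicate_handle()), and helpers used by a single file, such as __nvmap_validate_locked(), nvmap_handle_add() and nvmap_validate_get(), become static.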
@@ -19,6 +19,7 @@
 #include "nvmap_alloc.h"
 #include "nvmap_alloc_int.h"
 #include "nvmap_dmabuf.h"
+#include "nvmap_handle.h"
 
 bool nvmap_convert_carveout_to_iovmm;
 bool nvmap_convert_iovmm_to_carveout;
@@ -574,215 +575,3 @@ out:
         kfree(h);
 }
 
-void nvmap_free_handle(struct nvmap_client *client,
-                       struct nvmap_handle *handle, bool is_ro)
-{
-        struct nvmap_handle_ref *ref;
-        struct nvmap_handle *h;
-
-        nvmap_ref_lock(client);
-
-        ref = __nvmap_validate_locked(client, handle, is_ro);
-        if (!ref) {
-                nvmap_ref_unlock(client);
-                return;
-        }
-
-        BUG_ON(!ref->handle);
-        h = ref->handle;
-
-        if (atomic_dec_return(&ref->dupes)) {
-                NVMAP_TAG_TRACE(trace_nvmap_free_handle,
-                                NVMAP_TP_ARGS_CHR(client, h, ref));
-                nvmap_ref_unlock(client);
-                goto out;
-        }
-
-        smp_rmb();
-        rb_erase(&ref->node, &client->handle_refs);
-        client->handle_count--;
-        atomic_dec(&ref->handle->share_count);
-
-        nvmap_ref_unlock(client);
-
-        if (h->owner == client)
-                h->owner = NULL;
-
-        if (is_ro)
-                dma_buf_put(ref->handle->dmabuf_ro);
-        else
-                dma_buf_put(ref->handle->dmabuf);
-        NVMAP_TAG_TRACE(trace_nvmap_free_handle,
-                        NVMAP_TP_ARGS_CHR(client, h, ref));
-        kfree(ref);
-
-out:
-        BUG_ON(!atomic_read(&h->ref));
-        nvmap_handle_put(h);
-}
-
-int is_nvmap_id_ro(struct nvmap_client *client, int id, bool *is_ro)
-{
-        struct nvmap_handle_info *info = NULL;
-        struct dma_buf *dmabuf = NULL;
-
-        if (WARN_ON(!client))
-                goto fail;
-
-        if (client->ida)
-                dmabuf = nvmap_id_array_get_dmabuf_from_id(client->ida,
-                                                           id);
-        else
-                dmabuf = dma_buf_get(id);
-
-        if (IS_ERR_OR_NULL(dmabuf))
-                goto fail;
-
-        if (dmabuf_is_nvmap(dmabuf))
-                info = dmabuf->priv;
-
-        if (!info) {
-                dma_buf_put(dmabuf);
-                /*
-                 * Ideally, we should return an error from here, but this
-                 * is done intentionally to handle foreign buffers.
-                 */
-                return 0;
-        }
-
-        *is_ro = info->is_ro;
-        dma_buf_put(dmabuf);
-        return 0;
-
-fail:
-        pr_err("Handle RO check failed\n");
-        return -EINVAL;
-}
-
-void nvmap_free_handle_from_fd(struct nvmap_client *client,
-                               int id)
-{
-        bool is_ro = false;
-        struct nvmap_handle *handle;
-        struct dma_buf *dmabuf = NULL;
-        int handle_ref = 0;
-        long dmabuf_ref = 0;
-
-        handle = nvmap_handle_get_from_id(client, id);
-        if (IS_ERR_OR_NULL(handle))
-                return;
-
-        if (is_nvmap_id_ro(client, id, &is_ro) != 0) {
-                nvmap_handle_put(handle);
-                return;
-        }
-
-        if (client->ida)
-                nvmap_id_array_id_release(client->ida, id);
-
-        nvmap_free_handle(client, handle, is_ro);
-        mutex_lock(&handle->lock);
-        dmabuf = is_ro ? handle->dmabuf_ro : handle->dmabuf;
-        if (dmabuf && dmabuf->file) {
-                dmabuf_ref = atomic_long_read(&dmabuf->file->f_count);
-        } else {
-                dmabuf_ref = 0;
-        }
-        mutex_unlock(&handle->lock);
-        handle_ref = atomic_read(&handle->ref);
-
-        trace_refcount_free_handle(handle, dmabuf, handle_ref, dmabuf_ref,
-                                   is_ro ? "RO" : "RW");
-        nvmap_handle_put(handle);
-}
-
-static int nvmap_assign_pages_per_handle(struct nvmap_handle *src_h,
-                struct nvmap_handle *dest_h, u64 src_h_start,
-                u64 src_h_end, u32 *pg_cnt)
-{
-        /* Increment the ref count of the source handle, as its pages
-         * are referenced here to create a new nvmap handle.
-         * By incrementing the ref count of the source handle, the
-         * source handle pages are not freed until the new handle's fd is closed.
-         * Note: nvmap_dmabuf_release needs to decrement the source handle ref count.
-         */
-        src_h = nvmap_handle_get(src_h);
-        if (!src_h)
-                return -EINVAL;
-
-        while (src_h_start < src_h_end) {
-                unsigned long next;
-                struct page *dest_page;
-
-                dest_h->pgalloc.pages[*pg_cnt] =
-                        src_h->pgalloc.pages[src_h_start >> PAGE_SHIFT];
-                dest_page = nvmap_to_page(dest_h->pgalloc.pages[*pg_cnt]);
-                get_page(dest_page);
-
-                next = min(((src_h_start + PAGE_SIZE) & PAGE_MASK),
-                           src_h_end);
-                src_h_start = next;
-                *pg_cnt = *pg_cnt + 1;
-        }
-
-        mutex_lock(&dest_h->pg_ref_h_lock);
-        list_add_tail(&src_h->pg_ref, &dest_h->pg_ref_h);
-        mutex_unlock(&dest_h->pg_ref_h_lock);
-
-        return 0;
-}
-
-int nvmap_assign_pages_to_handle(struct nvmap_client *client,
-                struct nvmap_handle **hs, struct nvmap_handle *h,
-                struct handles_range *rng)
-{
-        size_t nr_page = h->size >> PAGE_SHIFT;
-        struct page **pages;
-        u64 end_cur = 0;
-        u64 start = 0;
-        u64 end = 0;
-        u32 pg_cnt = 0;
-        u32 i;
-        int err = 0;
-
-        h = nvmap_handle_get(h);
-        if (!h)
-                return -EINVAL;
-
-        if (h->alloc) {
-                nvmap_handle_put(h);
-                return -EEXIST;
-        }
-
-        pages = nvmap_altalloc(nr_page * sizeof(*pages));
-        if (!pages) {
-                nvmap_handle_put(h);
-                return -ENOMEM;
-        }
-        h->pgalloc.pages = pages;
-
-        start = rng->offs_start;
-        end = rng->sz;
-
-        for (i = rng->start; i <= rng->end; i++) {
-                end_cur = (end >= hs[i]->size) ? (hs[i]->size - start) : end;
-                err = nvmap_assign_pages_per_handle(hs[i], h, start, start + end_cur, &pg_cnt);
-                if (err) {
-                        nvmap_altfree(pages, nr_page * sizeof(*pages));
-                        goto err_h;
-                }
-                end -= (hs[i]->size - start);
-                start = 0;
-        }
-
-        h->flags = hs[0]->flags;
-        h->heap_type = NVMAP_HEAP_IOVMM;
-        h->heap_pgalloc = true;
-        h->alloc = true;
-        h->is_subhandle = true;
-        mb();
-        return err;
-err_h:
-        nvmap_handle_put(h);
-        return err;
-}
@@ -21,6 +21,7 @@ __weak struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 #include "nvmap_priv.h"
 #include "nvmap_alloc.h"
 #include "nvmap_alloc_int.h"
+#include "nvmap_handle.h"
 
 extern void __clean_dcache_area_poc(void *addr, size_t len);
 
@@ -12,6 +12,7 @@
 #include "nvmap_priv.h"
 #include "nvmap_alloc.h"
 #include "nvmap_alloc_int.h"
+#include "nvmap_handle.h"
 
 bool vpr_cpu_access;
 
@@ -25,6 +25,7 @@
 
 #include "nvmap_priv.h"
 #include "nvmap_alloc.h"
+#include "nvmap_handle.h"
 
 static phys_addr_t handle_phys(struct nvmap_handle *h)
 {
@@ -46,6 +46,7 @@
 #include "nvmap_alloc.h"
 #include "nvmap_ioctl.h"
 #include "nvmap_dmabuf.h"
+#include "nvmap_handle.h"
 #include <linux/pagewalk.h>
 
 #define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */
@@ -34,6 +34,7 @@
 #include "nvmap_ioctl.h"
 #include "nvmap_alloc.h"
 #include "nvmap_dmabuf.h"
+#include "nvmap_handle.h"
 
 #define NVMAP_DMABUF_ATTACH nvmap_dmabuf_attach
 
@@ -10,6 +10,7 @@
 
 #include "nvmap_priv.h"
 #include "nvmap_alloc.h"
+#include "nvmap_handle.h"
 
 static void nvmap_vma_close(struct vm_area_struct *vma);
 
@@ -29,15 +29,34 @@
 #include "nvmap_ioctl.h"
 #include "nvmap_alloc.h"
 #include "nvmap_dmabuf.h"
+#include "nvmap_handle.h"
+#include "nvmap_handle_int.h"
 
 u32 nvmap_max_handle_count;
 
+static inline void nvmap_lru_add(struct nvmap_handle *h)
+{
+        spin_lock(&nvmap_dev->lru_lock);
+        BUG_ON(!list_empty(&h->lru));
+        list_add_tail(&h->lru, &nvmap_dev->lru_handles);
+        spin_unlock(&nvmap_dev->lru_lock);
+}
+
+static inline void nvmap_lru_del(struct nvmap_handle *h)
+{
+        spin_lock(&nvmap_dev->lru_lock);
+        list_del(&h->lru);
+        INIT_LIST_HEAD(&h->lru);
+        spin_unlock(&nvmap_dev->lru_lock);
+}
+
 /*
  * Verifies that the passed ID is a valid handle ID. Then the passed client's
  * reference to the handle is returned.
  *
  * Note: to call this function make sure you own the client ref lock.
  */
-struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *c,
+static struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *c,
                                                  struct nvmap_handle *h,
                                                  bool is_ro)
 {
@@ -57,7 +76,7 @@ struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *c,
         return NULL;
 }
 /* adds a newly-created handle to the device master tree */
-void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
+static void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
 {
         struct rb_node **p;
         struct rb_node *parent = NULL;
@@ -111,7 +130,7 @@ int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
 
 /* Validates that a handle is in the device master tree and that the
  * client has permission to access it. */
-struct nvmap_handle *nvmap_validate_get(struct nvmap_handle *id)
+static struct nvmap_handle *nvmap_validate_get(struct nvmap_handle *id)
 {
         struct nvmap_handle *h = NULL;
         struct rb_node *n;
@@ -564,3 +583,216 @@ struct nvmap_handle_ref *nvmap_dup_handle_ro(struct nvmap_client *client,
 
         return ref;
 }
+
+void nvmap_free_handle(struct nvmap_client *client,
+                       struct nvmap_handle *handle, bool is_ro)
+{
+        struct nvmap_handle_ref *ref;
+        struct nvmap_handle *h;
+
+        nvmap_ref_lock(client);
+
+        ref = __nvmap_validate_locked(client, handle, is_ro);
+        if (!ref) {
+                nvmap_ref_unlock(client);
+                return;
+        }
+
+        BUG_ON(!ref->handle);
+        h = ref->handle;
+
+        if (atomic_dec_return(&ref->dupes)) {
+                NVMAP_TAG_TRACE(trace_nvmap_free_handle,
+                                NVMAP_TP_ARGS_CHR(client, h, ref));
+                nvmap_ref_unlock(client);
+                goto out;
+        }
+
+        smp_rmb();
+        rb_erase(&ref->node, &client->handle_refs);
+        client->handle_count--;
+        atomic_dec(&ref->handle->share_count);
+
+        nvmap_ref_unlock(client);
+
+        if (h->owner == client)
+                h->owner = NULL;
+
+        if (is_ro)
+                dma_buf_put(ref->handle->dmabuf_ro);
+        else
+                dma_buf_put(ref->handle->dmabuf);
+        NVMAP_TAG_TRACE(trace_nvmap_free_handle,
+                        NVMAP_TP_ARGS_CHR(client, h, ref));
+        kfree(ref);
+
+out:
+        BUG_ON(!atomic_read(&h->ref));
+        nvmap_handle_put(h);
+}
+
+void nvmap_free_handle_from_fd(struct nvmap_client *client,
+                               int id)
+{
+        bool is_ro = false;
+        struct nvmap_handle *handle;
+        struct dma_buf *dmabuf = NULL;
+        int handle_ref = 0;
+        long dmabuf_ref = 0;
+
+        handle = nvmap_handle_get_from_id(client, id);
+        if (IS_ERR_OR_NULL(handle))
+                return;
+
+        if (is_nvmap_id_ro(client, id, &is_ro) != 0) {
+                nvmap_handle_put(handle);
+                return;
+        }
+
+        if (client->ida)
+                nvmap_id_array_id_release(client->ida, id);
+
+        nvmap_free_handle(client, handle, is_ro);
+        mutex_lock(&handle->lock);
+        dmabuf = is_ro ? handle->dmabuf_ro : handle->dmabuf;
+        if (dmabuf && dmabuf->file) {
+                dmabuf_ref = atomic_long_read(&dmabuf->file->f_count);
+        } else {
+                dmabuf_ref = 0;
+        }
+        mutex_unlock(&handle->lock);
+        handle_ref = atomic_read(&handle->ref);
+
+        trace_refcount_free_handle(handle, dmabuf, handle_ref, dmabuf_ref,
+                                   is_ro ? "RO" : "RW");
+        nvmap_handle_put(handle);
+}
+
+static int nvmap_assign_pages_per_handle(struct nvmap_handle *src_h,
+                struct nvmap_handle *dest_h, u64 src_h_start,
+                u64 src_h_end, u32 *pg_cnt)
+{
+        /* Increment the ref count of the source handle, as its pages
+         * are referenced here to create a new nvmap handle.
+         * By incrementing the ref count of the source handle, the
+         * source handle pages are not freed until the new handle's fd is closed.
+         * Note: nvmap_dmabuf_release needs to decrement the source handle ref count.
+         */
+        src_h = nvmap_handle_get(src_h);
+        if (!src_h)
+                return -EINVAL;
+
+        while (src_h_start < src_h_end) {
+                unsigned long next;
+                struct page *dest_page;
+
+                dest_h->pgalloc.pages[*pg_cnt] =
+                        src_h->pgalloc.pages[src_h_start >> PAGE_SHIFT];
+                dest_page = nvmap_to_page(dest_h->pgalloc.pages[*pg_cnt]);
+                get_page(dest_page);
+
+                next = min(((src_h_start + PAGE_SIZE) & PAGE_MASK),
+                           src_h_end);
+                src_h_start = next;
+                *pg_cnt = *pg_cnt + 1;
+        }
+
+        mutex_lock(&dest_h->pg_ref_h_lock);
+        list_add_tail(&src_h->pg_ref, &dest_h->pg_ref_h);
+        mutex_unlock(&dest_h->pg_ref_h_lock);
+
+        return 0;
+}
+
+int nvmap_assign_pages_to_handle(struct nvmap_client *client,
+                struct nvmap_handle **hs, struct nvmap_handle *h,
+                struct handles_range *rng)
+{
+        size_t nr_page = h->size >> PAGE_SHIFT;
+        struct page **pages;
+        u64 end_cur = 0;
+        u64 start = 0;
+        u64 end = 0;
+        u32 pg_cnt = 0;
+        u32 i;
+        int err = 0;
+
+        h = nvmap_handle_get(h);
+        if (!h)
+                return -EINVAL;
+
+        if (h->alloc) {
+                nvmap_handle_put(h);
+                return -EEXIST;
+        }
+
+        pages = nvmap_altalloc(nr_page * sizeof(*pages));
+        if (!pages) {
+                nvmap_handle_put(h);
+                return -ENOMEM;
+        }
+        h->pgalloc.pages = pages;
+
+        start = rng->offs_start;
+        end = rng->sz;
+
+        for (i = rng->start; i <= rng->end; i++) {
+                end_cur = (end >= hs[i]->size) ? (hs[i]->size - start) : end;
+                err = nvmap_assign_pages_per_handle(hs[i], h, start, start + end_cur, &pg_cnt);
+                if (err) {
+                        nvmap_altfree(pages, nr_page * sizeof(*pages));
+                        goto err_h;
+                }
+                end -= (hs[i]->size - start);
+                start = 0;
+        }
+
+        h->flags = hs[0]->flags;
+        h->heap_type = NVMAP_HEAP_IOVMM;
+        h->heap_pgalloc = true;
+        h->alloc = true;
+        h->is_subhandle = true;
+        mb();
+        return err;
+err_h:
+        nvmap_handle_put(h);
+        return err;
+}
+
+int is_nvmap_id_ro(struct nvmap_client *client, int id, bool *is_ro)
+{
+        struct nvmap_handle_info *info = NULL;
+        struct dma_buf *dmabuf = NULL;
+
+        if (WARN_ON(!client))
+                goto fail;
+
+        if (client->ida)
+                dmabuf = nvmap_id_array_get_dmabuf_from_id(client->ida,
+                                                           id);
+        else
+                dmabuf = dma_buf_get(id);
+
+        if (IS_ERR_OR_NULL(dmabuf))
+                goto fail;
+
+        if (dmabuf_is_nvmap(dmabuf))
+                info = dmabuf->priv;
+
+        if (!info) {
+                dma_buf_put(dmabuf);
+                /*
+                 * Ideally, we should return an error from here, but this
+                 * is done intentionally to handle foreign buffers.
+                 */
+                return 0;
+        }
+
+        *is_ro = info->is_ro;
+        dma_buf_put(dmabuf);
+        return 0;
+
+fail:
+        pr_err("Handle RO check failed\n");
+        return -EINVAL;
+}
drivers/video/tegra/nvmap/nvmap_handle.h (new file, 318 lines)
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * SPDX-FileCopyrightText: Copyright (c) 2009-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * GPU memory management driver for Tegra
+ */
+
+#ifndef _NVMAP_HANDLE_H_
+#define _NVMAP_HANDLE_H_
+
+#include <linux/nvscierror.h>
+#include <linux/nvsciipc_interface.h>
+
+struct nvmap_handle {
+        struct rb_node node;            /* entry on global handle tree */
+        atomic_t ref;                   /* reference count (i.e., # of duplications) */
+        atomic_t pin;                   /* pin count */
+        u32 flags;                      /* caching flags */
+        size_t size;                    /* padded (as-allocated) size */
+        size_t orig_size;               /* original (as-requested) size */
+        size_t align;
+        struct nvmap_client *owner;
+        struct dma_buf *dmabuf;
+        struct dma_buf *dmabuf_ro;
+        union {
+                struct nvmap_pgalloc pgalloc;
+                struct nvmap_heap_block *carveout;
+        };
+        bool heap_pgalloc;              /* handle is page allocated (sysmem / iovmm) */
+        bool alloc;                     /* handle has memory allocated */
+        bool from_va;                   /* handle memory is from VA */
+        u32 heap_type;                  /* handle heap is allocated from */
+        u32 userflags;                  /* flags passed from userspace */
+        void *vaddr;                    /* mapping used inside kernel */
+        struct list_head vmas;          /* list of all user vma's */
+        atomic_t umap_count;            /* number of outstanding maps from user */
+        atomic_t kmap_count;            /* number of outstanding map from kernel */
+        atomic_t share_count;           /* number of processes sharing the handle */
+        struct list_head lru;           /* list head to track the lru */
+        struct mutex lock;
+        struct list_head dmabuf_priv;
+        u64 ivm_id;
+        unsigned int peer;              /* Peer VM number */
+        int offs;                       /* Offset in IVM mem pool */
+        /*
+         * To be set only in handle created from VA case if the handle is
+         * read-only.
+         */
+        bool is_ro;
+
+        /* list node in case this handle's pages are referenced */
+        struct list_head pg_ref;
+        /* list of all the handles whose
+         * pages are referenced in this handle
+         */
+        struct list_head pg_ref_h;
+        struct mutex pg_ref_h_lock;
+        bool is_subhandle;
+        /*
+         * waitq to wait on RO dmabuf release completion, if release is already in progress.
+         */
+        wait_queue_head_t waitq;
+        int numa_id;
+        u64 serial_id;
+        bool has_hugetlbfs_pages;
+};
+
+struct nvmap_handle_info {
+        struct nvmap_handle *handle;
+        struct list_head maps;
+        struct mutex maps_lock;
+        bool is_ro;
+};
+
+/* handle_ref objects are client-local references to an nvmap_handle;
+ * they are distinct objects so that handles can be unpinned and
+ * unreferenced the correct number of times when a client abnormally
+ * terminates */
+struct nvmap_handle_ref {
+        struct nvmap_handle *handle;
+        struct rb_node node;
+        atomic_t dupes;                 /* number of times to free on file close */
+        bool is_ro;
+};
+
+struct handles_range {
+        u32 start;                      /* start handle no where buffer range starts */
+        u32 end;                        /* end handle no where buffer range ends */
+        u64 offs_start;                 /* keep track of intermediate offset */
+        u64 offs;                       /* user passed offset */
+        u64 sz;                         /* user passed size */
+};
+
+static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
+{
+        if (h->flags == NVMAP_HANDLE_UNCACHEABLE) {
+#ifdef CONFIG_ARM64
+                if (h->heap_type != NVMAP_HEAP_CARVEOUT_VPR &&
+                    h->owner && !h->owner->warned) {
+                        char task_comm[TASK_COMM_LEN];
+
+                        h->owner->warned = 1;
+                        get_task_comm(task_comm, h->owner->task);
+                        pr_err("PID %d: %s: TAG: 0x%04x WARNING: "
+                               "NVMAP_HANDLE_WRITE_COMBINE "
+                               "should be used in place of "
+                               "NVMAP_HANDLE_UNCACHEABLE on ARM64\n",
+                               h->owner->task->pid, task_comm,
+                               h->userflags >> 16);
+                }
+#endif
+                return pgprot_noncached(prot);
+        } else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE) {
+                return pgprot_writecombine(prot);
+        } else {
+                /* Do nothing */
+        }
+        return prot;
+}
+
+/*
+ * FIXME: assume user space requests for reserve operations
+ * are page aligned
+ */
+static inline int nvmap_handle_mk(struct nvmap_handle *h,
+                                  u32 offset, u32 size,
+                                  bool (*fn)(struct page **),
+                                  bool locked)
+{
+        int i, nchanged = 0;
+        u32 start_page = offset >> PAGE_SHIFT;
+        u32 end_page = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
+
+        if (!locked)
+                mutex_lock(&h->lock);
+        if (h->heap_pgalloc &&
+            (offset < h->size) &&
+            (size <= h->size) &&
+            (offset <= (h->size - size))) {
+                for (i = start_page; i < end_page; i++)
+                        nchanged += fn(&h->pgalloc.pages[i]) ? 1 : 0;
+        }
+        if (!locked)
+                mutex_unlock(&h->lock);
+        return nchanged;
+}
+
+static inline void nvmap_handle_mkclean(struct nvmap_handle *h,
+                                        u32 offset, u32 size)
+{
+        int nchanged;
+
+        if (h->heap_pgalloc && !atomic_read(&h->pgalloc.ndirty))
+                return;
+        if (size == 0)
+                size = h->size;
+
+        nchanged = nvmap_handle_mk(h, offset, size, nvmap_page_mkclean, false);
+        if (h->heap_pgalloc)
+                atomic_sub(nchanged, &h->pgalloc.ndirty);
+}
+
+static inline void _nvmap_handle_mkdirty(struct nvmap_handle *h,
+                                         u32 offset, u32 size)
+{
+        int nchanged;
+
+        if (h->heap_pgalloc &&
+            (atomic_read(&h->pgalloc.ndirty) == (h->size >> PAGE_SHIFT)))
+                return;
+
+        nchanged = nvmap_handle_mk(h, offset, size, nvmap_page_mkdirty, true);
+        if (h->heap_pgalloc)
+                atomic_add(nchanged, &h->pgalloc.ndirty);
+}
+
+static inline void nvmap_kmaps_inc(struct nvmap_handle *h)
+{
+        mutex_lock(&h->lock);
+        atomic_inc(&h->kmap_count);
+        mutex_unlock(&h->lock);
+}
+
+static inline void nvmap_kmaps_inc_no_lock(struct nvmap_handle *h)
+{
+        atomic_inc(&h->kmap_count);
+}
+
+static inline void nvmap_kmaps_dec(struct nvmap_handle *h)
+{
+        atomic_dec(&h->kmap_count);
+}
+
+static inline void nvmap_umaps_inc(struct nvmap_handle *h)
+{
+        mutex_lock(&h->lock);
+        atomic_inc(&h->umap_count);
+        mutex_unlock(&h->lock);
+}
+
+static inline void nvmap_umaps_dec(struct nvmap_handle *h)
+{
+        atomic_dec(&h->umap_count);
+}
+
+static inline void nvmap_lru_reset(struct nvmap_handle *h)
+{
+        spin_lock(&nvmap_dev->lru_lock);
+        BUG_ON(list_empty(&h->lru));
+        list_del(&h->lru);
+        list_add_tail(&h->lru, &nvmap_dev->lru_handles);
+        spin_unlock(&nvmap_dev->lru_lock);
+}
+
+static inline bool nvmap_handle_track_dirty(struct nvmap_handle *h)
+{
+        if (!h->heap_pgalloc)
+                return false;
+
+        return h->userflags & (NVMAP_HANDLE_CACHE_SYNC |
+                               NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE);
+}
+
+struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
+                                             size_t size, bool ro_buf);
+
+struct nvmap_handle_ref *nvmap_create_handle_from_va(struct nvmap_client *client,
+                                                     ulong addr, size_t size,
+                                                     unsigned int access_flags);
+
+struct nvmap_handle_ref *nvmap_dup_handle_ro(struct nvmap_client *client,
+                                             int fd);
+
+struct nvmap_handle_ref *nvmap_try_duplicate_by_ivmid(
+                struct nvmap_client *client, u64 ivm_id,
+                struct nvmap_heap_block **block);
+
+struct nvmap_handle_ref *nvmap_create_handle_from_id(
+                struct nvmap_client *client, u32 id);
+
+struct nvmap_handle_ref *nvmap_create_handle_from_fd(
+                struct nvmap_client *client, int fd);
+
+void nvmap_free_handle(struct nvmap_client *c, struct nvmap_handle *h, bool is_ro);
+
+void nvmap_free_handle_from_fd(struct nvmap_client *c, int fd);
+
+int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);
+
+int is_nvmap_id_ro(struct nvmap_client *client, int id, bool *is_ro);
+
+int nvmap_assign_pages_to_handle(struct nvmap_client *client,
+                struct nvmap_handle **hs, struct nvmap_handle *h,
+                struct handles_range *rng);
+
+int nvmap_validate_sci_ipc_params(struct nvmap_client *client,
+                        NvSciIpcEndpointAuthToken auth_token,
+                        NvSciIpcEndpointVuid *pr_vuid,
+                        NvSciIpcEndpointVuid *localusr_vuid);
+
+int nvmap_create_sci_ipc_id(struct nvmap_client *client,
+                        struct nvmap_handle *h,
+                        u32 flags,
+                        u64 *sci_ipc_id,
+                        NvSciIpcEndpointVuid pr_vuid,
+                        bool is_ro);
+
+int nvmap_get_handle_from_sci_ipc_id(struct nvmap_client *client,
+                        u32 flags,
+                        u64 sci_ipc_id,
+                        NvSciIpcEndpointVuid localusr_vuid,
+                        u32 *h);
+
+#ifdef NVMAP_CONFIG_SCIIPC
+int nvmap_sci_ipc_init(void);
+void nvmap_sci_ipc_exit(void);
+#else
+__weak int nvmap_sci_ipc_init(void)
+{
+        return 0;
+}
+__weak void nvmap_sci_ipc_exit(void)
+{
+}
+#endif
+
+#ifdef NVMAP_CONFIG_HANDLE_AS_ID
+void nvmap_id_array_init(struct xarray *xarr);
+void nvmap_id_array_exit(struct xarray *xarr);
+struct dma_buf *nvmap_id_array_get_dmabuf_from_id(struct xarray *xarr, u32 id);
+int nvmap_id_array_id_alloc(struct xarray *xarr, u32 *id, struct dma_buf *dmabuf);
+struct dma_buf *nvmap_id_array_id_release(struct xarray *xarr, u32 id);
+#else
+static inline void nvmap_id_array_init(struct xarray *xarr)
+{
+
+}
+
+static inline void nvmap_id_array_exit(struct xarray *xarr)
+{
+
+}
+
+static inline struct dma_buf *nvmap_id_array_get_dmabuf_from_id(struct xarray *xarr, u32 id)
+{
+        return NULL;
+}
+
+static inline int nvmap_id_array_id_alloc(struct xarray *xarr, u32 *id, struct dma_buf *dmabuf)
+{
+        return 0;
+}
+
+static inline struct dma_buf *nvmap_id_array_id_release(struct xarray *xarr, u32 id)
+{
+        return NULL;
+}
+#endif /* NVMAP_CONFIG_HANDLE_AS_ID */
+
+#endif /* _NVMAP_HANDLE_H_ */
drivers/video/tegra/nvmap/nvmap_handle_int.h (new file, 14 lines)
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * GPU memory management driver for Tegra
+ */
+
+#ifndef _NVMAP_HANDLE_INT_H_
+#define _NVMAP_HANDLE_INT_H_
+
+struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
+                struct nvmap_handle *h, bool skip_val,
+                bool is_ro);
+
+#endif /* _NVMAP_HANDLE_INT_H_ */
@@ -28,6 +28,7 @@
 #include "nvmap_priv.h"
 #include "nvmap_alloc.h"
 #include "nvmap_alloc_int.h"
+#include "nvmap_handle.h"
 #include "include/linux/nvmap_exports.h"
 
 /*
@@ -6,6 +6,7 @@
 #include <linux/xarray.h>
 #include <linux/dma-buf.h>
 #include "nvmap_priv.h"
+#include "nvmap_handle.h"
 
 /*
  * Initialize xarray mapping
@@ -30,13 +30,13 @@
 #ifdef NVMAP_CONFIG_SCIIPC
 #include <linux/nvscierror.h>
 #include <linux/nvsciipc_interface.h>
-#include "nvmap_sci_ipc.h"
 #endif
 
 #include "nvmap_ioctl.h"
 #include "nvmap_priv.h"
 #include "nvmap_alloc.h"
 #include "nvmap_dmabuf.h"
+#include "nvmap_handle.h"
 
 #include <linux/syscalls.h>
 #include <linux/nodemask.h>
@@ -171,84 +171,12 @@ struct nvmap_handle_dmabuf_priv {
         struct list_head list;
 };
 
-struct nvmap_handle {
-        struct rb_node node;            /* entry on global handle tree */
-        atomic_t ref;                   /* reference count (i.e., # of duplications) */
-        atomic_t pin;                   /* pin count */
-        u32 flags;                      /* caching flags */
-        size_t size;                    /* padded (as-allocated) size */
-        size_t orig_size;               /* original (as-requested) size */
-        size_t align;
-        struct nvmap_client *owner;
-        struct dma_buf *dmabuf;
-        struct dma_buf *dmabuf_ro;
-        union {
-                struct nvmap_pgalloc pgalloc;
-                struct nvmap_heap_block *carveout;
-        };
-        bool heap_pgalloc;              /* handle is page allocated (sysmem / iovmm) */
-        bool alloc;                     /* handle has memory allocated */
-        bool from_va;                   /* handle memory is from VA */
-        u32 heap_type;                  /* handle heap is allocated from */
-        u32 userflags;                  /* flags passed from userspace */
-        void *vaddr;                    /* mapping used inside kernel */
-        struct list_head vmas;          /* list of all user vma's */
-        atomic_t umap_count;            /* number of outstanding maps from user */
-        atomic_t kmap_count;            /* number of outstanding map from kernel */
-        atomic_t share_count;           /* number of processes sharing the handle */
-        struct list_head lru;           /* list head to track the lru */
-        struct mutex lock;
-        struct list_head dmabuf_priv;
-        u64 ivm_id;
-        unsigned int peer;              /* Peer VM number */
-        int offs;                       /* Offset in IVM mem pool */
-        /*
-         * To be set only in handle created from VA case if the handle is
-         * read-only.
-         */
-        bool is_ro;
-
-        /* list node in case this handle's pages are referenced */
-        struct list_head pg_ref;
-        /* list of all the handles whose
-         * pages are referenced in this handle
-         */
-        struct list_head pg_ref_h;
-        struct mutex pg_ref_h_lock;
-        bool is_subhandle;
-        /*
-         * waitq to wait on RO dmabuf release completion, if release is already in progress.
-         */
-        wait_queue_head_t waitq;
-        int numa_id;
-        u64 serial_id;
-        bool has_hugetlbfs_pages;
-};
-
-struct nvmap_handle_info {
-        struct nvmap_handle *handle;
-        struct list_head maps;
-        struct mutex maps_lock;
-        bool is_ro;
-};
-
 struct nvmap_tag_entry {
         struct rb_node node;
         atomic_t ref;                   /* reference count (i.e., # of duplications) */
         u32 tag;
 };
 
-/* handle_ref objects are client-local references to an nvmap_handle;
- * they are distinct objects so that handles can be unpinned and
- * unreferenced the correct number of times when a client abnormally
- * terminates */
-struct nvmap_handle_ref {
-        struct nvmap_handle *handle;
-        struct rb_node node;
-        atomic_t dupes;                 /* number of times to free on file close */
-        bool is_ro;
-};
-
 #define NVMAP_IVM_INVALID_PEER (-1)
 
 struct nvmap_client {
@@ -302,14 +230,6 @@ struct nvmap_device {
         u64 serial_id_counter; /* This is global counter common across different client processes */
 };
 
-struct handles_range {
-        u32 start;                      /* start handle no where buffer range starts */
-        u32 end;                        /* end handle no where buffer range ends */
-        u64 offs_start;                 /* keep track of intermediate offset */
-        u64 offs;                       /* user passed offset */
-        u64 sz;                         /* user passed size */
-};
-
 extern struct nvmap_device *nvmap_dev;
 extern ulong nvmap_init_time;
 
@@ -333,30 +253,6 @@ static inline void nvmap_release_mmap_read_lock(struct mm_struct *mm)
         up_read(&mm->mmap_lock);
 }
 
-static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
-{
-        if (h->flags == NVMAP_HANDLE_UNCACHEABLE) {
-#ifdef CONFIG_ARM64
-                if (h->heap_type != NVMAP_HEAP_CARVEOUT_VPR &&
-                    h->owner && !h->owner->warned) {
-                        char task_comm[TASK_COMM_LEN];
-                        h->owner->warned = 1;
-                        get_task_comm(task_comm, h->owner->task);
-                        pr_err("PID %d: %s: TAG: 0x%04x WARNING: "
-                               "NVMAP_HANDLE_WRITE_COMBINE "
-                               "should be used in place of "
-                               "NVMAP_HANDLE_UNCACHEABLE on ARM64\n",
-                               h->owner->task->pid, task_comm,
-                               h->userflags >> 16);
-                }
-#endif
-                return pgprot_noncached(prot);
-        }
-        else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
-                return pgprot_writecombine(prot);
-        return prot;
-}
-
 struct dma_coherent_mem_replica {
         void *virt_base;
         dma_addr_t device_base;
@@ -381,53 +277,10 @@ struct nvmap_carveout_node;
 struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
 void nvmap_handle_put(struct nvmap_handle *h);
 
-struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *priv,
-                                                 struct nvmap_handle *h,
-                                                 bool is_ro);
-
-struct nvmap_handle *nvmap_validate_get(struct nvmap_handle *h);
-
-struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
-                                             size_t size, bool ro_buf);
-
-struct nvmap_handle_ref *nvmap_create_handle_from_va(struct nvmap_client *client,
-                                                     ulong addr, size_t size,
-                                                     unsigned int access_flags);
-
-struct nvmap_handle_ref *nvmap_dup_handle_ro(struct nvmap_client *client,
-                                             int fd);
-
-int is_nvmap_id_ro(struct nvmap_client *client, int id, bool *is_ro);
-
-struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
-                struct nvmap_handle *h, bool skip_val,
-                bool is_ro);
-
-struct nvmap_handle_ref *nvmap_try_duplicate_by_ivmid(
-                struct nvmap_client *client, u64 ivm_id,
-                struct nvmap_heap_block **block);
-
-struct nvmap_handle_ref *nvmap_create_handle_from_id(
-                struct nvmap_client *client, u32 id);
-
-struct nvmap_handle_ref *nvmap_create_handle_from_fd(
-                struct nvmap_client *client, int fd);
-
 void outer_cache_maint(unsigned int op, phys_addr_t paddr, size_t size);
 
-void nvmap_free_handle(struct nvmap_client *c, struct nvmap_handle *h, bool is_ro);
-
-void nvmap_free_handle_from_fd(struct nvmap_client *c, int fd);
-
-int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);
-
-void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);
-
 int is_nvmap_vma(struct vm_area_struct *vma);
 
-int nvmap_get_handle_param(struct nvmap_client *client,
-                           struct nvmap_handle_ref *ref, u32 param, u64 *result);
-
 struct nvmap_handle *nvmap_handle_get_from_fd(int fd);
 
 /* MM definitions. */
@@ -466,62 +319,6 @@ static inline bool nvmap_page_mkclean(struct page **page)
         return true;
 }
 
-/*
- * FIXME: assume user space requests for reserve operations
- * are page aligned
- */
-static inline int nvmap_handle_mk(struct nvmap_handle *h,
-                                  u32 offset, u32 size,
-                                  bool (*fn)(struct page **),
-                                  bool locked)
-{
-        int i, nchanged = 0;
-        u32 start_page = offset >> PAGE_SHIFT;
-        u32 end_page = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
-
-        if (!locked)
-                mutex_lock(&h->lock);
-        if (h->heap_pgalloc &&
-            (offset < h->size) &&
-            (size <= h->size) &&
-            (offset <= (h->size - size))) {
-                for (i = start_page; i < end_page; i++)
-                        nchanged += fn(&h->pgalloc.pages[i]) ? 1 : 0;
-        }
-        if (!locked)
-                mutex_unlock(&h->lock);
-        return nchanged;
-}
-
-static inline void nvmap_handle_mkclean(struct nvmap_handle *h,
-                                        u32 offset, u32 size)
-{
-        int nchanged;
-
-        if (h->heap_pgalloc && !atomic_read(&h->pgalloc.ndirty))
-                return;
-        if (size == 0)
-                size = h->size;
-
-        nchanged = nvmap_handle_mk(h, offset, size, nvmap_page_mkclean, false);
-        if (h->heap_pgalloc)
-                atomic_sub(nchanged, &h->pgalloc.ndirty);
-}
-
-static inline void _nvmap_handle_mkdirty(struct nvmap_handle *h,
-                                         u32 offset, u32 size)
-{
-        int nchanged;
-
-        if (h->heap_pgalloc &&
-            (atomic_read(&h->pgalloc.ndirty) == (h->size >> PAGE_SHIFT)))
-                return;
-
-        nchanged = nvmap_handle_mk(h, offset, size, nvmap_page_mkdirty, true);
-        if (h->heap_pgalloc)
-                atomic_add(nchanged, &h->pgalloc.ndirty);
-}
-
 void nvmap_zap_handle(struct nvmap_handle *handle, u64 offset, u64 size);
 
 void nvmap_vma_open(struct vm_area_struct *vma);
@@ -529,69 +326,6 @@ void nvmap_vma_open(struct vm_area_struct *vma);
 int nvmap_reserve_pages(struct nvmap_handle **handles, u64 *offsets,
                         u64 *sizes, u32 nr, u32 op, bool is_32);
 
-static inline void nvmap_kmaps_inc(struct nvmap_handle *h)
-{
-        mutex_lock(&h->lock);
-        atomic_inc(&h->kmap_count);
-        mutex_unlock(&h->lock);
-}
-
-static inline void nvmap_kmaps_inc_no_lock(struct nvmap_handle *h)
-{
-        atomic_inc(&h->kmap_count);
-}
-
-static inline void nvmap_kmaps_dec(struct nvmap_handle *h)
-{
-        atomic_dec(&h->kmap_count);
-}
-
-static inline void nvmap_umaps_inc(struct nvmap_handle *h)
-{
-        mutex_lock(&h->lock);
-        atomic_inc(&h->umap_count);
-        mutex_unlock(&h->lock);
-}
-
-static inline void nvmap_umaps_dec(struct nvmap_handle *h)
-{
-        atomic_dec(&h->umap_count);
-}
-
-static inline void nvmap_lru_add(struct nvmap_handle *h)
-{
-        spin_lock(&nvmap_dev->lru_lock);
-        BUG_ON(!list_empty(&h->lru));
-        list_add_tail(&h->lru, &nvmap_dev->lru_handles);
-        spin_unlock(&nvmap_dev->lru_lock);
-}
-
-static inline void nvmap_lru_del(struct nvmap_handle *h)
-{
-        spin_lock(&nvmap_dev->lru_lock);
-        list_del(&h->lru);
-        INIT_LIST_HEAD(&h->lru);
-        spin_unlock(&nvmap_dev->lru_lock);
-}
-
-static inline void nvmap_lru_reset(struct nvmap_handle *h)
-{
-        spin_lock(&nvmap_dev->lru_lock);
-        BUG_ON(list_empty(&h->lru));
-        list_del(&h->lru);
-        list_add_tail(&h->lru, &nvmap_dev->lru_handles);
-        spin_unlock(&nvmap_dev->lru_lock);
-}
-
-static inline bool nvmap_handle_track_dirty(struct nvmap_handle *h)
-{
-        if (!h->heap_pgalloc)
-                return false;
-
-        return h->userflags & (NVMAP_HANDLE_CACHE_SYNC |
-                               NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE);
-}
-
 struct nvmap_tag_entry *nvmap_search_tag_entry(struct rb_root *root, u32 tag);
 
 int nvmap_define_tag(struct nvmap_device *dev, u32 tag,
@@ -665,51 +399,6 @@ extern struct of_device_id __nvmapcache_of_table;
         _OF_DECLARE(nvmapcache, nvmapcache_of, compat, fn, \
                     nvmap_setup_chip_cache_fn)
 
-#ifdef NVMAP_CONFIG_SCIIPC
-int nvmap_sci_ipc_init(void);
-void nvmap_sci_ipc_exit(void);
-#else
-__weak int nvmap_sci_ipc_init(void)
-{
-        return 0;
-}
-__weak void nvmap_sci_ipc_exit(void)
-{
-}
-#endif
-
-#ifdef NVMAP_CONFIG_HANDLE_AS_ID
-void nvmap_id_array_init(struct xarray *xarr);
-void nvmap_id_array_exit(struct xarray *xarr);
-struct dma_buf *nvmap_id_array_get_dmabuf_from_id(struct xarray *xarr, u32 id);
-int nvmap_id_array_id_alloc(struct xarray *xarr, u32 *id, struct dma_buf *dmabuf);
-struct dma_buf *nvmap_id_array_id_release(struct xarray *xarr, u32 id);
-#else
-static inline void nvmap_id_array_init(struct xarray *xarr)
-{
-
-}
-
-static inline void nvmap_id_array_exit(struct xarray *xarr)
-{
-
-}
-
-static inline struct dma_buf *nvmap_id_array_get_dmabuf_from_id(struct xarray *xarr, u32 id)
-{
-        return NULL;
-}
-
-static inline int nvmap_id_array_id_alloc(struct xarray *xarr, u32 *id, struct dma_buf *dmabuf)
-{
-        return 0;
-}
-
-static inline struct dma_buf *nvmap_id_array_id_release(struct xarray *xarr, u32 id)
-{
-        return NULL;
-}
-#endif
-
 void *nvmap_dmabuf_get_drv_data(struct dma_buf *dmabuf,
                                 struct device *dev);
 bool is_nvmap_memory_available(size_t size, uint32_t heap, int numa_nid);
@@ -731,9 +420,5 @@ void nvmap_dma_mark_declared_memory_unoccupied(struct device *dev,
                                                dma_addr_t device_addr, size_t size);
 #endif /* CONFIG_TEGRA_VIRTUALIZATION */
 
-int nvmap_assign_pages_to_handle(struct nvmap_client *client,
-                struct nvmap_handle **hs, struct nvmap_handle *h,
-                struct handles_range *rng);
-
 void nvmap_dma_release_coherent_memory(struct dma_coherent_mem_replica *mem);
 #endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */
@@ -21,8 +21,9 @@
 
 #include <trace/events/nvmap.h>
 #include "nvmap_priv.h"
-#include "nvmap_sci_ipc.h"
 #include "nvmap_dmabuf.h"
+#include "nvmap_handle.h"
+#include "nvmap_handle_int.h"
 
 struct nvmap_sci_ipc {
         struct rb_root entries;
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only
- * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- *
- * mapping between nvmap_handle and sci_ipc entry
- */
-
-#ifndef __VIDEO_TEGRA_NVMAP_SCI_IPC_H
-#define __VIDEO_TEGRA_NVMAP_SCI_IPC_H
-
-int nvmap_validate_sci_ipc_params(struct nvmap_client *client,
-                        NvSciIpcEndpointAuthToken auth_token,
-                        NvSciIpcEndpointVuid *pr_vuid,
-                        NvSciIpcEndpointVuid *localusr_vuid);
-
-int nvmap_create_sci_ipc_id(struct nvmap_client *client,
-                        struct nvmap_handle *h,
-                        u32 flags,
-                        u64 *sci_ipc_id,
-                        NvSciIpcEndpointVuid pr_vuid,
-                        bool is_ro);
-
-int nvmap_get_handle_from_sci_ipc_id(struct nvmap_client *client,
-                        u32 flags,
-                        u64 sci_ipc_id,
-                        NvSciIpcEndpointVuid localusr_vuid,
-                        u32 *h);
-#endif /* __VIDEO_TEGRA_NVMAP_SCI_IPC_H */
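With the split in place, code outside the unit includes only the external header. A minimal usage sketch (the wrapper function and its name are assumptions for illustration, not part of this change):

    #include "nvmap_priv.h"
    #include "nvmap_handle.h"       /* external API of the nvmap_handle unit */

    /* Drop the client's reference behind a user-visible handle id/fd.
     * nvmap_free_handle_from_fd() is declared in nvmap_handle.h; the
     * wrapper itself is assumed context for the example. */
    static void example_put_handle(struct nvmap_client *client, int fd)
    {
            nvmap_free_handle_from_fd(client, fd);
    }

nvmap_handle_int.h, by contrast, is included only by the unit's own files (nvmap_handle.c and nvmap_sci_ipc.c in this change), as the @@ -21,8 +21,9 @@ hunk above shows.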