Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-22 09:11:26 +03:00)
video: tegra: nvmap: Update carveout name
"Compression" is not the correct name for this carveout, since the GPU will use it even for non-compression use cases. Hence rename it to the "gpu" carveout.

Bug 3956637

Change-Id: I802b91d58d9ca120e34655c21f56c0da8c8cf677
Signed-off-by: Ketan Patil <ketanp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2955536
Reviewed-by: Pritesh Raithatha <praithatha@nvidia.com>
commit 1c2599bcba (parent 3e51976226), committed by mobile promotions
@@ -717,7 +717,7 @@ static void alloc_handle(struct nvmap_client *client,
 	void *cpu_addr;
 
 	if (h->pgalloc.pages &&
-			h->heap_type == NVMAP_HEAP_CARVEOUT_COMPRESSION) {
+			h->heap_type == NVMAP_HEAP_CARVEOUT_GPU) {
 		unsigned long page_count;
 		u32 granule_size = 0;
 		int i;
@@ -150,11 +150,11 @@ static phys_addr_t nvmap_alloc_mem(struct nvmap_heap *h, size_t len,
 		err = nvmap_dma_alloc_attrs(dev, len, &pa,
 				GFP_KERNEL, DMA_ATTR_ALLOC_EXACT_SIZE);
 		/*
-		 * In case of Compression carveout, try to allocate the entire granule in physically
+		 * In case of Gpu carveout, try to allocate the entire granule in physically
 		 * contiguous manner. If it returns error, then try to allocate the memory in
 		 * granules of specified granule size.
 		 */
-		if (h->is_compression_co && IS_ERR(err)) {
+		if (h->is_gpu_co && IS_ERR(err)) {
 			err = nvmap_dma_alloc_attrs(dev, len, &pa,
 					GFP_KERNEL, DMA_ATTR_ALLOC_EXACT_SIZE |
 					DMA_ATTR_ALLOC_SINGLE_PAGES);
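The comment in this hunk describes a two-step strategy for the gpu carveout: first request the whole buffer as one physically contiguous allocation, and only on failure fall back to building it out of granule-sized pieces. A minimal sketch of that fallback, reusing only the call and attribute names visible in the hunk and assuming the ERR_PTR-style return that the IS_ERR() check above implies; example_gpu_co_alloc is a hypothetical wrapper, not part of the patch:

	static void *example_gpu_co_alloc(struct device *dev, size_t len,
					  dma_addr_t *pa)
	{
		/* First attempt: the whole length as one contiguous allocation. */
		void *va = nvmap_dma_alloc_attrs(dev, len, pa, GFP_KERNEL,
						 DMA_ATTR_ALLOC_EXACT_SIZE);

		/* Fallback: let the allocator satisfy the request granule by
		 * granule (physically discontiguous), as the gpu carveout
		 * path above does. */
		if (IS_ERR(va))
			va = nvmap_dma_alloc_attrs(dev, len, pa, GFP_KERNEL,
						   DMA_ATTR_ALLOC_EXACT_SIZE |
						   DMA_ATTR_ALLOC_SINGLE_PAGES);
		return va;
	}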
@@ -222,7 +222,7 @@ static void nvmap_free_mem(struct nvmap_heap *h, phys_addr_t base,
 			(void *)(uintptr_t)base,
 			(dma_addr_t)base, DMA_ATTR_ALLOC_EXACT_SIZE);
 #else
-	if (h->is_compression_co && handle->pgalloc.pages) {
+	if (h->is_gpu_co && handle->pgalloc.pages) {
 		/* In case of pages, we need to pass pointer to array of pages */
 		nvmap_dma_free_attrs(dev, len,
 				(void *)handle->pgalloc.pages,
@@ -482,7 +482,7 @@ struct nvmap_heap *nvmap_heap_create(struct device *parent,
 					DMA_MEMORY_NOMAP);
 #else
 	err = nvmap_dma_declare_coherent_memory(h->dma_dev, 0, base, len,
-			DMA_MEMORY_NOMAP, co->is_compression_co, co->granule_size);
+			DMA_MEMORY_NOMAP, co->is_gpu_co, co->granule_size);
 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
 	if (!err) {
@@ -505,7 +505,7 @@ struct nvmap_heap *nvmap_heap_create(struct device *parent,
 	h->base = base;
 	h->can_alloc = !!co->can_alloc;
 	h->is_ivm = co->is_ivm;
-	h->is_compression_co = co->is_compression_co;
+	h->is_gpu_co = co->is_gpu_co;
 	h->granule_size = co->granule_size;
 	h->len = len;
 	h->free_size = len;
@@ -639,7 +639,7 @@ int nvmap_flush_heap_block(struct nvmap_client *client,
 		struct list_block *b = container_of(block, struct list_block, block);
 
 		/*
-		 * For Compression carveout with physically discontiguous granules,
+		 * For Gpu carveout with physically discontiguous granules,
 		 * iterate over granules and do cache maint for it.
 		 */
 		page_count = h->size >> PAGE_SHIFT;
@@ -31,7 +31,7 @@ struct nvmap_heap {
 	struct device *cma_dev;
 	struct device *dma_dev;
 	bool is_ivm;
-	bool is_compression_co;
+	bool is_gpu_co;
 	u32 granule_size;
 	bool can_alloc; /* Used only if is_ivm == true */
 	unsigned int peer; /* Used only if is_ivm == true */
@@ -136,8 +136,8 @@ static struct nvmap_platform_carveout nvmap_carveouts[] = {
 		.size = 0,
 	},
 	[4] = {
-		.name = "compression",
-		.usage_mask = NVMAP_HEAP_CARVEOUT_COMPRESSION,
+		.name = "gpu",
+		.usage_mask = NVMAP_HEAP_CARVEOUT_GPU,
 		.base = 0,
 		.size = 0,
 	},
@@ -367,7 +367,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 	int do_memset = 0;
 	int *bitmap_nos = NULL;
 	const char *device_name;
-	bool is_compression = false;
+	bool is_gpu = false;
 	u32 granule_size = 0;
 
 	device_name = dev_name(dev);
@@ -376,16 +376,16 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 		return NULL;
 	}
 
-	if (!strncmp(device_name, "compression", 11)) {
+	if (!strncmp(device_name, "gpu", 3)) {
 		struct nvmap_platform_carveout *co;
 
-		is_compression = true;
-		co = nvmap_get_carveout_pdata("compression");
+		is_gpu = true;
+		co = nvmap_get_carveout_pdata("gpu");
 		granule_size = co->granule_size;
 	}
 
-	if (is_compression) {
-		/* Calculation for Compression carveout should consider granule size */
+	if (is_gpu) {
+		/* Calculation for Gpu carveout should consider granule size */
 		count = size >> PAGE_SHIFT_GRANULE(granule_size);
 	} else {
 		if (dma_get_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs)) {
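The branch above counts coherent-pool slots in granules for the gpu carveout and in pages otherwise. A minimal sketch of that arithmetic, assuming PAGE_SHIFT_GRANULE() reduces to log2 of the granule size in bytes (its definition is not part of this patch); the EXAMPLE_ names are hypothetical:

	#include <linux/log2.h>
	#include <linux/mm.h>	/* PAGE_SHIFT */

	/* Assumed semantics only: shift = log2(granule size in bytes). */
	#define EXAMPLE_PAGE_SHIFT_GRANULE(g)	ilog2(g)

	static unsigned long example_slot_count(size_t size, u32 granule_size,
						bool is_gpu)
	{
		/* gpu carveout: one slot per granule; other carveouts: one per
		 * page. E.g. size = 1 MiB with a 64 KiB granule gives 16 slots
		 * instead of 256 pages. */
		return is_gpu ? size >> EXAMPLE_PAGE_SHIFT_GRANULE(granule_size)
			      : size >> PAGE_SHIFT;
	}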
@@ -411,7 +411,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 	    dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) {
 		alloc_size = 1;
 		/* pages contain the array of pages of kernel PAGE_SIZE */
-		if (!is_compression)
+		if (!is_gpu)
 			pages = nvmap_kvzalloc_pages(count);
 		else
 			pages = nvmap_kvzalloc_pages(count * PAGES_PER_GRANULE(granule_size));
@@ -426,15 +426,15 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 
 	spin_lock_irqsave(&mem->spinlock, flags);
 
-	if (!is_compression && unlikely(size > ((u64)mem->size << PAGE_SHIFT)))
+	if (!is_gpu && unlikely(size > ((u64)mem->size << PAGE_SHIFT)))
 		goto err;
-	else if (is_compression &&
+	else if (is_gpu &&
 		 unlikely(size > ((u64)mem->size << PAGE_SHIFT_GRANULE(granule_size))))
 		goto err;
 
 	if (((mem->flags & DMA_MEMORY_NOMAP) &&
 	     dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) ||
-	    is_compression) {
+	    is_gpu) {
 		align = 0;
 	} else {
 		if (order > DMA_BUF_ALIGNMENT)
@@ -455,7 +455,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 
 		count -= alloc_size;
 		if (pages) {
-			if (!is_compression)
+			if (!is_gpu)
 				pages[i++] = pfn_to_page(mem->pfn_base + pageno);
 			else {
 				/* Handle granules */
@@ -472,7 +472,7 @@ static void *__nvmap_dma_alloc_from_coherent(struct device *dev,
 	/*
 	 * Memory was found in the coherent area.
 	 */
-	if (!is_compression)
+	if (!is_gpu)
 		*dma_handle = mem->device_base + (first_pageno << PAGE_SHIFT);
 	else
 		*dma_handle = mem->device_base + (first_pageno << PAGE_SHIFT_GRANULE(granule_size));
@@ -526,7 +526,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	unsigned long flags;
 	unsigned int pageno, page_shift_val;
 	struct dma_coherent_mem_replica *mem;
-	bool is_compression = false;
+	bool is_gpu = false;
 	const char *device_name;
 	u32 granule_size = 0;
 
@@ -539,11 +539,11 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
-	if (!strncmp(device_name, "compression", 11)) {
+	if (!strncmp(device_name, "gpu", 3)) {
 		struct nvmap_platform_carveout *co;
 
-		is_compression = true;
-		co = nvmap_get_carveout_pdata("compression");
+		is_gpu = true;
+		co = nvmap_get_carveout_pdata("gpu");
 		granule_size = co->granule_size;
 	}
 
@@ -554,7 +554,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		int i;
 
 		spin_lock_irqsave(&mem->spinlock, flags);
-		if (!is_compression) {
+		if (!is_gpu) {
 			for (i = 0; i < (size >> PAGE_SHIFT); i++) {
 				pageno = page_to_pfn(pages[i]) - mem->pfn_base;
 				if (WARN_ONCE(pageno > mem->size,
@@ -582,7 +582,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	else
 		mem_addr = mem->virt_base;
 
-	page_shift_val = is_compression ? PAGE_SHIFT_GRANULE(granule_size) : PAGE_SHIFT;
+	page_shift_val = is_gpu ? PAGE_SHIFT_GRANULE(granule_size) : PAGE_SHIFT;
 	if (mem && cpu_addr >= mem_addr &&
 	    cpu_addr - mem_addr < (u64)mem->size << page_shift_val) {
 		unsigned int page = (cpu_addr - mem_addr) >> page_shift_val;
@@ -590,7 +590,7 @@ void nvmap_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		unsigned int count;
 
 		if (DMA_ATTR_ALLOC_EXACT_SIZE & attrs) {
-			if (is_compression)
+			if (is_gpu)
 				count = ALIGN_GRANULE_SIZE(size, granule_size) >> page_shift_val;
 			else
 				count = PAGE_ALIGN(size) >> page_shift_val;
@@ -682,7 +682,7 @@ static int nvmap_dma_assign_coherent_memory(struct device *dev,
 
 static int nvmap_dma_init_coherent_memory(
 	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
-	struct dma_coherent_mem_replica **mem, bool is_compression, u32 granule_size)
+	struct dma_coherent_mem_replica **mem, bool is_gpu, u32 granule_size)
 {
 	struct dma_coherent_mem_replica *dma_mem = NULL;
 	void *mem_base = NULL;
@@ -693,7 +693,7 @@ static int nvmap_dma_init_coherent_memory(
 	if (!size)
 		return -EINVAL;
 
-	if (is_compression)
+	if (is_gpu)
 		pages = size >> PAGE_SHIFT_GRANULE(granule_size);
 	else
 		pages = size >> PAGE_SHIFT;
@@ -737,14 +737,14 @@ err_memunmap:
 }
 
 int nvmap_dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-		dma_addr_t device_addr, size_t size, int flags, bool is_compression,
+		dma_addr_t device_addr, size_t size, int flags, bool is_gpu,
 		u32 granule_size)
 {
 	struct dma_coherent_mem_replica *mem;
 	int ret;
 
 	ret = nvmap_dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem,
-			is_compression, granule_size);
+			is_gpu, granule_size);
 	if (ret)
 		return ret;
 
@@ -776,7 +776,7 @@ static int __init nvmap_co_device_init(struct reserved_mem *rmem,
 #else
 		err = nvmap_dma_declare_coherent_memory(co->dma_dev, 0,
 				co->base, co->size,
-				DMA_MEMORY_NOMAP, co->is_compression_co,
+				DMA_MEMORY_NOMAP, co->is_gpu_co,
 				co->granule_size);
 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
@@ -907,8 +907,8 @@ int __init nvmap_co_setup(struct reserved_mem *rmem, u32 granule_size)
 	co->base = rmem->base;
 	co->size = rmem->size;
 	co->cma_dev = NULL;
-	if (!strncmp(co->name, "compression", 11)) {
-		co->is_compression_co = true;
+	if (!strncmp(co->name, "gpu", 3)) {
+		co->is_gpu_co = true;
 		co->granule_size = granule_size;
 	}
 
@@ -943,8 +943,8 @@ int __init nvmap_init(struct platform_device *pdev)
 	while (!of_phandle_iterator_next(&it) && it.node) {
 		if (of_device_is_available(it.node) &&
 		    !of_device_is_compatible(it.node, "nvidia,ivm_carveout")) {
-			/* Read granule size in case of compression carveout */
-			if (of_device_is_compatible(it.node, "nvidia,compression_carveout")
+			/* Read granule size in case of gpu carveout */
+			if (of_device_is_compatible(it.node, "nvidia,gpu_carveout")
 			    && of_property_read_u32(it.node, "granule-size", &granule_size)) {
 				pr_err("granule-size property is missing\n");
 				return -EINVAL;
@@ -207,13 +207,13 @@ int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
 		return -EINVAL;
 
 	/*
-	 * In case of Compression carveout, the handle size needs to be aligned to granule.
+	 * In case of Gpu carveout, the handle size needs to be aligned to granule.
 	 */
-	if (op.heap_mask & NVMAP_HEAP_CARVEOUT_COMPRESSION) {
+	if (op.heap_mask & NVMAP_HEAP_CARVEOUT_GPU) {
 		u32 granule_size = 0;
 
 		for (i = 0; i < nvmap_dev->nr_carveouts; i++)
-			if (nvmap_dev->heaps[i].heap_bit & NVMAP_HEAP_CARVEOUT_COMPRESSION)
+			if (nvmap_dev->heaps[i].heap_bit & NVMAP_HEAP_CARVEOUT_GPU)
 				granule_size = nvmap_dev->heaps[i].carveout->granule_size;
 		handle->size = ALIGN_GRANULE_SIZE(handle->size, granule_size);
 		page_sz = granule_size;
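ALIGN_GRANULE_SIZE() itself is not part of this diff; assuming the usual power-of-two round-up, the alignment the comment above describes looks roughly like the sketch below (the EXAMPLE_ name is hypothetical, not the real macro):

	/* Assumed round-up semantics for a power-of-two granule size. */
	#define EXAMPLE_ALIGN_GRANULE_SIZE(sz, g)	(((sz) + (g) - 1) & ~((u64)(g) - 1))

	/*
	 * Worked example: a 5 MiB handle allocated from the gpu carveout with a
	 * 2 MiB granule rounds up to 6 MiB (three granules), and page_sz becomes
	 * the granule size rather than PAGE_SIZE.
	 */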
@@ -1148,9 +1148,9 @@ int nvmap_ioctl_get_handle_parameters(struct file *filp, void __user *arg)
 	/*
 	 * Check handle is allocated or not while setting contig.
 	 * If heap type is IOVMM, check if it has flag set for contiguous memory
-	 * allocation request. Otherwise, if handle belongs to any carveout except compression
+	 * allocation request. Otherwise, if handle belongs to any carveout except gpu
 	 * carveout then all allocations are contiguous, hence set contig flag to true.
-	 * In case of compression carveout, if allocation is page based then set contig flag to
+	 * In case of gpu carveout, if allocation is page based then set contig flag to
 	 * false otherwise true.
 	 */
 	if (handle->alloc &&
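A hedged sketch of the contig rule that comment describes, using hypothetical booleans rather than the real handle fields (which this hunk does not show):

	static bool example_handle_is_contig(bool is_iovmm, bool asked_contig,
					     bool is_gpu_carveout, bool page_based)
	{
		if (is_iovmm)
			return asked_contig;	/* IOVMM: whatever the alloc flag requested */
		if (is_gpu_carveout)
			return !page_based;	/* gpu carveout: contiguous unless page based */
		return true;			/* every other carveout is always contiguous */
	}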
@@ -1359,7 +1359,7 @@ int nvmap_ioctl_query_heap_params(struct file *filp, void __user *arg)
 		heap = nvmap_dev->heaps[i].carveout;
 		op.total = nvmap_query_heap_size(heap);
 		op.free = heap->free_size;
-		if (nvmap_dev->heaps[i].carveout->is_compression_co)
+		if (nvmap_dev->heaps[i].carveout->is_gpu_co)
 			op.granule_size = nvmap_dev->heaps[i].carveout->granule_size;
 		break;
 	}
@@ -488,7 +488,7 @@ struct dma_coherent_mem_replica {
 };
 
 int nvmap_dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-		dma_addr_t device_addr, size_t size, int flags, bool is_compression,
+		dma_addr_t device_addr, size_t size, int flags, bool is_gpu,
 		u32 granule_size);
 #endif
 int nvmap_probe(struct platform_device *pdev);
@@ -21,7 +21,7 @@
 #define NVMAP_HEAP_CARVEOUT_VPR (1ul<<28)
 #define NVMAP_HEAP_CARVEOUT_TSEC (1ul<<27)
 #define NVMAP_HEAP_CARVEOUT_VIDMEM (1ul<<26)
-#define NVMAP_HEAP_CARVEOUT_COMPRESSION (1ul << 3)
+#define NVMAP_HEAP_CARVEOUT_GPU (1ul << 3)
 #define NVMAP_HEAP_CARVEOUT_FSI (1ul<<2)
 #define NVMAP_HEAP_CARVEOUT_IVM (1ul<<1)
 #define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
@@ -94,8 +94,8 @@ struct nvmap_platform_carveout {
 	bool no_cpu_access; /* carveout can't be accessed from cpu at all */
 	bool init_done; /* FIXME: remove once all caveouts use reserved-memory */
 	struct nvmap_pm_ops pm_ops;
-	bool is_compression_co; /* Compression carveout is treated differently */
-	u32 granule_size; /* Granule size for compression carveout */
+	bool is_gpu_co; /* Gpu carveout is treated differently */
+	u32 granule_size; /* Granule size for gpu carveout */
 };
 
 struct nvmap_platform_data {