gpu: nvgpu: add APIs to allocate/free dummy secure buffer

Add APIs to allocate and free a dummy secure buffer of size PAGE_SIZE.
Also fix minor errors in the secure memory alloc/free paths.

Bug 1487804

Change-Id: If078116fb973e81bfcee054b900c09a313e389c6
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/421700
(cherry picked from commit 5391515dab27cc88b921cf81913085dea98197e0)
Reviewed-on: http://git-master/r/419609
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Author: Deepak Nibade
Date: 2014-06-05 17:00:20 +05:30
Committed by: Dan Willemsen
Parent: bea4bb915a
Commit: 725b56f71a
2 changed files with 49 additions and 4 deletions


@@ -20,12 +20,20 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/dma-attrs.h>
struct gk20a;
struct channel_gk20a;
struct gr_ctx_buffer_desc;
struct gk20a_scale_profile;
struct secure_page_buffer {
void (*destroy)(struct platform_device *, struct secure_page_buffer *);
size_t size;
u64 iova;
struct dma_attrs attrs;
};
struct gk20a_platform {
#ifdef CONFIG_TEGRA_GK20A
u32 syncpt_base;
@@ -79,6 +87,13 @@ struct gk20a_platform {
struct gr_ctx_buffer_desc *desc,
size_t size);
/* Function to allocate a secure buffer of PAGE_SIZE at probe time.
* This is also helpful to trigger secure memory resizing
* while GPU is off
*/
int (*secure_page_alloc)(struct platform_device *dev);
struct secure_page_buffer secure_buffer;
/* Device is going to be suspended */
int (*suspend)(struct device *);
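
For context on how the new hook is expected to be used: secure_page_alloc() is intended to run once at probe time, while the GPU is still powered off, and the saved secure_buffer is later torn down through its destroy() callback. The sketch below is illustrative only; the caller names example_probe()/example_remove() are hypothetical, since the actual call sites are not part of this diff.

/* Illustrative sketch (not part of the patch): how a probe/remove pair
 * might drive the new hooks. Only secure_page_alloc() and the
 * secure_buffer.destroy() callback come from the changes above.
 */
static int example_probe(struct platform_device *dev)
{
	struct gk20a_platform *platform = platform_get_drvdata(dev);
	int err;

	if (platform->secure_page_alloc) {
		err = platform->secure_page_alloc(dev);
		if (err)
			dev_err(&dev->dev,
				"failed to allocate secure buffer: %d\n", err);
	}
	return 0;
}

static void example_remove(struct platform_device *dev)
{
	struct gk20a_platform *platform = platform_get_drvdata(dev);
	struct secure_page_buffer *secure_buffer = &platform->secure_buffer;

	/* destroy() is only set once the dummy page was allocated */
	if (secure_buffer->destroy)
		secure_buffer->destroy(dev, secure_buffer);
}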


@@ -93,12 +93,41 @@ int FXDIV(int x, int y)
return (x << pos) / y;
}
static void gk20a_tegra_secure_page_destroy(struct platform_device *pdev,
struct secure_page_buffer *secure_buffer)
{
dma_free_attrs(&tegra_vpr_dev, secure_buffer->size,
(void *)(uintptr_t)secure_buffer->iova,
secure_buffer->iova, &secure_buffer->attrs);
}
static int gk20a_tegra_secure_page_alloc(struct platform_device *pdev)
{
struct gk20a_platform *platform = platform_get_drvdata(pdev);
struct secure_page_buffer *secure_buffer = &platform->secure_buffer;
DEFINE_DMA_ATTRS(attrs);
dma_addr_t iova;
size_t size = PAGE_SIZE;
(void)dma_alloc_attrs(&tegra_vpr_dev, size, &iova,
DMA_MEMORY_NOMAP, &attrs);
if (dma_mapping_error(&tegra_vpr_dev, iova))
return -ENOMEM;
secure_buffer->size = size;
secure_buffer->iova = iova;
secure_buffer->attrs = attrs;
secure_buffer->destroy = gk20a_tegra_secure_page_destroy;
return 0;
}
static void gk20a_tegra_secure_destroy(struct platform_device *pdev,
struct gr_ctx_buffer_desc *desc)
{
gk20a_free_sgtable(&desc->sgt);
dma_free_attrs(&tegra_vpr_dev, desc->size,
-	(void *)(uintptr_t)&desc->iova,
+	(void *)(uintptr_t)desc->iova,
desc->iova, &desc->attrs);
}
@@ -113,10 +142,8 @@ static int gk20a_tegra_secure_alloc(struct platform_device *pdev,
struct page *page;
int err = 0;
-	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
(void)dma_alloc_attrs(&tegra_vpr_dev, size, &iova,
-	GFP_KERNEL, &attrs);
+	DMA_MEMORY_NOMAP, &attrs);
if (dma_mapping_error(&tegra_vpr_dev, iova))
return -ENOMEM;
@@ -524,6 +551,7 @@ struct gk20a_platform t132_gk20a_tegra_platform = {
.qos_id = PM_QOS_GPU_FREQ_MIN,
.secure_alloc = gk20a_tegra_secure_alloc,
.secure_page_alloc = gk20a_tegra_secure_page_alloc,
.dump_platform_dependencies = gk20a_tegra_debug_dump,
};
@@ -551,6 +579,7 @@ struct gk20a_platform gk20a_tegra_platform = {
.qos_id = PM_QOS_GPU_FREQ_MIN,
.secure_alloc = gk20a_tegra_secure_alloc,
.secure_page_alloc = gk20a_tegra_secure_page_alloc,
.dump_platform_dependencies = gk20a_tegra_debug_dump,
};
@@ -574,6 +603,7 @@ struct gk20a_platform gm20b_tegra_platform = {
.qos_id = PM_QOS_GPU_FREQ_MIN,
.secure_alloc = gk20a_tegra_secure_alloc,
.secure_page_alloc = gk20a_tegra_secure_page_alloc,
.dump_platform_dependencies = gk20a_tegra_debug_dump,
};
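
One detail worth noting about the teardown path: dma_free_attrs() must receive the same size and DMA attributes that were used at allocation time, which is why gk20a_tegra_secure_page_alloc() stashes size, iova, and attrs in struct secure_page_buffer. The condensed sketch below shows that pairing, using the same older-kernel dma_alloc_attrs()/dma_free_attrs() calls as the patch; the helper name is hypothetical.

/* Hypothetical helper mirroring the alloc/free pairing used above:
 * the size and dma_attrs handed to dma_alloc_attrs() must be handed
 * back to dma_free_attrs(), so the patch records them (plus the iova)
 * in struct secure_page_buffer.
 */
static int example_secure_page_cycle(void)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t iova;
	size_t size = PAGE_SIZE;

	/* As in the patch, DMA_MEMORY_NOMAP is passed where a GFP flag
	 * would normally go for the VPR device. */
	(void)dma_alloc_attrs(&tegra_vpr_dev, size, &iova,
			      DMA_MEMORY_NOMAP, &attrs);
	if (dma_mapping_error(&tegra_vpr_dev, iova))
		return -ENOMEM;

	/* ... the secure page is now usable by the GPU ... */

	/* Free with the identical size/attrs; the CPU-address argument is
	 * the iova cast back, as in gk20a_tegra_secure_page_destroy(). */
	dma_free_attrs(&tegra_vpr_dev, size,
		       (void *)(uintptr_t)iova, iova, &attrs);
	return 0;
}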