gpu: nvgpu: Pass struct gk20a to secure alloc

Pass struct gk20a to the secure alloc API instead of the Linux-specific
struct device. A minimal sketch of the new callback shape follows the
commit metadata below.

JIRA NVGPU-38

Change-Id: I6d9afaeeff9b957351072caa29690f2caf58f858
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1505179
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Terje Bergstrom
Date:      2017-06-19 14:23:16 -07:00
Committer: mobile promotions
Commit:    0b6eff2965 (parent 9f65627d0e)

5 changed files with 10 additions and 10 deletions
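For orientation, a minimal sketch of the new callback shape, assuming only
what the diff itself shows: the op now takes struct gk20a, and Linux-specific
code recovers the device through the existing dev_from_gk20a() helper when it
still needs one (for example, to reach the platform data via
dev_get_drvdata()). The names example_secure_alloc and secure_alloc_fn are
illustrative only, not part of the change:

/* Sketch only -- not the exact driver code. Types are forward-declared
 * here just so the example is self-contained. */
struct gk20a;
struct gr_ctx_buffer_desc;
struct device;

/* Existing helper in the driver that maps a gk20a back to its device. */
struct device *dev_from_gk20a(struct gk20a *g);

/* New op shape: the OS-agnostic struct gk20a replaces struct device. */
typedef int (*secure_alloc_fn)(struct gk20a *g,
                               struct gr_ctx_buffer_desc *desc,
                               size_t size);

/* Hypothetical implementation showing the pattern used by the Tegra
 * platform code in the last hunk below. */
static int example_secure_alloc(struct gk20a *g,
                                struct gr_ctx_buffer_desc *desc,
                                size_t size)
{
        struct device *dev = dev_from_gk20a(g); /* only where Linux needs it */

        (void)dev;
        (void)desc;
        (void)size;
        return 0;
}

Call sites such as gr_gk20a_alloc_global_ctx_buffers() can then pass g
directly, as the hunks below show.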

@@ -984,7 +984,7 @@ static int __exit gk20a_remove(struct platform_device *pdev)
         gk20a_remove_sysfs(dev);
 
         if (platform->secure_buffer.destroy)
-                platform->secure_buffer.destroy(dev,
+                platform->secure_buffer.destroy(g,
                                 &platform->secure_buffer);
 
         if (pm_runtime_enabled(dev))

@@ -695,7 +695,7 @@ struct gpu_ops {
                 * context buffer descriptor (especially fields destroy, sgt,
                 * size).
                 */
-                int (*secure_alloc)(struct device *dev,
+                int (*secure_alloc)(struct gk20a *g,
                                 struct gr_ctx_buffer_desc *desc,
                                 size_t size);
         } mm;

@@ -2728,7 +2728,6 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 {
         struct gr_gk20a *gr = &g->gr;
         int attr_buffer_size, err;
-        struct device *dev = g->dev;
 
         u32 cb_buffer_size = gr->bundle_cb_default_size *
                 gr_scc_bundle_cb_size_div_256b_byte_granularity_v();
@@ -2748,7 +2747,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
                 goto clean_up;
 
         if (g->ops.mm.secure_alloc)
-                g->ops.mm.secure_alloc(dev,
+                g->ops.mm.secure_alloc(g,
                                 &gr->global_ctx_buffer[CIRCULAR_VPR],
                                 cb_buffer_size);
 
@@ -2760,7 +2759,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
                 goto clean_up;
 
         if (g->ops.mm.secure_alloc)
-                g->ops.mm.secure_alloc(dev,
+                g->ops.mm.secure_alloc(g,
                                 &gr->global_ctx_buffer[PAGEPOOL_VPR],
                                 pagepool_buffer_size);
 
@@ -2772,7 +2771,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
                 goto clean_up;
 
         if (g->ops.mm.secure_alloc)
-                g->ops.mm.secure_alloc(dev,
+                g->ops.mm.secure_alloc(g,
                                 &gr->global_ctx_buffer[ATTRIBUTE_VPR],
                                 attr_buffer_size);

@@ -31,7 +31,7 @@ struct gr_ctx_buffer_desc;
 struct gk20a_scale_profile;
 
 struct secure_page_buffer {
-        void (*destroy)(struct device *, struct secure_page_buffer *);
+        void (*destroy)(struct gk20a *, struct secure_page_buffer *);
         size_t size;
         u64 iova;
 };

@@ -95,7 +95,7 @@ static inline void __maybe_unused pmc_write(u32 val, unsigned long reg)
 #define MHZ_TO_HZ(x) ((x) * 1000000)
 #define HZ_TO_MHZ(x) ((x) / 1000000)
 
-static void gk20a_tegra_secure_page_destroy(struct device *dev,
+static void gk20a_tegra_secure_page_destroy(struct gk20a *g,
                                 struct secure_page_buffer *secure_buffer)
 {
         DEFINE_DMA_ATTRS(attrs);
@@ -148,10 +148,11 @@ static void gk20a_tegra_secure_destroy(struct gk20a *g,
         }
 }
 
-static int gk20a_tegra_secure_alloc(struct device *dev,
+static int gk20a_tegra_secure_alloc(struct gk20a *g,
                                 struct gr_ctx_buffer_desc *desc,
                                 size_t size)
 {
+        struct device *dev = dev_from_gk20a(g);
         struct gk20a_platform *platform = dev_get_drvdata(dev);
         DEFINE_DMA_ATTRS(attrs);
         dma_addr_t iova;
@@ -187,7 +188,7 @@ static int gk20a_tegra_secure_alloc(struct device *dev,
         desc->mem.aperture = APERTURE_SYSMEM;
 
         if (platform->secure_buffer.destroy)
-                platform->secure_buffer.destroy(dev, &platform->secure_buffer);
+                platform->secure_buffer.destroy(g, &platform->secure_buffer);
 
         return err;