mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Move secure_alloc to struct gk20a
Move the function pointer for VPR page allocation to struct gk20a and
use it from there. At the same time, remove the secure_page_alloc
pointer and add a direct call to it in probe.

Move platform_tegra.h to tegra/linux/platform_gk20a_tegra.h, as it only
declares functions defined in platform_gk20a_tegra.c to other files in
the same directory.

JIRA NVGPU-16

Change-Id: I19ac9ee0b2f6734203ae32a1f51d67fd51aced9f
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1473706
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: c3fa78b1d9
Commit: bd68f98ba7
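At a glance, the refactor swaps a platform-data hook for a HAL hook. A minimal before/after sketch of the call pattern, using only names that appear in this change (illustrative, not the full code):

	/* Before: common code had to reach into the Linux platform data. */
	struct gk20a_platform *platform = dev_get_drvdata(g->dev);

	if (platform->secure_alloc)
		platform->secure_alloc(dev,
				&gr->global_ctx_buffer[CIRCULAR_VPR],
				cb_buffer_size);

	/* After: the hook lives in struct gk20a; platform code installs it
	 * once at probe time via gk20a_tegra_init_secure_alloc(), so common
	 * code no longer depends on struct gk20a_platform. */
	if (g->ops.mm.secure_alloc)
		g->ops.mm.secure_alloc(dev,
				&gr->global_ctx_buffer[CIRCULAR_VPR],
				cb_buffer_size);

This also lets gk20a_tegra_secure_alloc() become static: the only symbol a platform now exports for VPR allocation is the init helper that fills in g->ops.mm.secure_alloc.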
@@ -134,20 +134,6 @@ static void nvgpu_init_mm_vars(struct gk20a *g)
 	nvgpu_mutex_init(&g->mm.priv_lock);
 }
 
-static int gk20a_secure_page_alloc(struct device *dev)
-{
-	struct gk20a_platform *platform = dev_get_drvdata(dev);
-	int err = 0;
-
-	if (platform->secure_page_alloc) {
-		err = platform->secure_page_alloc(dev);
-		if (!err)
-			platform->secure_alloc_ready = true;
-	}
-
-	return err;
-}
-
 int nvgpu_probe(struct gk20a *g,
 		const char *debugfs_symlink,
 		const char *interface_name,
@@ -178,11 +164,6 @@ int nvgpu_probe(struct gk20a *g,
 	if (IS_ENABLED(CONFIG_GK20A_DEVFREQ))
 		gk20a_scale_init(g->dev);
 
-	err = gk20a_secure_page_alloc(g->dev);
-	if (err)
-		dev_err(g->dev,
-			"failed to allocate secure buffer %d\n", err);
-
 	if (platform->late_probe) {
 		err = platform->late_probe(g->dev);
 		if (err) {
@@ -669,6 +669,14 @@ struct gpu_ops {
 		void (*init_inst_block)(struct nvgpu_mem *inst_block,
 				struct vm_gk20a *vm, u32 big_page_size);
 		bool (*mmu_fault_pending)(struct gk20a *g);
+		/* This function is called to allocate secure memory (memory
+		 * that the CPU cannot see). The function should fill the
+		 * context buffer descriptor (especially fields destroy, sgt,
+		 * size).
+		 */
+		int (*secure_alloc)(struct device *dev,
+				    struct gr_ctx_buffer_desc *desc,
+				    size_t size);
 	} mm;
 	struct {
 		u32 (*enter)(struct gk20a *g, struct nvgpu_mem *mem,
@@ -2722,7 +2722,6 @@ static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g)
 
 static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 {
-	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
 	struct gr_gk20a *gr = &g->gr;
 	int attr_buffer_size, err;
 	struct device *dev = g->dev;
@@ -2744,8 +2743,8 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 	if (err)
 		goto clean_up;
 
-	if (platform->secure_alloc)
-		platform->secure_alloc(dev,
+	if (g->ops.mm.secure_alloc)
+		g->ops.mm.secure_alloc(dev,
 			       &gr->global_ctx_buffer[CIRCULAR_VPR],
 			       cb_buffer_size);
 
@@ -2756,8 +2755,8 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 	if (err)
 		goto clean_up;
 
-	if (platform->secure_alloc)
-		platform->secure_alloc(dev,
+	if (g->ops.mm.secure_alloc)
+		g->ops.mm.secure_alloc(dev,
 			       &gr->global_ctx_buffer[PAGEPOOL_VPR],
 			       pagepool_buffer_size);
 
@@ -2768,14 +2767,11 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 	if (err)
 		goto clean_up;
 
-	if (platform->secure_alloc)
-		platform->secure_alloc(dev,
+	if (g->ops.mm.secure_alloc)
+		g->ops.mm.secure_alloc(dev,
 			       &gr->global_ctx_buffer[ATTRIBUTE_VPR],
 			       attr_buffer_size);
 
-	if (platform->secure_buffer.destroy)
-		platform->secure_buffer.destroy(dev, &platform->secure_buffer);
-
 	gk20a_dbg_info("golden_image_size : %d",
 		   gr->ctx_vars.golden_image_size);
 
@@ -148,21 +148,7 @@ struct gk20a_platform {
 	/* Powerdown platform dependencies */
 	void (*idle)(struct device *dev);
 
-	/* This function is called to allocate secure memory (memory that the
-	 * CPU cannot see). The function should fill the context buffer
-	 * descriptor (especially fields destroy, sgt, size).
-	 */
-	int (*secure_alloc)(struct device *dev,
-			    struct gr_ctx_buffer_desc *desc,
-			    size_t size);
-
-	/* Function to allocate a secure buffer of PAGE_SIZE at probe time.
-	 * This is also helpful to trigger secure memory resizing
-	 * while GPU is off
-	 */
-	int (*secure_page_alloc)(struct device *dev);
 	struct secure_page_buffer secure_buffer;
-	bool secure_alloc_ready;
 
 	/* Device is going to be suspended */
 	int (*suspend)(struct device *);
@@ -110,6 +110,8 @@ static void gk20a_tegra_secure_page_destroy(struct device *dev,
 	dma_free_attrs(&tegra_vpr_dev, secure_buffer->size,
 			(void *)(uintptr_t)secure_buffer->iova,
 			secure_buffer->iova, __DMA_ATTR(attrs));
+
+	secure_buffer->destroy = NULL;
 }
 
 int gk20a_tegra_secure_page_alloc(struct device *dev)
@@ -153,7 +155,7 @@ static void gk20a_tegra_secure_destroy(struct gk20a *g,
 	}
 }
 
-int gk20a_tegra_secure_alloc(struct device *dev,
+static int gk20a_tegra_secure_alloc(struct device *dev,
 			     struct gr_ctx_buffer_desc *desc,
 			     size_t size)
 {
@@ -164,9 +166,6 @@ int gk20a_tegra_secure_alloc(struct device *dev,
 	struct page *page;
 	int err = 0;
 
-	if (!platform->secure_alloc_ready)
-		return -EINVAL;
-
 	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
 	(void)dma_alloc_attrs(&tegra_vpr_dev, size, &iova,
 			      GFP_KERNEL, __DMA_ATTR(attrs));
@@ -194,6 +193,9 @@ int gk20a_tegra_secure_alloc(struct device *dev,
 	desc->mem.size = size;
 	desc->mem.aperture = APERTURE_SYSMEM;
 
+	if (platform->secure_buffer.destroy)
+		platform->secure_buffer.destroy(dev, &platform->secure_buffer);
+
 	return err;
 
 fail_sgt:
@@ -896,6 +898,11 @@ void gk20a_tegra_idle(struct device *dev)
 #endif
 }
 
+void gk20a_tegra_init_secure_alloc(struct gk20a *g)
+{
+	g->ops.mm.secure_alloc = gk20a_tegra_secure_alloc;
+}
+
 static int gk20a_tegra_probe(struct device *dev)
 {
 	struct gk20a_platform *platform = dev_get_drvdata(dev);
@@ -974,6 +981,7 @@ static int gk20a_tegra_probe(struct device *dev)
 
 	gk20a_tegra_get_clocks(dev);
 	nvgpu_linux_init_clk_support(platform->g);
+	gk20a_tegra_init_secure_alloc(platform->g);
 
 	if (platform->clk_register) {
 		ret = platform->clk_register(platform->g);
@@ -988,8 +996,11 @@ static int gk20a_tegra_probe(struct device *dev)
 	return 0;
 }
 
-static int gk20a_tegra_late_probe(struct device *dev)
+int gk20a_tegra_late_probe(struct device *dev)
 {
+	/* Cause early VPR resize */
+	gk20a_tegra_secure_page_alloc(dev);
+
 	/* Initialise tegra specific scaling quirks */
 	gk20a_tegra_scale_init(dev);
 
@@ -1085,8 +1096,6 @@ struct gk20a_platform gk20a_tegra_platform = {
 	.devfreq_governor = "nvhost_podgov",
 	.qos_notify = gk20a_scale_qos_notify,
 
-	.secure_alloc = gk20a_tegra_secure_alloc,
-	.secure_page_alloc = gk20a_tegra_secure_page_alloc,
 	.dump_platform_dependencies = gk20a_tegra_debug_dump,
 
 	.soc_name = "tegra12x",
@@ -1157,8 +1166,6 @@ struct gk20a_platform gm20b_tegra_platform = {
 	.devfreq_governor = "nvhost_podgov",
 	.qos_notify = gk20a_scale_qos_notify,
 
-	.secure_alloc = gk20a_tegra_secure_alloc,
-	.secure_page_alloc = gk20a_tegra_secure_page_alloc,
 	.dump_platform_dependencies = gk20a_tegra_debug_dump,
 
 	.has_cde = true,
@@ -1,7 +1,7 @@
 /*
  * GK20A Platform (SoC) Interface
  *
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -13,17 +13,13 @@
  * more details.
  */
 
-#ifndef _NVGPU_PLATFORM_TEGRA_H_
-#define _NVGPU_PLATFORM_TEGRA_H_
+#ifndef _NVGPU_PLATFORM_GK20A_TEGRA_H_
+#define _NVGPU_PLATFORM_GK20A_TEGRA_H_
 
-#include <nvgpu/types.h>
+struct device;
+struct gk20a;
 
-struct platform_device;
-struct gr_ctx_buffer_desc;
-
-int gk20a_tegra_secure_alloc(struct device *dev,
-			     struct gr_ctx_buffer_desc *desc,
-			     size_t size);
+void gk20a_tegra_init_secure_alloc(struct gk20a *g);
 int gk20a_tegra_secure_page_alloc(struct device *dev);
 
 #endif
@@ -36,7 +36,7 @@
 #include "gk20a/gk20a.h"
 #include "gk20a/gk20a_scale.h"
 
-#include "platform_tegra.h"
+#include "platform_gk20a_tegra.h"
 #include "gp10b/gp10b_sysfs.h"
 #include "gp10b/platform_gp10b.h"
 
@@ -163,12 +163,16 @@ static int gp10b_tegra_probe(struct device *dev)
 
 	gp10b_tegra_get_clocks(dev);
 	nvgpu_linux_init_clk_support(platform->g);
+	gk20a_tegra_init_secure_alloc(platform->g);
 
 	return 0;
 }
 
 static int gp10b_tegra_late_probe(struct device *dev)
 {
+	/* Cause early VPR resize */
+	gk20a_tegra_secure_page_alloc(dev);
+
 	/*Create GP10B specific sysfs*/
 	gp10b_create_sysfs(dev);
 
@@ -423,9 +427,6 @@ struct gk20a_platform gp10b_tegra_platform = {
 
 	.qos_notify = gk20a_scale_qos_notify,
 
-	.secure_alloc = gk20a_tegra_secure_alloc,
-	.secure_page_alloc = gk20a_tegra_secure_page_alloc,
-
 	.reset_assert = gp10b_tegra_reset_assert,
 	.reset_deassert = gp10b_tegra_reset_deassert,
 