gpu: nvgpu: add check for is_fmodel

is_fmodel flag will be set in gk20a_probe().
Updated the code to check is_fmodel instead of checking for
specific supported simulated platforms.

Bug 1735760

Change-Id: I7cbac2196130fe5ce4c1a910504879e6948c13da
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: http://git-master/r/1177869
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Tested-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
This commit is contained in:
Seema Khowala
2016-07-08 16:12:44 -07:00
committed by Seshendra Gadagottu
parent 9ca4c6b596
commit d64e201514
11 changed files with 51 additions and 26 deletions

View File

@@ -1483,6 +1483,9 @@ static int gk20a_probe(struct platform_device *dev)
return -ENODATA;
}
+	if (tegra_platform_is_linsim() || tegra_platform_is_vdk())
+		platform->is_fmodel = true;
gk20a_dbg_fn("");
platform_set_drvdata(dev, platform);

View File

@@ -430,7 +430,9 @@ done:
int gr_gk20a_init_ctx_vars(struct gk20a *g, struct gr_gk20a *gr)
{
-	if (tegra_platform_is_linsim())
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
+
+	if (platform->is_fmodel)
return gr_gk20a_init_ctx_vars_sim(g, gr);
else
return gr_gk20a_init_ctx_vars_fw(g, gr);

View File

@@ -372,8 +372,9 @@ static int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long end_jiffies,
{
u32 val;
u32 delay = expect_delay;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);

-	if (tegra_platform_is_linsim())
+	if (platform->is_fmodel)
return 0;
gk20a_dbg_fn("");
@@ -1491,6 +1492,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
u32 last_method_data = 0;
int retries = FE_PWR_MODE_TIMEOUT_MAX / FE_PWR_MODE_TIMEOUT_DEFAULT;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
gk20a_dbg_fn("");
@@ -1502,7 +1504,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
if (gr->ctx_vars.golden_image_initialized)
goto clean_up;
-	if (!tegra_platform_is_linsim()) {
+	if (!platform->is_fmodel) {
gk20a_writel(g, gr_fe_pwr_mode_r(),
gr_fe_pwr_mode_req_send_f() | gr_fe_pwr_mode_mode_force_on_f());
do {
@@ -1542,7 +1544,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
gk20a_readl(g, gr_fecs_ctxsw_reset_ctl_r());
udelay(10);
-	if (!tegra_platform_is_linsim()) {
+	if (!platform->is_fmodel) {
gk20a_writel(g, gr_fe_pwr_mode_r(),
gr_fe_pwr_mode_req_send_f() | gr_fe_pwr_mode_mode_auto_f());
@@ -1903,6 +1905,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
u32 v, data;
int ret = 0;
struct mem_desc *mem = &ch_ctx->gr_ctx->mem;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
gk20a_dbg_fn("");
@@ -1990,7 +1993,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
gk20a_mem_end(g, mem);
-	if (tegra_platform_is_linsim()) {
+	if (platform->is_fmodel) {
u32 mdata = fecs_current_ctx_data(g, &c->inst_block);
ret = gr_gk20a_submit_fecs_method_op(g,
@@ -2416,10 +2419,11 @@ static void gr_gk20a_load_falcon_with_bootloader(struct gk20a *g)
int gr_gk20a_load_ctxsw_ucode(struct gk20a *g)
{
int err;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
gk20a_dbg_fn("");
-	if (tegra_platform_is_linsim()) {
+	if (platform->is_fmodel) {
gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7),
gr_fecs_ctxsw_mailbox_value_f(0xc0de7777));
gk20a_writel(g, gr_gpccs_ctxsw_mailbox_r(7),
@@ -4211,6 +4215,7 @@ void gr_gk20a_init_blcg_mode(struct gk20a *g, u32 mode, u32 engine)
void gr_gk20a_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
{
u32 gate_ctrl, idle_filter;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));
@@ -4239,7 +4244,7 @@ void gr_gk20a_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
"invalid elcg mode %d", mode);
}
-	if (tegra_platform_is_linsim()) {
+	if (platform->is_fmodel) {
gate_ctrl = set_field(gate_ctrl,
therm_gate_ctrl_eng_delay_after_m(),
therm_gate_ctrl_eng_delay_after_f(4));

View File

@@ -3,7 +3,7 @@
*
* GK20A Graphics
*
- * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -92,8 +92,9 @@ static void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
u64 compbit_base_post_multiply64;
u64 compbit_store_iova;
u64 compbit_base_post_divide64;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);

-	if (tegra_platform_is_linsim())
+	if (platform->is_fmodel)
compbit_store_iova = gk20a_mem_phys(&gr->compbit_store.mem);
else
compbit_store_iova = g->ops.mm.get_iova_addr(g,

View File

@@ -20,6 +20,7 @@
#include <trace/events/gk20a.h>
#include "hw_ltc_gk20a.h"
#include "gk20a.h"
#include "ltc_common.c"
@@ -45,6 +46,7 @@ static int gk20a_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
u32 compbit_backing_size;
int err;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
gk20a_dbg_fn("");
@@ -78,7 +80,7 @@ static int gk20a_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
gk20a_dbg_info("max comptag lines : %d",
max_comptag_lines);
-	if (tegra_platform_is_linsim())
+	if (platform->is_fmodel)
err = gk20a_ltc_alloc_phys_cbc(g, compbit_backing_size);
else
err = gk20a_ltc_alloc_virt_cbc(g, compbit_backing_size);

View File

@@ -1023,10 +1023,11 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
u32 num_pages = 1 << order;
u32 len = num_pages * PAGE_SIZE;
int err;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
gk20a_dbg_fn("");
-	if (tegra_platform_is_linsim())
+	if (platform->is_fmodel)
return alloc_gmmu_phys_pages(vm, order, entry);
/*
@@ -1052,13 +1053,14 @@ void free_gmmu_pages(struct vm_gk20a *vm,
struct gk20a_mm_entry *entry)
{
struct gk20a *g = gk20a_from_vm(vm);
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
gk20a_dbg_fn("");
if (!entry->mem.size)
return;
-	if (tegra_platform_is_linsim()) {
+	if (platform->is_fmodel) {
free_gmmu_phys_pages(vm, entry);
return;
}
@@ -1076,9 +1078,11 @@ void free_gmmu_pages(struct vm_gk20a *vm,
int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
{
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
gk20a_dbg_fn("");
-	if (tegra_platform_is_linsim())
+	if (platform->is_fmodel)
return map_gmmu_phys_pages(entry);
if (IS_ENABLED(CONFIG_ARM64)) {
@@ -1100,9 +1104,11 @@ int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
void unmap_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
{
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
gk20a_dbg_fn("");
-	if (tegra_platform_is_linsim()) {
+	if (platform->is_fmodel) {
unmap_gmmu_phys_pages(entry);
return;
}

View File

@@ -83,7 +83,7 @@ int gk20a_tegra_secure_page_alloc(struct device *dev)
dma_addr_t iova;
size_t size = PAGE_SIZE;
-	if (tegra_platform_is_linsim())
+	if (platform->is_fmodel)
return -EINVAL;
(void)dma_alloc_attrs(&tegra_vpr_dev, size, &iova,
@@ -314,7 +314,7 @@ static bool gk20a_tegra_is_railgated(struct device *dev)
struct gk20a_platform *platform = dev_get_drvdata(dev);
bool ret = false;
-	if (!tegra_platform_is_linsim())
+	if (!(platform->is_fmodel))
ret = !tegra_dvfs_is_rail_up(platform->gpu_rail);
return ret;
@@ -331,7 +331,7 @@ static int gk20a_tegra_railgate(struct device *dev)
struct gk20a_platform *platform = dev_get_drvdata(dev);
int ret = 0;
-	if (tegra_platform_is_linsim() ||
+	if (platform->is_fmodel ||
!tegra_dvfs_is_rail_up(platform->gpu_rail))
return 0;
@@ -383,7 +383,7 @@ static int gm20b_tegra_railgate(struct device *dev)
struct gk20a_platform *platform = dev_get_drvdata(dev);
int ret = 0;
-	if (tegra_platform_is_linsim() ||
+	if (platform->is_fmodel ||
!tegra_dvfs_is_rail_up(platform->gpu_rail))
return 0;
@@ -439,7 +439,7 @@ static int gk20a_tegra_unrailgate(struct device *dev)
int ret = 0;
bool first = false;
-	if (tegra_platform_is_linsim())
+	if (platform->is_fmodel)
return 0;
if (!platform->gpu_rail) {
@@ -510,7 +510,7 @@ static int gm20b_tegra_unrailgate(struct device *dev)
int ret = 0;
bool first = false;
-	if (tegra_platform_is_linsim())
+	if (platform->is_fmodel)
return 0;
if (!platform->gpu_rail) {

View File

@@ -25,7 +25,9 @@
void gk20a_reset_priv_ring(struct gk20a *g)
{
-	if (tegra_platform_is_linsim())
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
+
+	if (platform->is_fmodel)
return;
if (g->ops.clock_gating.slcg_priring_load_gating_prod)
@@ -50,8 +52,9 @@ void gk20a_priv_ring_isr(struct gk20a *g)
u32 status0, status1;
u32 cmd;
s32 retry = 100;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);

-	if (tegra_platform_is_linsim())
+	if (platform->is_fmodel)
return;
status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r());

View File

@@ -709,10 +709,11 @@ static int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
u32 reg_offset = gr_gpcs_gpccs_falcon_hwcfg_r() -
gr_fecs_falcon_hwcfg_r();
u8 falcon_id_mask = 0;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
gk20a_dbg_fn("");
-	if (tegra_platform_is_linsim()) {
+	if (platform->is_fmodel) {
gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7),
gr_fecs_ctxsw_mailbox_value_f(0xc0de7777));
gk20a_writel(g, gr_gpccs_ctxsw_mailbox_r(7),

View File

@@ -177,11 +177,12 @@ int gm20b_init_hal(struct gk20a *g)
{
struct gpu_ops *gops = &g->ops;
struct nvgpu_gpu_characteristics *c = &g->gpu_characteristics;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
*gops = gm20b_ops;
gops->securegpccs = false;
#ifdef CONFIG_TEGRA_ACR
-	if (tegra_platform_is_linsim()) {
+	if (platform->is_fmodel) {
gops->privsecurity = 1;
} else {
if (tegra_fuse_readl(FUSE_OPT_PRIV_SEC_DIS_0) &
@@ -193,7 +194,7 @@ int gm20b_init_hal(struct gk20a *g)
}
}
#else
-	if (tegra_platform_is_linsim()) {
+	if (platform->is_fmodel) {
gk20a_dbg_info("running ASIM with PRIV security disabled");
gops->privsecurity = 0;
} else {

View File

@@ -48,6 +48,7 @@ static int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
u32 compbit_backing_size;
int err;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
gk20a_dbg_fn("");
@@ -80,7 +81,7 @@ static int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
gk20a_dbg_info("max comptag lines : %d",
max_comptag_lines);
-	if (tegra_platform_is_linsim())
+	if (platform->is_fmodel)
err = gk20a_ltc_alloc_phys_cbc(g, compbit_backing_size);
else
err = gk20a_ltc_alloc_virt_cbc(g, compbit_backing_size);