gpu: nvgpu: falcon HAL to support SEC2

- Updated falcon controller HAL to support SEC2 falcon
& used "is_falcon_supported" flag to know the support on chip.
- Created falcon HAL "flcn_gp106.c/h" under gp106 to enable
support for SEC2 & inherited gk20a flcn support.
- Deleted SEC2 falcon related methods to make use of
generic falcon controller methods for SEC2.
- GP106 SEC2 code cleanup

NVGPU JIRA-99

Change-Id: I846e8015ed33554b3d8a45795314f1d28eee482f
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master/r/1510200
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
This commit is contained in:
Mahantesh Kumbar
2017-06-28 16:23:18 +05:30
committed by mobile promotions
parent d2486cf1b1
commit 2f712e2230
9 changed files with 121 additions and 121 deletions

View File

@@ -186,6 +186,7 @@ nvgpu-y += \
gp10b/gp10b.o \
gp106/hal_gp106.o \
gp106/mm_gp106.o \
gp106/flcn_gp106.o \
gp106/pmu_gp106.o \
gp106/mclk_gp106.o \
gp106/gr_gp106.o \

View File

@@ -269,7 +269,7 @@ static void gk20a_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
}
}
static void gk20a_falcon_ops(struct nvgpu_falcon *flcn)
void gk20a_falcon_ops(struct nvgpu_falcon *flcn)
{
struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
@@ -294,6 +294,11 @@ static void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
flcn->is_falcon_supported = true;
flcn->is_interrupt_enabled = true;
break;
case FALCON_ID_SEC2:
flcn->flcn_base = FALCON_SEC_BASE;
flcn->is_falcon_supported = false;
flcn->is_interrupt_enabled = false;
break;
case FALCON_ID_FECS:
flcn->flcn_base = FALCON_FECS_BASE;
flcn->is_falcon_supported = true;
@@ -314,8 +319,8 @@ static void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
nvgpu_mutex_init(&flcn->copy_lock);
gk20a_falcon_ops(flcn);
} else
nvgpu_info(g, "flcn-Id 0x%x not supported on current chip",
flcn->flcn_id);
nvgpu_info(g, "falcon 0x%x not supported on %s",
flcn->flcn_id, g->name);
}
void gk20a_falcon_init_hal(struct gpu_ops *gops)

View File

@@ -13,6 +13,7 @@
#ifndef __FLCN_GK20A_H__
#define __FLCN_GK20A_H__
void gk20a_falcon_ops(struct nvgpu_falcon *flcn);
void gk20a_falcon_init_hal(struct gpu_ops *gops);
#endif /* __FLCN_GK20A_H__ */

View File

@@ -176,6 +176,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
/* init interface layer support for PMU falcon */
nvgpu_flcn_sw_init(g, FALCON_ID_PMU);
nvgpu_flcn_sw_init(g, FALCON_ID_SEC2);
if (g->ops.bios_init)
err = g->ops.bios_init(g);

View File

@@ -0,0 +1,85 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "gk20a/gk20a.h"
#include "gk20a/flcn_gk20a.h"
#include "gp106/sec2_gp106.h"
#include <nvgpu/hw/gp106/hw_falcon_gp106.h>
/*
 * Install the engine-reset dependency op for this falcon instance.
 * PMU and SEC2 have engine-specific reset paths; every other falcon
 * ID gets a NULL reset hook.
 */
static void gp106_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
{
	struct nvgpu_falcon_engine_dependency_ops *dep_ops =
		&flcn->flcn_engine_dep_ops;

	if (flcn->flcn_id == FALCON_ID_PMU)
		dep_ops->reset_eng = nvgpu_pmu_reset;
	else if (flcn->flcn_id == FALCON_ID_SEC2)
		dep_ops->reset_eng = gp106_sec2_reset;
	else
		dep_ops->reset_eng = NULL;
}
/*
 * Wire up the falcon ops for GP106: start from the gk20a base ops,
 * then override the engine-dependency ops with the GP106-specific
 * ones (PMU/SEC2 reset hooks). Order matters: the gk20a call also
 * installs dependency ops, which the second call replaces.
 */
static void gp106_falcon_ops(struct nvgpu_falcon *flcn)
{
	gk20a_falcon_ops(flcn);
	gp106_falcon_engine_dependency_ops(flcn);
}
/*
 * Per-falcon software init for GP106.
 *
 * Resolves the MMIO base for the requested falcon ID and records
 * whether that falcon is supported on this chip. Supported falcons
 * get their copy lock initialized and their ops wired up via
 * gp106_falcon_ops(); unsupported IDs are logged and left disabled.
 * Unlike gk20a, SEC2 is marked supported here.
 */
static void gp106_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;

	switch (flcn->flcn_id) {
	case FALCON_ID_PMU:
		flcn->flcn_base = FALCON_PWR_BASE;
		flcn->is_falcon_supported = true;
		/* PMU is the only falcon with interrupts enabled here */
		flcn->is_interrupt_enabled = true;
		break;
	case FALCON_ID_SEC2:
		flcn->flcn_base = FALCON_SEC_BASE;
		flcn->is_falcon_supported = true;
		flcn->is_interrupt_enabled = false;
		break;
	case FALCON_ID_FECS:
		flcn->flcn_base = FALCON_FECS_BASE;
		flcn->is_falcon_supported = true;
		flcn->is_interrupt_enabled = false;
		break;
	case FALCON_ID_GPCCS:
		flcn->flcn_base = FALCON_GPCCS_BASE;
		flcn->is_falcon_supported = true;
		flcn->is_interrupt_enabled = false;
		break;
	default:
		flcn->is_falcon_supported = false;
		nvgpu_err(g, "Invalid flcn request");
		break;
	}

	if (flcn->is_falcon_supported) {
		nvgpu_mutex_init(&flcn->copy_lock);
		gp106_falcon_ops(flcn);
	} else {
		/* braces on both branches per kernel coding style */
		nvgpu_info(g, "falcon 0x%x not supported on %s",
			flcn->flcn_id, g->name);
	}
}
/*
 * Register the GP106 falcon sw-init entry point in the chip HAL
 * table; invoked from gp106_init_hal() during chip bring-up.
 */
void gp106_falcon_init_hal(struct gpu_ops *gops)
{
	gops->falcon.falcon_hal_sw_init = gp106_falcon_hal_sw_init;
}

View File

@@ -0,0 +1,18 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
/*
 * Fix: the include guard was copy-pasted from flcn_gk20a.h
 * (__FLCN_GK20A_H__); a translation unit including both headers
 * would silently drop whichever is included second. Use a guard
 * unique to this header, and forward-declare struct gpu_ops so the
 * header is self-contained.
 */
#ifndef __FLCN_GP106_H__
#define __FLCN_GP106_H__

struct gpu_ops;

void gp106_falcon_init_hal(struct gpu_ops *gops);

#endif /* __FLCN_GP106_H__ */

View File

@@ -18,7 +18,6 @@
#include "gk20a/css_gr_gk20a.h"
#include "gk20a/bus_gk20a.h"
#include "gk20a/pramin_gk20a.h"
#include "gk20a/flcn_gk20a.h"
#include "gp10b/ltc_gp10b.h"
#include "gp10b/gr_gp10b.h"
@@ -51,6 +50,7 @@
#include "gp106/gr_gp106.h"
#include "gp106/fb_gp106.h"
#include "gp106/gp106_gating_reglist.h"
#include "gp106/flcn_gp106.h"
#include "hal_gp106.h"
@@ -264,7 +264,7 @@ int gp106_init_hal(struct gk20a *g)
gp10b_init_ce(gops);
gp106_init_gr_ctx(gops);
gp106_init_mm(gops);
gk20a_falcon_init_hal(gops);
gp106_falcon_init_hal(gops);
gp106_init_pmu_ops(gops);
gk20a_init_debug_ops(gops);
gk20a_init_dbg_session_ops(gops);

View File

@@ -90,57 +90,6 @@ int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout)
return completion;
}
/*
 * Copy a system-memory buffer into SEC2 falcon DMEM through the
 * PRIV DMEM access port registers (word-at-a-time, not DMA).
 *
 * @pmu:  PMU descriptor used to reach the GPU device.
 * @dst:  DMEM destination offset; must be 4-byte aligned.
 * @src:  source buffer in system memory.
 * @size: number of bytes to copy; zero is rejected.
 * @port: DMEM access-port index.
 *
 * Serializes against other PRIV copies with pmu->pmu_copy_lock and
 * verifies afterwards that the auto-incremented DMEM address matches
 * the amount of data written.
 */
void sec2_copy_to_dmem(struct nvgpu_pmu *pmu,
		u32 dst, u8 *src, u32 size, u8 port)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	u32 i, words, bytes;
	u32 data, addr_mask;
	u32 *src_u32 = (u32*)src;

	if (size == 0) {
		nvgpu_err(g, "size is zero");
		return;
	}

	if (dst & 0x3) {
		nvgpu_err(g, "dst (0x%08x) not 4-byte aligned", dst);
		return;
	}

	nvgpu_mutex_acquire(&pmu->pmu_copy_lock);

	/* split into whole 32-bit words plus a trailing remainder */
	words = size >> 2;
	bytes = size & 0x3;

	/* mask of the DMEMC address bits (offset + block fields) */
	addr_mask = psec_falcon_dmemc_offs_m() |
		psec_falcon_dmemc_blk_m();

	dst &= addr_mask;

	/* program dst with auto-increment-on-write: each DMEMD write
	 * below advances the DMEM address by one word */
	gk20a_writel(g, psec_falcon_dmemc_r(port),
		dst | psec_falcon_dmemc_aincw_f(1));

	for (i = 0; i < words; i++)
		gk20a_writel(g, psec_falcon_dmemd_r(port), src_u32[i]);

	/* pack any trailing non-word-multiple bytes into one last word */
	if (bytes > 0) {
		data = 0;
		for (i = 0; i < bytes; i++)
			((u8 *)&data)[i] = src[(words << 2) + i];
		gk20a_writel(g, psec_falcon_dmemd_r(port), data);
	}

	/* read back the auto-incremented address and check it advanced
	 * by the word-aligned copy size */
	data = gk20a_readl(g, psec_falcon_dmemc_r(port)) & addr_mask;
	size = ALIGN(size, 4);
	if (data != dst + size) {
		nvgpu_err(g, "copy failed. bytes written %d, expected %d",
			data - dst, size);
	}
	nvgpu_mutex_release(&pmu->pmu_copy_lock);
	return;
}
int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
void *desc, u32 bl_sz)
{
@@ -184,7 +133,7 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
psec_falcon_dmemc_offs_f(0) |
psec_falcon_dmemc_blk_f(0) |
psec_falcon_dmemc_aincw_f(1));
sec2_copy_to_dmem(pmu, 0, (u8 *)desc,
nvgpu_flcn_copy_to_dmem(&g->sec2_flcn, 0, (u8 *)desc,
sizeof(struct flcn_bl_dmem_desc), 0);
/*TODO This had to be copied to bl_desc_dmem_load_off, but since
* this is 0, so ok for now*/
@@ -225,61 +174,6 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
return 0;
}
/*
 * Enable or disable SEC2 falcon interrupt delivery.
 *
 * Always clears (masks) every interrupt source first; when @enable
 * is true, routes selected sources to the host (wdtmr, halt, swgen0)
 * and the rest to the falcon, then unmasks the chosen set. When
 * @enable is false the falcon is left fully masked.
 */
void sec_enable_irq(struct nvgpu_pmu *pmu, bool enable)
{
	struct gk20a *g = gk20a_from_pmu(pmu);

	gk20a_dbg_fn("");

	/* mask everything before (re)configuring routing */
	gk20a_writel(g, psec_falcon_irqmclr_r(),
		psec_falcon_irqmclr_gptmr_f(1) |
		psec_falcon_irqmclr_wdtmr_f(1) |
		psec_falcon_irqmclr_mthd_f(1) |
		psec_falcon_irqmclr_ctxsw_f(1) |
		psec_falcon_irqmclr_halt_f(1) |
		psec_falcon_irqmclr_exterr_f(1) |
		psec_falcon_irqmclr_swgen0_f(1) |
		psec_falcon_irqmclr_swgen1_f(1) |
		psec_falcon_irqmclr_ext_f(0xff));

	if (enable) {
		/* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */
		gk20a_writel(g, psec_falcon_irqdest_r(),
			psec_falcon_irqdest_host_gptmr_f(0) |
			psec_falcon_irqdest_host_wdtmr_f(1) |
			psec_falcon_irqdest_host_mthd_f(0) |
			psec_falcon_irqdest_host_ctxsw_f(0) |
			psec_falcon_irqdest_host_halt_f(1) |
			psec_falcon_irqdest_host_exterr_f(0) |
			psec_falcon_irqdest_host_swgen0_f(1) |
			psec_falcon_irqdest_host_swgen1_f(0) |
			psec_falcon_irqdest_host_ext_f(0xff) |
			psec_falcon_irqdest_target_gptmr_f(1) |
			psec_falcon_irqdest_target_wdtmr_f(0) |
			psec_falcon_irqdest_target_mthd_f(0) |
			psec_falcon_irqdest_target_ctxsw_f(0) |
			psec_falcon_irqdest_target_halt_f(0) |
			psec_falcon_irqdest_target_exterr_f(0) |
			psec_falcon_irqdest_target_swgen0_f(0) |
			psec_falcon_irqdest_target_swgen1_f(1) |
			psec_falcon_irqdest_target_ext_f(0xff));

		/* 0=disable, 1=enable */
		gk20a_writel(g, psec_falcon_irqmset_r(),
			psec_falcon_irqmset_gptmr_f(1) |
			psec_falcon_irqmset_wdtmr_f(1) |
			psec_falcon_irqmset_mthd_f(0) |
			psec_falcon_irqmset_ctxsw_f(0) |
			psec_falcon_irqmset_halt_f(1) |
			psec_falcon_irqmset_exterr_f(1) |
			psec_falcon_irqmset_swgen0_f(1) |
			psec_falcon_irqmset_swgen1_f(1));
	}

	gk20a_dbg_fn("done");
}
void init_pmu_setup_hw1(struct gk20a *g)
{
struct mm_gk20a *mm = &g->mm;
@@ -330,7 +224,7 @@ void init_pmu_setup_hw1(struct gk20a *g)
}
static int gp106_sec2_reset(struct gk20a *g)
int gp106_sec2_reset(struct gk20a *g)
{
nvgpu_log_fn(g, " ");
@@ -351,9 +245,9 @@ int init_sec2_setup_hw1(struct gk20a *g,
int err;
u32 data = 0;
gk20a_dbg_fn("");
nvgpu_log_fn(g, " ");
gp106_sec2_reset(g);
nvgpu_flcn_reset(&g->sec2_flcn);
data = gk20a_readl(g, psec_fbif_ctl_r());
data |= psec_fbif_ctl_allow_phys_no_ctx_allow_f();
@@ -380,8 +274,6 @@ int init_sec2_setup_hw1(struct gk20a *g,
psec_fbif_transcfg_mem_type_physical_f() |
psec_fbif_transcfg_target_noncoherent_sysmem_f());
/*disable irqs for hs falcon booting as we will poll for halt*/
sec_enable_irq(pmu, false);
err = bl_bootstrap_sec2(pmu, desc, bl_sz);
if (err)
return err;

View File

@@ -16,14 +16,11 @@
int sec2_clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout);
int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout);
void sec2_copy_to_dmem(struct nvgpu_pmu *pmu,
u32 dst, u8 *src, u32 size, u8 port);
void sec2_dump_falcon_stats(struct nvgpu_pmu *pmu);
int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
void *desc, u32 bl_sz);
void sec_enable_irq(struct nvgpu_pmu *pmu, bool enable);
void init_pmu_setup_hw1(struct gk20a *g);
int init_sec2_setup_hw1(struct gk20a *g,
void *desc, u32 bl_sz);
int gp106_sec2_reset(struct gk20a *g);
#endif /*__SEC2_H_*/