gpu: nvgpu: move nvgpu_falcon struct to nvgpu/falcon.h

This struct was earlier moved to falcon_priv.h to give the falcon unit
exclusive access to it. However, with the HAL unit now needing access to
it as well, we need to move it to the public header nvgpu/falcon.h.

JIRA NVGPU-1993

Change-Id: Ia3b211798009107f64828c9765040d628448812a
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2069688
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Sagar Kamble
Date: 2019-03-08 09:40:21 +05:30
Committed by: mobile promotions
Parent: daa4d7e42b
Commit: f4174ef048
34 changed files with 192 additions and 234 deletions
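
In essence, the commit converts every nvgpu_falcon member from a heap-allocated pointer into a struct embedded in its container. The sketch below illustrates the shape of the change with simplified stand-in types; all names here are illustrative, not the driver's actual definitions:

    /* Simplified stand-in for struct nvgpu_falcon (hypothetical). */
    struct falcon_sketch {
        unsigned int flcn_id;
        unsigned int flcn_base;
    };

    /* Before: containers such as gk20a and nvgpu_pmu held pointers, which
     * nvgpu_falcon_sw_init() had to allocate and sw_free() had to free. */
    struct gpu_before_sketch {
        struct falcon_sketch *fecs_flcn;   /* heap-allocated on demand */
    };

    /* After: the falcon struct is embedded, so its storage lives inside the
     * container, allocation can no longer fail, and call sites pass
     * &g->fecs_flcn instead of g->fecs_flcn. */
    struct gpu_after_sketch {
        struct falcon_sketch fecs_flcn;    /* embedded; complete type needed */
    };

This is why nearly every hunk below is a one-character change from g->X_flcn to &g->X_flcn.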

View File

@@ -167,7 +167,7 @@ static void gm20b_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
     hs_acr->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc);
     /* set on which falcon ACR need to execute*/
-    hs_acr->acr_flcn = g->pmu.flcn;
+    hs_acr->acr_flcn = &g->pmu.flcn;
     hs_acr->acr_flcn_setup_hw_and_bl_bootstrap =
         gm20b_pmu_setup_hw_and_bl_bootstrap;
     hs_acr->acr_engine_bus_err_status =

View File

@@ -184,7 +184,7 @@ static void nvgpu_gv100_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_a
     hs_acr->ptr_bl_dmem_desc = &hs_acr->bl_dmem_desc_v1;
     hs_acr->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1);
-    hs_acr->acr_flcn = g->sec2.flcn;
+    hs_acr->acr_flcn = &g->sec2.flcn;
     hs_acr->acr_flcn_setup_hw_and_bl_bootstrap =
         gp106_sec2_setup_hw_and_bl_bootstrap;
 }

View File

@@ -160,7 +160,7 @@ static void gv11b_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
     hs_acr->ptr_bl_dmem_desc = &hs_acr->bl_dmem_desc_v1;
     hs_acr->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1);
-    hs_acr->acr_flcn = g->pmu.flcn;
+    hs_acr->acr_flcn = &g->pmu.flcn;
     hs_acr->acr_flcn_setup_hw_and_bl_bootstrap =
         gm20b_pmu_setup_hw_and_bl_bootstrap;
     hs_acr->report_acr_engine_bus_err_status =

View File

@@ -92,7 +92,7 @@ static void nvgpu_tu104_acr_ahesasc_sw_init(struct gk20a *g,
     acr_ahesasc->ptr_bl_dmem_desc = &acr_ahesasc->bl_dmem_desc_v1;
     acr_ahesasc->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1);
-    acr_ahesasc->acr_flcn = g->sec2.flcn;
+    acr_ahesasc->acr_flcn = &g->sec2.flcn;
     acr_ahesasc->acr_flcn_setup_hw_and_bl_bootstrap =
         tu104_sec2_setup_hw_and_bl_bootstrap;
 }
@@ -116,7 +116,7 @@ static void nvgpu_tu104_acr_asb_sw_init(struct gk20a *g,
     acr_asb->ptr_bl_dmem_desc = &acr_asb->bl_dmem_desc_v1;
     acr_asb->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1);
-    acr_asb->acr_flcn = g->gsp_flcn;
+    acr_asb->acr_flcn = &g->gsp_flcn;
     acr_asb->acr_flcn_setup_hw_and_bl_bootstrap =
         gv100_gsp_setup_hw_and_bl_bootstrap;
 }

View File

@@ -21,8 +21,7 @@
  */

 #include <nvgpu/gk20a.h>
 #include <nvgpu/timers.h>
-
-#include "falcon_priv.h"
+#include <nvgpu/falcon.h>

 /* Delay depends on memory size and pwr_clk
  * delay = (MAX {IMEM_SIZE, DMEM_SIZE} * 64 + 1) / pwr_clk
@@ -714,62 +713,54 @@ u32 nvgpu_falcon_get_id(struct nvgpu_falcon *flcn)
     return flcn->flcn_id;
 }

-static struct nvgpu_falcon **falcon_get_instance(struct gk20a *g, u32 flcn_id)
+static struct nvgpu_falcon *falcon_get_instance(struct gk20a *g, u32 flcn_id)
 {
-    struct nvgpu_falcon **flcn_p = NULL;
+    struct nvgpu_falcon *flcn = NULL;

     switch (flcn_id) {
     case FALCON_ID_PMU:
-        flcn_p = &g->pmu.flcn;
+        flcn = &g->pmu.flcn;
         break;
     case FALCON_ID_SEC2:
-        flcn_p = &g->sec2.flcn;
+        flcn = &g->sec2.flcn;
         break;
     case FALCON_ID_FECS:
-        flcn_p = &g->fecs_flcn;
+        flcn = &g->fecs_flcn;
         break;
     case FALCON_ID_GPCCS:
-        flcn_p = &g->gpccs_flcn;
+        flcn = &g->gpccs_flcn;
         break;
     case FALCON_ID_NVDEC:
-        flcn_p = &g->nvdec_flcn;
+        flcn = &g->nvdec_flcn;
         break;
     case FALCON_ID_MINION:
-        flcn_p = &g->minion_flcn;
+        flcn = &g->minion_flcn;
         break;
     case FALCON_ID_GSPLITE:
-        flcn_p = &g->gsp_flcn;
+        flcn = &g->gsp_flcn;
         break;
     default:
         nvgpu_err(g, "Invalid/Unsupported falcon ID %x", flcn_id);
         break;
     };

-    return flcn_p;
+    return flcn;
 }

 int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id)
 {
-    struct nvgpu_falcon **flcn_p = NULL, *flcn = NULL;
+    struct nvgpu_falcon *flcn = NULL;
     struct gpu_ops *gops = &g->ops;
     int err;

-    flcn_p = falcon_get_instance(g, flcn_id);
-    if (flcn_p == NULL) {
-        return -ENODEV;
-    }
-
-    flcn = (struct nvgpu_falcon *)
-        nvgpu_kmalloc(g, sizeof(struct nvgpu_falcon));
+    flcn = falcon_get_instance(g, flcn_id);
     if (flcn == NULL) {
-        return -ENOMEM;
+        return -ENODEV;
     }

     err = nvgpu_mutex_init(&flcn->imem_lock);
     if (err != 0) {
         nvgpu_err(g, "Error in flcn.imem_lock mutex initialization");
-        nvgpu_kfree(g, flcn);
         return err;
     }
@@ -777,33 +768,27 @@ int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id)
     if (err != 0) {
         nvgpu_err(g, "Error in flcn.dmem_lock mutex initialization");
         nvgpu_mutex_destroy(&flcn->imem_lock);
-        nvgpu_kfree(g, flcn);
         return err;
     }

     flcn->flcn_id = flcn_id;
     flcn->g = g;
-    *flcn_p = flcn;

     /* call to HAL method to assign flcn base & ops to selected falcon */
     return gops->falcon.falcon_hal_sw_init(flcn);
 }

 void nvgpu_falcon_sw_free(struct gk20a *g, u32 flcn_id)
 {
-    struct nvgpu_falcon **flcn_p = NULL, *flcn = NULL;
+    struct nvgpu_falcon *flcn = NULL;
     struct gpu_ops *gops = &g->ops;

-    flcn_p = falcon_get_instance(g, flcn_id);
-    if ((flcn_p == NULL) || (*flcn_p == NULL)) {
+    flcn = falcon_get_instance(g, flcn_id);
+    if (flcn == NULL) {
         return;
     }

-    flcn = *flcn_p;
-
     gops->falcon.falcon_hal_sw_free(flcn);

     nvgpu_mutex_destroy(&flcn->dmem_lock);
     nvgpu_mutex_destroy(&flcn->imem_lock);
-    nvgpu_kfree(g, flcn);
-    *flcn_p = NULL;
 }
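
Taken together, the falcon.c hunks above simplify the init/teardown contract: falcon_get_instance() now returns the address of storage embedded in gk20a, so sw_init can no longer fail with -ENOMEM and sw_free has nothing to release. A tiny self-contained model of the new ownership follows; all names are hypothetical stand-ins, not the driver's API:

    #include <stdio.h>

    struct falcon_model { unsigned int id; int initialized; };
    struct gpu_model { struct falcon_model pmu_flcn; };

    /* Lookup returns a pointer into embedded storage; it is never a
     * freshly allocated object. */
    static struct falcon_model *get_instance(struct gpu_model *g)
    {
        return &g->pmu_flcn;
    }

    int main(void)
    {
        struct gpu_model g = {0};
        struct falcon_model *f = get_instance(&g);

        f->id = 0;          /* "sw_init": initialize in place */
        f->initialized = 1;
        printf("falcon %u ready: %d\n", f->id, f->initialized);
        return 0;           /* "sw_free" has nothing to kfree */
    }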

View File

@@ -21,9 +21,9 @@
  */

 #include <nvgpu/io.h>
 #include <nvgpu/gk20a.h>
+#include <nvgpu/falcon.h>

 #include "falcon_gk20a.h"
-#include "falcon_priv.h"

 #include <nvgpu/hw/gm20b/hw_falcon_gm20b.h>

View File

@@ -20,10 +20,10 @@
  * DEALINGS IN THE SOFTWARE.
  */

 #include <nvgpu/gk20a.h>
+#include <nvgpu/falcon.h>

 #include "falcon_gk20a.h"
 #include "falcon_gp106.h"
-#include "falcon_priv.h"

 static void gp106_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
 {

View File

@@ -20,11 +20,11 @@
  * DEALINGS IN THE SOFTWARE.
  */

 #include <nvgpu/gk20a.h>
+#include <nvgpu/falcon.h>

 #include "falcon_gk20a.h"
 #include "falcon_gp106.h"
 #include "falcon_gv100.h"
-#include "falcon_priv.h"

 static void gv100_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
 {

View File

@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef NVGPU_FALCON_PRIV_H
-#define NVGPU_FALCON_PRIV_H
-
-#include <nvgpu/lock.h>
-#include <nvgpu/types.h>
-
-/* Falcon Register index */
-#define FALCON_REG_R0 (0U)
-#define FALCON_REG_R1 (1U)
-#define FALCON_REG_R2 (2U)
-#define FALCON_REG_R3 (3U)
-#define FALCON_REG_R4 (4U)
-#define FALCON_REG_R5 (5U)
-#define FALCON_REG_R6 (6U)
-#define FALCON_REG_R7 (7U)
-#define FALCON_REG_R8 (8U)
-#define FALCON_REG_R9 (9U)
-#define FALCON_REG_R10 (10U)
-#define FALCON_REG_R11 (11U)
-#define FALCON_REG_R12 (12U)
-#define FALCON_REG_R13 (13U)
-#define FALCON_REG_R14 (14U)
-#define FALCON_REG_R15 (15U)
-#define FALCON_REG_IV0 (16U)
-#define FALCON_REG_IV1 (17U)
-#define FALCON_REG_UNDEFINED (18U)
-#define FALCON_REG_EV (19U)
-#define FALCON_REG_SP (20U)
-#define FALCON_REG_PC (21U)
-#define FALCON_REG_IMB (22U)
-#define FALCON_REG_DMB (23U)
-#define FALCON_REG_CSW (24U)
-#define FALCON_REG_CCR (25U)
-#define FALCON_REG_SEC (26U)
-#define FALCON_REG_CTX (27U)
-#define FALCON_REG_EXCI (28U)
-#define FALCON_REG_RSVD0 (29U)
-#define FALCON_REG_RSVD1 (30U)
-#define FALCON_REG_RSVD2 (31U)
-#define FALCON_REG_SIZE (32U)
-
-struct gk20a;
-struct nvgpu_falcon;
-struct nvgpu_falcon_bl_info;
-
-/* ops which are falcon engine specific */
-struct nvgpu_falcon_engine_dependency_ops {
-    int (*reset_eng)(struct gk20a *g);
-    int (*copy_from_emem)(struct gk20a *g, u32 src, u8 *dst,
-        u32 size, u8 port);
-    int (*copy_to_emem)(struct gk20a *g, u32 dst, u8 *src,
-        u32 size, u8 port);
-};
-
-struct nvgpu_falcon_ops {
-    void (*reset)(struct nvgpu_falcon *flcn);
-    void (*set_irq)(struct nvgpu_falcon *flcn, bool enable,
-        u32 intr_mask, u32 intr_dest);
-    bool (*clear_halt_interrupt_status)(struct nvgpu_falcon *flcn);
-    bool (*is_falcon_cpu_halted)(struct nvgpu_falcon *flcn);
-    bool (*is_falcon_idle)(struct nvgpu_falcon *flcn);
-    bool (*is_falcon_scrubbing_done)(struct nvgpu_falcon *flcn);
-    int (*copy_from_dmem)(struct nvgpu_falcon *flcn, u32 src, u8 *dst,
-        u32 size, u8 port);
-    int (*copy_to_dmem)(struct nvgpu_falcon *flcn, u32 dst, u8 *src,
-        u32 size, u8 port);
-    int (*copy_from_imem)(struct nvgpu_falcon *flcn, u32 src, u8 *dst,
-        u32 size, u8 port);
-    int (*copy_to_imem)(struct nvgpu_falcon *flcn, u32 dst, u8 *src,
-        u32 size, u8 port, bool sec, u32 tag);
-    u32 (*mailbox_read)(struct nvgpu_falcon *flcn, u32 mailbox_index);
-    void (*mailbox_write)(struct nvgpu_falcon *flcn, u32 mailbox_index,
-        u32 data);
-    int (*bootstrap)(struct nvgpu_falcon *flcn, u32 boot_vector);
-    void (*dump_falcon_stats)(struct nvgpu_falcon *flcn);
-    void (*get_falcon_ctls)(struct nvgpu_falcon *flcn, u32 *sctl,
-        u32 *cpuctl);
-    u32 (*get_mem_size)(struct nvgpu_falcon *flcn,
-        enum falcon_mem_type mem_type);
-    u8 (*get_ports_count)(struct nvgpu_falcon *flcn,
-        enum falcon_mem_type mem_type);
-};
-
-struct nvgpu_falcon {
-    struct gk20a *g;
-    u32 flcn_id;
-    u32 flcn_base;
-    bool is_falcon_supported;
-    bool is_interrupt_enabled;
-    struct nvgpu_mutex imem_lock;
-    struct nvgpu_mutex dmem_lock;
-    struct nvgpu_falcon_ops flcn_ops;
-    struct nvgpu_falcon_engine_dependency_ops flcn_engine_dep_ops;
-};
-
-#endif /* NVGPU_FALCON_PRIV_H */

View File

@@ -20,11 +20,11 @@
  * DEALINGS IN THE SOFTWARE.
  */

 #include <nvgpu/gk20a.h>
+#include <nvgpu/falcon.h>

 #include "falcon_gk20a.h"
 #include "falcon_gv100.h"
 #include "falcon_tu104.h"
-#include "falcon_priv.h"

 static void tu104_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
 {

View File

@@ -128,7 +128,7 @@ int gv100_fb_memory_unlock(struct gk20a *g)
     /* Enable nvdec */
     g->ops.mc.enable(g, g->ops.mc.reset_mask(g, NVGPU_UNIT_NVDEC));

-    err = nvgpu_acr_self_hs_load_bootstrap(g, g->nvdec_flcn, mem_unlock_fw,
+    err = nvgpu_acr_self_hs_load_bootstrap(g, &g->nvdec_flcn, mem_unlock_fw,
         MEM_UNLOCK_TIMEOUT);
     if (err != 0) {
         nvgpu_err(g, "mem unlock HS ucode failed, err-0x%x", err);

View File

@@ -58,6 +58,7 @@
      nvlipt_err_uc_status_link0_stompedpacketreceived_f(1) | \
      nvlipt_err_uc_status_link0_unsupportedrequest_f(1) | \
      nvlipt_err_uc_status_link0_ucinternal_f(1))
+
 /*
  * Init TLC per link interrupts
  */

View File

@@ -68,7 +68,7 @@ int nvgpu_nvlink_minion_load(struct gk20a *g)
     }

     /* Minion reset */
-    err = nvgpu_falcon_reset(g->minion_flcn);
+    err = nvgpu_falcon_reset(&g->minion_flcn);
     if (err != 0) {
         nvgpu_err(g, "Minion reset failed");
         goto exit;
@@ -83,7 +83,7 @@ int nvgpu_nvlink_minion_load(struct gk20a *g)
     }

     /* set BOOTVEC to start of non-secure code */
-    err = nvgpu_falcon_bootstrap(g->minion_flcn, 0x0);
+    err = nvgpu_falcon_bootstrap(&g->minion_flcn, 0x0);
     if (err != 0) {
         nvgpu_err(g, "Minion bootstrap failed");
         goto exit;

View File

@@ -67,7 +67,7 @@ static int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
         nvgpu_cg_blcg_pmu_load_enable(g);

-        if (nvgpu_falcon_mem_scrub_wait(pmu->flcn) != 0) {
+        if (nvgpu_falcon_mem_scrub_wait(&pmu->flcn) != 0) {
             /* keep PMU falcon/engine in reset
              * if IMEM/DMEM scrubbing fails
              */
@@ -102,7 +102,7 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
             goto exit;
         }

-        err = nvgpu_falcon_wait_idle(pmu->flcn);
+        err = nvgpu_falcon_wait_idle(&pmu->flcn);
         if (err != 0) {
             goto exit;
         }
@@ -120,7 +120,7 @@ int nvgpu_pmu_reset(struct gk20a *g)
     nvgpu_log_fn(g, " %s ", g->name);

-    err = nvgpu_falcon_wait_idle(pmu->flcn);
+    err = nvgpu_falcon_wait_idle(&pmu->flcn);
     if (err != 0) {
         goto exit;
     }
@@ -269,7 +269,7 @@ int nvgpu_init_pmu_support(struct gk20a *g)
     if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) {
         /* Reset PMU engine */
-        err = nvgpu_falcon_reset(g->pmu.flcn);
+        err = nvgpu_falcon_reset(&g->pmu.flcn);

         /* Bootstrap PMU from SEC2 RTOS*/
         err = nvgpu_sec2_bootstrap_ls_falcons(g, &g->sec2,
@@ -283,7 +283,7 @@ int nvgpu_init_pmu_support(struct gk20a *g)
          * clear halt interrupt to avoid PMU-RTOS ucode
          * hitting breakpoint due to PMU halt
          */
-        err = nvgpu_falcon_clear_halt_intr_status(g->pmu.flcn,
+        err = nvgpu_falcon_clear_halt_intr_status(&g->pmu.flcn,
             gk20a_get_gr_idle_timeout(g));
         if (err != 0) {
             goto exit;
@@ -388,7 +388,7 @@ static int pmu_process_init_msg_dmem(struct gk20a *g, struct nvgpu_pmu *pmu,
     g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_GET);

-    err = nvgpu_falcon_copy_from_dmem(pmu->flcn, tail,
+    err = nvgpu_falcon_copy_from_dmem(&pmu->flcn, tail,
         (u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0);
     if (err != 0) {
         nvgpu_err(g, "PMU falcon DMEM copy failed");
@@ -400,7 +400,7 @@ static int pmu_process_init_msg_dmem(struct gk20a *g, struct nvgpu_pmu *pmu,
         goto exit;
     }

-    err = nvgpu_falcon_copy_from_dmem(pmu->flcn, tail + PMU_MSG_HDR_SIZE,
+    err = nvgpu_falcon_copy_from_dmem(&pmu->flcn, tail + PMU_MSG_HDR_SIZE,
         (u8 *)&msg->msg, (u32)msg->hdr.size - PMU_MSG_HDR_SIZE, 0);
     if (err != 0) {
         nvgpu_err(g, "PMU falcon DMEM copy failed");
@@ -450,7 +450,7 @@ int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
     if (!pmu->gid_info.valid) {
         u32 *gid_hdr_data = (u32 *)(gid_data.signature);

-        err = nvgpu_falcon_copy_from_dmem(pmu->flcn,
+        err = nvgpu_falcon_copy_from_dmem(&pmu->flcn,
             pv->get_pmu_init_msg_pmu_sw_mg_off(init),
             (u8 *)&gid_data,
             (u32)sizeof(struct pmu_sha1_gid_data), 0);
@@ -657,7 +657,7 @@ void nvgpu_pmu_get_cmd_line_args_offset(struct gk20a *g,
     u32 dmem_size = 0;
     int err = 0;

-    err = nvgpu_falcon_get_mem_size(pmu->flcn, MEM_DMEM, &dmem_size);
+    err = nvgpu_falcon_get_mem_size(&pmu->flcn, MEM_DMEM, &dmem_size);
     if (err != 0) {
         nvgpu_err(g, "dmem size request failed");
         *args_offset = 0;

View File

@@ -34,7 +34,7 @@ void nvgpu_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
     /* Print PG stats */
     nvgpu_err(g, "Print PG stats");

-    nvgpu_falcon_print_dmem(pmu->flcn,
+    nvgpu_falcon_print_dmem(&pmu->flcn,
         pmu->stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_GRAPHICS],
         (u32)sizeof(struct pmu_pg_stats_v2));
@@ -45,7 +45,7 @@ void nvgpu_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu)
 {
     struct gk20a *g = pmu->g;

-    nvgpu_falcon_dump_stats(pmu->flcn);
+    nvgpu_falcon_dump_stats(&pmu->flcn);
     g->ops.pmu.pmu_dump_falcon_stats(pmu);

     nvgpu_err(g, "pmu state: %d", pmu->pmu_state);

View File

@@ -158,7 +158,7 @@ void gk20a_pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
         g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, false,
             mc_intr_mask_1_pmu_enabled_f());

-    nvgpu_falcon_set_irq(pmu->flcn, false, 0x0, 0x0);
+    nvgpu_falcon_set_irq(&pmu->flcn, false, 0x0, 0x0);

     if (enable) {
         intr_dest = g->ops.pmu.get_irqdest(g);
@@ -172,7 +172,7 @@ void gk20a_pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
             pwr_falcon_irqmset_swgen0_f(1) |
             pwr_falcon_irqmset_swgen1_f(1);

-        nvgpu_falcon_set_irq(pmu->flcn, true, intr_mask, intr_dest);
+        nvgpu_falcon_set_irq(&pmu->flcn, true, intr_mask, intr_dest);

         g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_ENABLE, true,
             mc_intr_mask_0_pmu_enabled_f());
@@ -221,7 +221,7 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
         << GK20A_PMU_DMEM_BLKSIZE2) -
         g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);

-    nvgpu_falcon_copy_to_dmem(pmu->flcn, addr_args,
+    nvgpu_falcon_copy_to_dmem(&pmu->flcn, addr_args,
         (u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
         g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
@@ -267,7 +267,8 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
             pwr_falcon_dmatrfcmd_ctxdma_f(GK20A_PMU_DMAIDX_UCODE));
     }

-    err = nvgpu_falcon_bootstrap(g->pmu.flcn, desc->bootloader_entry_point);
+    err = nvgpu_falcon_bootstrap(&g->pmu.flcn,
+        desc->bootloader_entry_point);

     gk20a_writel(g, pwr_falcon_os_r(), desc->app_version);
@@ -870,7 +871,7 @@ int gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
     struct pmu_pg_stats stats;
     int err;

-    err = nvgpu_falcon_copy_from_dmem(pmu->flcn,
+    err = nvgpu_falcon_copy_from_dmem(&pmu->flcn,
         pmu->stat_dmem_offset[pg_engine_id],
         (u8 *)&stats, (u32)sizeof(struct pmu_pg_stats), 0);
     if (err != 0) {

View File

@@ -290,7 +290,7 @@ int gm20b_ns_pmu_setup_hw_and_bootstrap(struct gk20a *g)
     nvgpu_log_fn(g, " ");

     nvgpu_mutex_acquire(&pmu->isr_mutex);
-    nvgpu_falcon_reset(pmu->flcn);
+    nvgpu_falcon_reset(&pmu->flcn);
     pmu->isr_enabled = true;
     nvgpu_mutex_release(&pmu->isr_mutex);
@@ -349,7 +349,7 @@ void gm20b_update_lspmu_cmdline_args(struct gk20a *g)
     g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
     g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
         pmu, GK20A_PMU_DMAIDX_VIRT);

-    nvgpu_falcon_copy_to_dmem(pmu->flcn, cmd_line_args_offset,
+    nvgpu_falcon_copy_to_dmem(&pmu->flcn, cmd_line_args_offset,
         (u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
         g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
 }
@@ -374,9 +374,9 @@ static int gm20b_bl_bootstrap(struct gk20a *g,
         pwr_pmu_new_instblk_target_sys_coh_f() :
         pwr_pmu_new_instblk_target_sys_ncoh_f()));

-    nvgpu_falcon_mailbox_write(g->pmu.flcn, FALCON_MAILBOX_0, 0xDEADA5A5U);
+    nvgpu_falcon_mailbox_write(&g->pmu.flcn, FALCON_MAILBOX_0, 0xDEADA5A5U);

-    return nvgpu_falcon_bl_bootstrap(g->pmu.flcn, bl_info);
+    return nvgpu_falcon_bl_bootstrap(&g->pmu.flcn, bl_info);
 }

 int gm20b_pmu_setup_hw_and_bl_bootstrap(struct gk20a *g,
@@ -386,7 +386,7 @@ int gm20b_pmu_setup_hw_and_bl_bootstrap(struct gk20a *g,
     nvgpu_log_fn(g, " ");

-    err = nvgpu_falcon_reset(g->pmu.flcn);
+    err = nvgpu_falcon_reset(&g->pmu.flcn);
     if (err != 0) {
         goto exit;
     }

View File

@@ -168,7 +168,7 @@ int gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
     struct pmu_pg_stats_v2 stats;
     int err;

-    err = nvgpu_falcon_copy_from_dmem(pmu->flcn,
+    err = nvgpu_falcon_copy_from_dmem(&pmu->flcn,
         pmu->stat_dmem_offset[pg_engine_id],
         (u8 *)&stats, (u32)sizeof(struct pmu_pg_stats_v2), 0);
     if (err != 0) {
@@ -298,7 +298,7 @@ void gp106_update_lspmu_cmdline_args(struct gk20a *g)
         g->ops.pmu_ver.config_pmu_cmdline_args_super_surface(pmu);
     }

-    nvgpu_falcon_copy_to_dmem(pmu->flcn, cmd_line_args_offset,
+    nvgpu_falcon_copy_to_dmem(&pmu->flcn, cmd_line_args_offset,
         (u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
         g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);

View File

@@ -271,7 +271,7 @@ int gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
     struct pmu_pg_stats_v1 stats;
     int err;

-    err = nvgpu_falcon_copy_from_dmem(pmu->flcn,
+    err = nvgpu_falcon_copy_from_dmem(&pmu->flcn,
         pmu->stat_dmem_offset[pg_engine_id],
         (u8 *)&stats, (u32)sizeof(struct pmu_pg_stats_v1), 0);
     if (err != 0) {

View File

@@ -189,7 +189,7 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
         << GK20A_PMU_DMEM_BLKSIZE2) -
         g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);

-    nvgpu_falcon_copy_to_dmem(pmu->flcn, addr_args,
+    nvgpu_falcon_copy_to_dmem(&pmu->flcn, addr_args,
         (u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
         g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
@@ -254,7 +254,7 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
             pwr_falcon_dmatrfcmd_ctxdma_f(GK20A_PMU_DMAIDX_UCODE));
     }

-    err = nvgpu_falcon_bootstrap(pmu->flcn, desc->bootloader_entry_point);
+    err = nvgpu_falcon_bootstrap(&pmu->flcn, desc->bootloader_entry_point);

     gk20a_writel(g, pwr_falcon_os_r(), desc->app_version);

View File

@@ -385,7 +385,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
             cmd, cmd->hdr.size);
     } else {
         queue = pmu->queue[queue_id];
-        err = nvgpu_engine_mem_queue_push(pmu->flcn, queue,
+        err = nvgpu_engine_mem_queue_push(&pmu->flcn, queue,
             cmd, cmd->hdr.size);
     }
@@ -477,7 +477,7 @@ static int pmu_cmd_payload_setup_rpc(struct gk20a *g, struct pmu_cmd *cmd,
         seq->in_payload_fb_queue = true;
         seq->out_payload_fb_queue = true;
     } else {
-        nvgpu_falcon_copy_to_dmem(pmu->flcn, alloc.dmem_offset,
+        nvgpu_falcon_copy_to_dmem(&pmu->flcn, alloc.dmem_offset,
             payload->rpc.prpc, payload->rpc.size_rpc, 0);
     }
@@ -575,7 +575,7 @@ static int pmu_cmd_payload_setup(struct gk20a *g, struct pmu_cmd *cmd,
         seq->in_payload_fb_queue = true;
     } else {
-        nvgpu_falcon_copy_to_dmem(pmu->flcn,
+        nvgpu_falcon_copy_to_dmem(&pmu->flcn,
             (pv->pmu_allocation_get_dmem_offset(pmu, in)),
             payload->in.buf, payload->in.size, 0);
     }
@@ -877,7 +877,7 @@ static int pmu_payload_extract(struct nvgpu_pmu *pmu,
     } else {
         if (pv->pmu_allocation_get_dmem_size(pmu,
             pv->get_pmu_seq_out_a_ptr(seq)) != 0U) {
-            err = nvgpu_falcon_copy_from_dmem(pmu->flcn,
+            err = nvgpu_falcon_copy_from_dmem(&pmu->flcn,
                 pv->pmu_allocation_get_dmem_offset(pmu,
                 pv->get_pmu_seq_out_a_ptr(seq)),
                 seq->out_payload,
@@ -1081,7 +1081,7 @@ static bool pmu_engine_mem_queue_read(struct nvgpu_pmu *pmu,
             bytes_to_read, &bytes_read);
     } else {
         queue = pmu->queue[queue_id];
-        err = nvgpu_engine_mem_queue_pop(pmu->flcn, queue, data,
+        err = nvgpu_engine_mem_queue_pop(&pmu->flcn, queue, data,
             bytes_to_read, &bytes_read);
     }
@@ -1140,7 +1140,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, u32 queue_id,
     if (msg->hdr.unit_id == PMU_UNIT_REWIND) {
         if (pmu->queue_type != QUEUE_TYPE_FB) {
             queue = pmu->queue[queue_id];
-            err = nvgpu_engine_mem_queue_rewind(pmu->flcn, queue);
+            err = nvgpu_engine_mem_queue_rewind(&pmu->flcn, queue);
             if (err != 0) {
                 nvgpu_err(g, "fail to rewind queue %d",
                     queue_id);

View File

@@ -245,8 +245,9 @@ int nvgpu_pmu_load_update(struct gk20a *g)
         nvgpu_pmu_perfmon_get_samples_rpc(pmu);
         load = pmu->load;
     } else {
-        err = nvgpu_falcon_copy_from_dmem(pmu->flcn, pmu->sample_buffer,
-            (u8 *)&load, 2 * 1, 0);
+        err = nvgpu_falcon_copy_from_dmem(&pmu->flcn,
+            pmu->sample_buffer,
+            (u8 *)&load, 2 * 1, 0);
         if (err != 0) {
             nvgpu_err(g, "PMU falcon DMEM copy failed");
             return err;

View File

@@ -128,7 +128,7 @@ static int sec2_write_cmd(struct nvgpu_sec2 *sec2,
     nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
     do {
-        err = nvgpu_engine_mem_queue_push(g->sec2.flcn, queue, cmd,
+        err = nvgpu_engine_mem_queue_push(&g->sec2.flcn, queue, cmd,
             cmd->hdr.size);
         if ((err == -EAGAIN) && (nvgpu_timeout_expired(&timeout) == 0)) {
             nvgpu_usleep_range(1000U, 2000U);
@@ -250,7 +250,7 @@ static bool sec2_engine_mem_queue_read(struct nvgpu_sec2 *sec2,
     u32 bytes_read;
     int err;

-    err = nvgpu_engine_mem_queue_pop(sec2->flcn, queue, data,
+    err = nvgpu_engine_mem_queue_pop(&sec2->flcn, queue, data,
         bytes_to_read, &bytes_read);
     if (err != 0) {
         nvgpu_err(g, "fail to read msg: err %d", err);
@@ -288,7 +288,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
     }

     if (msg->hdr.unit_id == NV_SEC2_UNIT_REWIND) {
-        err = nvgpu_engine_mem_queue_rewind(sec2->flcn, queue);
+        err = nvgpu_engine_mem_queue_rewind(&sec2->flcn, queue);
         if (err != 0) {
             nvgpu_err(g, "fail to rewind queue %d", queue_id);
             *status = err;
@@ -337,7 +337,7 @@ static int sec2_process_init_msg(struct nvgpu_sec2 *sec2,
     g->ops.sec2.msgq_tail(g, sec2, &tail, QUEUE_GET);

-    err = nvgpu_falcon_copy_from_emem(sec2->flcn, tail,
+    err = nvgpu_falcon_copy_from_emem(&sec2->flcn, tail,
         (u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0U);
     if (err != 0) {
         goto exit;
@@ -349,7 +349,7 @@ static int sec2_process_init_msg(struct nvgpu_sec2 *sec2,
         goto exit;
     }

-    err = nvgpu_falcon_copy_from_emem(sec2->flcn, tail + PMU_MSG_HDR_SIZE,
+    err = nvgpu_falcon_copy_from_emem(&sec2->flcn, tail + PMU_MSG_HDR_SIZE,
         (u8 *)&msg->msg, msg->hdr.size - PMU_MSG_HDR_SIZE, 0U);
     if (err != 0) {
         goto exit;

View File

@@ -97,7 +97,7 @@ void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
 {
     unsigned int i;

-    nvgpu_falcon_dump_stats(g->fecs_flcn);
+    nvgpu_falcon_dump_stats(&g->fecs_flcn);

     for (i = 0; i < g->ops.gr.fecs_ctxsw_mailbox_size(); i++) {
         nvgpu_err(g, "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x",

View File

@@ -45,7 +45,7 @@
 static void upload_code(struct gk20a *g, u32 dst,
             u8 *src, u32 size, u8 port, bool sec)
 {
-    nvgpu_falcon_copy_to_imem(g->pmu.flcn, dst, src, size, port, sec,
+    nvgpu_falcon_copy_to_imem(&g->pmu.flcn, dst, src, size, port, sec,
         dst >> 8);
 }
@@ -81,7 +81,7 @@ int gp106_bios_devinit(struct gk20a *g)
     nvgpu_log_fn(g, " ");

-    if (nvgpu_falcon_reset(g->pmu.flcn) != 0) {
+    if (nvgpu_falcon_reset(&g->pmu.flcn) != 0) {
         err = -ETIMEDOUT;
         goto out;
     }
@@ -107,7 +107,7 @@ int gp106_bios_devinit(struct gk20a *g)
         g->bios.bootscripts_size,
         0);

-    err = nvgpu_falcon_bootstrap(g->pmu.flcn,
+    err = nvgpu_falcon_bootstrap(&g->pmu.flcn,
         g->bios.devinit.code_entry_point);
     if (err != 0) {
         nvgpu_err(g, "falcon bootstrap failed %d", err);
@@ -131,7 +131,7 @@ int gp106_bios_devinit(struct gk20a *g)
         goto out;
     }

-    err = nvgpu_falcon_clear_halt_intr_status(g->pmu.flcn,
+    err = nvgpu_falcon_clear_halt_intr_status(&g->pmu.flcn,
         gk20a_get_gr_idle_timeout(g));
     if (err != 0) {
         nvgpu_err(g, "falcon_clear_halt_intr_status failed %d", err);
@@ -145,7 +145,7 @@ out:

 int gp106_bios_preos_wait_for_halt(struct gk20a *g)
 {
-    return nvgpu_falcon_wait_for_halt(g->pmu.flcn,
+    return nvgpu_falcon_wait_for_halt(&g->pmu.flcn,
         PMU_BOOT_TIMEOUT_MAX / 1000);
 }
@@ -155,7 +155,7 @@ int gp106_bios_preos(struct gk20a *g)
     nvgpu_log_fn(g, " ");

-    if (nvgpu_falcon_reset(g->pmu.flcn) != 0) {
+    if (nvgpu_falcon_reset(&g->pmu.flcn) != 0) {
         err = -ETIMEDOUT;
         goto out;
     }
@@ -177,7 +177,7 @@ int gp106_bios_preos(struct gk20a *g)
         g->bios.preos.dmem_size,
         0);

-    err = nvgpu_falcon_bootstrap(g->pmu.flcn,
+    err = nvgpu_falcon_bootstrap(&g->pmu.flcn,
         g->bios.preos.code_entry_point);
     if (err != 0) {
         nvgpu_err(g, "falcon bootstrap failed %d", err);
@@ -190,7 +190,7 @@ int gp106_bios_preos(struct gk20a *g)
         goto out;
     }

-    err = nvgpu_falcon_clear_halt_intr_status(g->pmu.flcn,
+    err = nvgpu_falcon_clear_halt_intr_status(&g->pmu.flcn,
         gk20a_get_gr_idle_timeout(g));
     if (err != 0) {
         nvgpu_err(g, "falcon_clear_halt_intr_status failed %d", err);

View File

@@ -77,9 +77,10 @@ static int sec2_flcn_bl_bootstrap(struct gk20a *g,
     data |= BIT32(3);
     gk20a_writel(g, psec_falcon_engctl_r(), data);

-    nvgpu_falcon_mailbox_write(g->sec2.flcn, FALCON_MAILBOX_0, 0xDEADA5A5U);
+    nvgpu_falcon_mailbox_write(&g->sec2.flcn, FALCON_MAILBOX_0,
+        0xDEADA5A5U);

-    err = nvgpu_falcon_bl_bootstrap(g->sec2.flcn, bl_info);
+    err = nvgpu_falcon_bl_bootstrap(&g->sec2.flcn, bl_info);

     return err;
 }
@@ -91,7 +92,7 @@ int gp106_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
     nvgpu_log_fn(g, " ");

-    nvgpu_falcon_reset(g->sec2.flcn);
+    nvgpu_falcon_reset(&g->sec2.flcn);

     data = gk20a_readl(g, psec_fbif_ctl_r());
     data |= psec_fbif_ctl_allow_phys_no_ctx_allow_f();

View File

@@ -68,9 +68,9 @@ static int gsp_flcn_bl_bootstrap(struct gk20a *g,
     data |= pgsp_falcon_engctl_switch_context_true_f();
     gk20a_writel(g, pgsp_falcon_engctl_r(), data);

-    nvgpu_falcon_mailbox_write(g->gsp_flcn, FALCON_MAILBOX_0, 0xDEADA5A5U);
+    nvgpu_falcon_mailbox_write(&g->gsp_flcn, FALCON_MAILBOX_0, 0xDEADA5A5U);

-    status = nvgpu_falcon_bl_bootstrap(g->gsp_flcn, bl_info);
+    status = nvgpu_falcon_bl_bootstrap(&g->gsp_flcn, bl_info);

     return status;
 }
@@ -81,7 +81,7 @@ int gv100_gsp_setup_hw_and_bl_bootstrap(struct gk20a *g,
     u32 data = 0;
     int err = 0;

-    err = nvgpu_falcon_reset(g->gsp_flcn);
+    err = nvgpu_falcon_reset(&g->gsp_flcn);
     if (err != 0) {
         goto exit;
     }

View File

@@ -225,7 +225,7 @@ int gv100_nvlink_minion_send_dlcmd(struct gk20a *g, u32 link_id,
  */
 void gv100_nvlink_minion_clear_intr(struct gk20a *g)
 {
-    nvgpu_falcon_set_irq(g->minion_flcn, true, MINION_FALCON_INTR_MASK,
+    nvgpu_falcon_set_irq(&g->minion_flcn, true, MINION_FALCON_INTR_MASK,
         MINION_FALCON_INTR_DEST);
 }

View File

@@ -24,6 +24,7 @@
 #define NVGPU_FALCON_H

 #include <nvgpu/types.h>
+#include <nvgpu/lock.h>

 /*
  * Falcon Id Defines
@@ -49,6 +50,41 @@
     (((((ADDR) + (FALCON_BLOCK_SIZE - 1U)) & ~(FALCON_BLOCK_SIZE-1U)) \
     / FALCON_BLOCK_SIZE) << 8U)

+/* Falcon Register index */
+#define FALCON_REG_R0 (0U)
+#define FALCON_REG_R1 (1U)
+#define FALCON_REG_R2 (2U)
+#define FALCON_REG_R3 (3U)
+#define FALCON_REG_R4 (4U)
+#define FALCON_REG_R5 (5U)
+#define FALCON_REG_R6 (6U)
+#define FALCON_REG_R7 (7U)
+#define FALCON_REG_R8 (8U)
+#define FALCON_REG_R9 (9U)
+#define FALCON_REG_R10 (10U)
+#define FALCON_REG_R11 (11U)
+#define FALCON_REG_R12 (12U)
+#define FALCON_REG_R13 (13U)
+#define FALCON_REG_R14 (14U)
+#define FALCON_REG_R15 (15U)
+#define FALCON_REG_IV0 (16U)
+#define FALCON_REG_IV1 (17U)
+#define FALCON_REG_UNDEFINED (18U)
+#define FALCON_REG_EV (19U)
+#define FALCON_REG_SP (20U)
+#define FALCON_REG_PC (21U)
+#define FALCON_REG_IMB (22U)
+#define FALCON_REG_DMB (23U)
+#define FALCON_REG_CSW (24U)
+#define FALCON_REG_CCR (25U)
+#define FALCON_REG_SEC (26U)
+#define FALCON_REG_CTX (27U)
+#define FALCON_REG_EXCI (28U)
+#define FALCON_REG_RSVD0 (29U)
+#define FALCON_REG_RSVD1 (30U)
+#define FALCON_REG_RSVD2 (31U)
+#define FALCON_REG_SIZE (32U)
+
 /* Falcon ucode header format
  * OS Code Offset
  * OS Code Size
@@ -92,6 +128,56 @@ struct nvgpu_falcon_bl_info {
     u32 bl_start_tag;
 };

+/* ops which are falcon engine specific */
+struct nvgpu_falcon_engine_dependency_ops {
+    int (*reset_eng)(struct gk20a *g);
+    int (*copy_from_emem)(struct gk20a *g, u32 src, u8 *dst,
+        u32 size, u8 port);
+    int (*copy_to_emem)(struct gk20a *g, u32 dst, u8 *src,
+        u32 size, u8 port);
+};
+
+struct nvgpu_falcon_ops {
+    void (*reset)(struct nvgpu_falcon *flcn);
+    void (*set_irq)(struct nvgpu_falcon *flcn, bool enable,
+        u32 intr_mask, u32 intr_dest);
+    bool (*clear_halt_interrupt_status)(struct nvgpu_falcon *flcn);
+    bool (*is_falcon_cpu_halted)(struct nvgpu_falcon *flcn);
+    bool (*is_falcon_idle)(struct nvgpu_falcon *flcn);
+    bool (*is_falcon_scrubbing_done)(struct nvgpu_falcon *flcn);
+    int (*copy_from_dmem)(struct nvgpu_falcon *flcn, u32 src, u8 *dst,
+        u32 size, u8 port);
+    int (*copy_to_dmem)(struct nvgpu_falcon *flcn, u32 dst, u8 *src,
+        u32 size, u8 port);
+    int (*copy_from_imem)(struct nvgpu_falcon *flcn, u32 src, u8 *dst,
+        u32 size, u8 port);
+    int (*copy_to_imem)(struct nvgpu_falcon *flcn, u32 dst, u8 *src,
+        u32 size, u8 port, bool sec, u32 tag);
+    u32 (*mailbox_read)(struct nvgpu_falcon *flcn, u32 mailbox_index);
+    void (*mailbox_write)(struct nvgpu_falcon *flcn, u32 mailbox_index,
+        u32 data);
+    int (*bootstrap)(struct nvgpu_falcon *flcn, u32 boot_vector);
+    void (*dump_falcon_stats)(struct nvgpu_falcon *flcn);
+    void (*get_falcon_ctls)(struct nvgpu_falcon *flcn, u32 *sctl,
+        u32 *cpuctl);
+    u32 (*get_mem_size)(struct nvgpu_falcon *flcn,
+        enum falcon_mem_type mem_type);
+    u8 (*get_ports_count)(struct nvgpu_falcon *flcn,
+        enum falcon_mem_type mem_type);
+};
+
+struct nvgpu_falcon {
+    struct gk20a *g;
+    u32 flcn_id;
+    u32 flcn_base;
+    bool is_falcon_supported;
+    bool is_interrupt_enabled;
+    struct nvgpu_mutex imem_lock;
+    struct nvgpu_mutex dmem_lock;
+    struct nvgpu_falcon_ops flcn_ops;
+    struct nvgpu_falcon_engine_dependency_ops flcn_engine_dep_ops;
+};
+
 int nvgpu_falcon_wait_idle(struct nvgpu_falcon *flcn);
 int nvgpu_falcon_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout);
 int nvgpu_falcon_clear_halt_intr_status(struct nvgpu_falcon *flcn,
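
A side note on why the definition has to become public at all: once struct gk20a, nvgpu_pmu, and nvgpu_sec2 embed struct nvgpu_falcon by value, every translation unit that includes their headers must see the complete type so the compiler can lay out the containing struct. An opaque forward declaration is only sufficient for pointer members. A minimal stand-alone C illustration (names hypothetical):

    /* A forward declaration supports pointer members only... */
    struct opaque_t;
    struct holds_pointer {
        struct opaque_t *p;     /* OK: the size of a pointer is known */
    };

    /* ...while embedding by value requires the complete definition, so
     * that sizeof(struct holds_value) can be computed where it is used. */
    struct complete_t {
        int x;
    };
    struct holds_value {
        struct complete_t v;    /* needs complete_t's full definition */
    };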

View File

@@ -1767,11 +1767,11 @@ struct gk20a {
     struct nvgpu_netlist_vars *netlist_vars;
     bool netlist_valid;

-    struct nvgpu_falcon *fecs_flcn;
-    struct nvgpu_falcon *gpccs_flcn;
-    struct nvgpu_falcon *nvdec_flcn;
-    struct nvgpu_falcon *minion_flcn;
-    struct nvgpu_falcon *gsp_flcn;
+    struct nvgpu_falcon fecs_flcn;
+    struct nvgpu_falcon gpccs_flcn;
+    struct nvgpu_falcon nvdec_flcn;
+    struct nvgpu_falcon minion_flcn;
+    struct nvgpu_falcon gsp_flcn;
     struct clk_gk20a clk;
     struct fifo_gk20a fifo;
     struct nvgpu_nvlink_dev nvlink;

View File

@@ -335,7 +335,7 @@ struct pmu_sequence {
 struct nvgpu_pmu {
     struct gk20a *g;
-    struct nvgpu_falcon *flcn;
+    struct nvgpu_falcon flcn;

     struct nvgpu_firmware *fw_desc;
     struct nvgpu_firmware *fw_image;
View File

@@ -69,7 +69,7 @@ struct sec2_sequence {
 struct nvgpu_sec2 {
     struct gk20a *g;
-    struct nvgpu_falcon *flcn;
+    struct nvgpu_falcon flcn;
     u32 falcon_id;

     struct nvgpu_engine_mem_queue *queue[SEC2_QUEUE_NUM];

View File

@@ -218,13 +218,13 @@ int nvgpu_nvlink_minion_load_ucode(struct gk20a *g,
" - Ucode Data Size = %u", minion_hdr->ucode_data_size); " - Ucode Data Size = %u", minion_hdr->ucode_data_size);
/* Copy Non Secure IMEM code */ /* Copy Non Secure IMEM code */
nvgpu_falcon_copy_to_imem(g->minion_flcn, 0, nvgpu_falcon_copy_to_imem(&g->minion_flcn, 0,
(u8 *)&ndev->minion_img[minion_hdr->os_code_offset], (u8 *)&ndev->minion_img[minion_hdr->os_code_offset],
minion_hdr->os_code_size, 0, false, minion_hdr->os_code_size, 0, false,
GET_IMEM_TAG(minion_hdr->os_code_offset)); GET_IMEM_TAG(minion_hdr->os_code_offset));
/* Copy Non Secure DMEM code */ /* Copy Non Secure DMEM code */
nvgpu_falcon_copy_to_dmem(g->minion_flcn, 0, nvgpu_falcon_copy_to_dmem(&g->minion_flcn, 0,
(u8 *)&ndev->minion_img[minion_hdr->os_data_offset], (u8 *)&ndev->minion_img[minion_hdr->os_data_offset],
minion_hdr->os_data_size, 0); minion_hdr->os_data_size, 0);
@@ -236,14 +236,14 @@ int nvgpu_nvlink_minion_load_ucode(struct gk20a *g,
u32 app_data_size = minion_hdr->app_data_sizes[app]; u32 app_data_size = minion_hdr->app_data_sizes[app];
if (app_code_size) if (app_code_size)
nvgpu_falcon_copy_to_imem(g->minion_flcn, nvgpu_falcon_copy_to_imem(&g->minion_flcn,
app_code_start, app_code_start,
(u8 *)&ndev->minion_img[app_code_start], (u8 *)&ndev->minion_img[app_code_start],
app_code_size, 0, true, app_code_size, 0, true,
GET_IMEM_TAG(app_code_start)); GET_IMEM_TAG(app_code_start));
if (app_data_size) if (app_data_size)
nvgpu_falcon_copy_to_dmem(g->minion_flcn, nvgpu_falcon_copy_to_dmem(&g->minion_flcn,
app_data_start, app_data_start,
(u8 *)&ndev->minion_img[app_data_start], (u8 *)&ndev->minion_img[app_data_start],
app_data_size, 0); app_data_size, 0);

View File

@@ -202,9 +202,10 @@ static int tu104_sec2_flcn_bl_bootstrap(struct gk20a *g,
     data |= (1U << 3U);
     gk20a_writel(g, psec_falcon_engctl_r(), data);

-    nvgpu_falcon_mailbox_write(g->sec2.flcn, FALCON_MAILBOX_0, 0xDEADA5A5U);
+    nvgpu_falcon_mailbox_write(&g->sec2.flcn, FALCON_MAILBOX_0,
+        0xDEADA5A5U);

-    return nvgpu_falcon_bl_bootstrap(g->sec2.flcn, bl_info);
+    return nvgpu_falcon_bl_bootstrap(&g->sec2.flcn, bl_info);
 }

 int tu104_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
@@ -214,7 +215,7 @@ int tu104_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
     nvgpu_log_fn(g, " ");

-    nvgpu_falcon_reset(g->sec2.flcn);
+    nvgpu_falcon_reset(&g->sec2.flcn);

     data = gk20a_readl(g, psec_fbif_ctl_r());
     data |= psec_fbif_ctl_allow_phys_no_ctx_allow_f();
@@ -318,7 +319,7 @@ void tu104_sec2_enable_irq(struct nvgpu_sec2 *sec2, bool enable)
     u32 intr_mask;
     u32 intr_dest;

-    nvgpu_falcon_set_irq(g->sec2.flcn, false, 0x0, 0x0);
+    nvgpu_falcon_set_irq(&g->sec2.flcn, false, 0x0, 0x0);

     if (enable) {
         /* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */
@@ -351,7 +352,7 @@ void tu104_sec2_enable_irq(struct nvgpu_sec2 *sec2, bool enable)
             psec_falcon_irqmset_swgen0_f(1) |
             psec_falcon_irqmset_swgen1_f(1);

-        nvgpu_falcon_set_irq(g->sec2.flcn, true, intr_mask, intr_dest);
+        nvgpu_falcon_set_irq(&g->sec2.flcn, true, intr_mask, intr_dest);
     }
 }
@@ -399,7 +400,7 @@ void tu104_sec2_isr(struct gk20a *g)
     if ((intr & psec_falcon_irqstat_halt_true_f()) != 0U) {
         nvgpu_err(g, "sec2 halt intr not implemented");

-        nvgpu_falcon_dump_stats(g->sec2.flcn);
+        nvgpu_falcon_dump_stats(&g->sec2.flcn);
     }

     if ((intr & psec_falcon_irqstat_exterr_true_f()) != 0U) {
         nvgpu_err(g,