Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: make all falcons struct nvgpu_falcon*
To keep the falcon header free of private data, make all falcon members of struct gk20a (pmu.flcn, sec2.flcn, fecs_flcn, gpccs_flcn, nvdec_flcn, minion_flcn, gsp_flcn) pointers to struct nvgpu_falcon. The falcon structures are now allocated by falcon_sw_init and deallocated by falcon_sw_free. While at it, remove the duplicate gk20a.pmu_flcn and gk20a.sec2_flcn members, refactor the flcn_id assignment, and introduce the falcon_hal_sw_free HAL op.

JIRA NVGPU-1594

Change-Id: I222086cf28215ea8ecf9a6166284d5cc506bb0c5
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1968242
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 5efc446a06 (parent b8c8d627af)
committed by: mobile promotions
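For orientation before the diff: the change replaces embedded falcon members with heap-allocated instances reached through per-GPU pointer slots. A minimal sketch of that lifecycle, using simplified stand-in types rather than the real nvgpu definitions (the slot lookup mirrors falcon_get_instance() in the diff below):

#include <errno.h>
#include <stdlib.h>

/* Simplified stand-ins; the real struct nvgpu_falcon carries ops, locks, etc. */
struct falcon { unsigned int id; };
struct gpu { struct falcon *pmu_flcn; struct falcon *fecs_flcn; };

/* Map an ID to the per-GPU pointer slot, like falcon_get_instance() below. */
static struct falcon **get_slot(struct gpu *g, unsigned int id)
{
	switch (id) {
	case 0: return &g->pmu_flcn;
	case 1: return &g->fecs_flcn;
	default: return NULL;
	}
}

/* Allocate on init, as the reworked nvgpu_falcon_sw_init() does. */
static int falcon_sw_init(struct gpu *g, unsigned int id)
{
	struct falcon **slot = get_slot(g, id);
	struct falcon *flcn;

	if (slot == NULL)
		return -ENODEV;
	flcn = malloc(sizeof(*flcn));
	if (flcn == NULL)
		return -ENOMEM;
	flcn->id = id;
	*slot = flcn;
	return 0;	/* the real code then calls the falcon_hal_sw_init HAL op */
}

/* Free and clear the slot, as the new nvgpu_falcon_sw_free() does. */
static void falcon_sw_free(struct gpu *g, unsigned int id)
{
	struct falcon **slot = get_slot(g, id);

	if (slot == NULL || *slot == NULL)
		return;
	free(*slot);	/* the real code calls the falcon_hal_sw_free op first */
	*slot = NULL;
}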
@@ -34,10 +34,16 @@
 int nvgpu_falcon_wait_idle(struct nvgpu_falcon *flcn)
 {
 	struct gk20a *g = flcn->g;
-	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+	struct nvgpu_falcon_ops *flcn_ops;
 	struct nvgpu_timeout timeout;
 	u32 idle_stat;
 
+	if (flcn == NULL) {
+		return -EINVAL;
+	}
+
+	flcn_ops = &flcn->flcn_ops;
+
 	if (flcn_ops->is_falcon_idle == NULL) {
 		nvgpu_warn(g, "Invalid op on falcon 0x%x ", flcn->flcn_id);
 		return -EINVAL;
@@ -84,6 +90,10 @@ int nvgpu_falcon_mem_scrub_wait(struct nvgpu_falcon *flcn)
 	struct nvgpu_timeout timeout;
 	int status = 0;
 
+	if (flcn == NULL) {
+		return -EINVAL;
+	}
+
 	/* check IMEM/DMEM scrubbing complete status */
 	nvgpu_timeout_init(flcn->g, &timeout,
 		MEM_SCRUBBING_TIMEOUT_MAX /
@@ -106,10 +116,17 @@ exit:
 int nvgpu_falcon_reset(struct nvgpu_falcon *flcn)
 {
+	struct nvgpu_falcon_ops *flcn_ops;
 	int status = 0;
 
-	if (flcn->flcn_ops.reset != NULL) {
-		status = flcn->flcn_ops.reset(flcn);
+	if (flcn == NULL) {
+		return -EINVAL;
+	}
+
+	flcn_ops = &flcn->flcn_ops;
+
+	if (flcn_ops->reset != NULL) {
+		status = flcn_ops->reset(flcn);
 		if (status == 0) {
 			status = nvgpu_falcon_mem_scrub_wait(flcn);
 		}
@@ -125,7 +142,13 @@ int nvgpu_falcon_reset(struct nvgpu_falcon *flcn)
 void nvgpu_falcon_set_irq(struct nvgpu_falcon *flcn, bool enable,
 	u32 intr_mask, u32 intr_dest)
 {
-	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+	struct nvgpu_falcon_ops *flcn_ops;
 
+	if (flcn == NULL) {
+		return;
+	}
+
+	flcn_ops = &flcn->flcn_ops;
+
 	if (flcn_ops->set_irq != NULL) {
 		flcn->intr_mask = intr_mask;
@@ -154,10 +177,16 @@ static bool falcon_get_cpu_halted_status(struct nvgpu_falcon *flcn)
 
 int nvgpu_falcon_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout)
 {
-	struct gk20a *g = flcn->g;
+	struct gk20a *g;
 	struct nvgpu_timeout to;
 	int status = 0;
 
+	if (flcn == NULL) {
+		return -EINVAL;
+	}
+
+	g = flcn->g;
+
 	nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
 	do {
 		if (falcon_get_cpu_halted_status(flcn)) {
@@ -178,10 +207,16 @@ int nvgpu_falcon_clear_halt_intr_status(struct nvgpu_falcon *flcn,
 	unsigned int timeout)
 {
 	struct gk20a *g = flcn->g;
-	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+	struct nvgpu_falcon_ops *flcn_ops;
 	struct nvgpu_timeout to;
 	int status = 0;
 
+	if (flcn == NULL) {
+		return -EINVAL;
+	}
+
+	flcn_ops = &flcn->flcn_ops;
+
 	if (flcn_ops->clear_halt_interrupt_status == NULL) {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
@@ -207,10 +242,15 @@ int nvgpu_falcon_clear_halt_intr_status(struct nvgpu_falcon *flcn,
 int nvgpu_falcon_copy_from_emem(struct nvgpu_falcon *flcn,
 	u32 src, u8 *dst, u32 size, u8 port)
 {
-	struct nvgpu_falcon_engine_dependency_ops *flcn_dops =
-		&flcn->flcn_engine_dep_ops;
+	struct nvgpu_falcon_engine_dependency_ops *flcn_dops;
 	int status = -EINVAL;
 
+	if (flcn == NULL) {
+		return -EINVAL;
+	}
+
+	flcn_dops = &flcn->flcn_engine_dep_ops;
+
 	if (flcn_dops->copy_from_emem != NULL) {
 		status = flcn_dops->copy_from_emem(flcn, src, dst, size, port);
 	}
@@ -221,10 +261,15 @@ int nvgpu_falcon_copy_from_emem(struct nvgpu_falcon *flcn,
 int nvgpu_falcon_copy_to_emem(struct nvgpu_falcon *flcn,
 	u32 dst, u8 *src, u32 size, u8 port)
 {
-	struct nvgpu_falcon_engine_dependency_ops *flcn_dops =
-		&flcn->flcn_engine_dep_ops;
+	struct nvgpu_falcon_engine_dependency_ops *flcn_dops;
 	int status = -EINVAL;
 
+	if (flcn == NULL) {
+		return -EINVAL;
+	}
+
+	flcn_dops = &flcn->flcn_engine_dep_ops;
+
 	if (flcn_dops->copy_to_emem != NULL) {
 		status = flcn_dops->copy_to_emem(flcn, dst, src, size, port);
 	}
@@ -235,7 +280,13 @@ int nvgpu_falcon_copy_to_emem(struct nvgpu_falcon *flcn,
 int nvgpu_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
 	u32 src, u8 *dst, u32 size, u8 port)
 {
-	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+	struct nvgpu_falcon_ops *flcn_ops;
 
+	if (flcn == NULL) {
+		return -EINVAL;
+	}
+
+	flcn_ops = &flcn->flcn_ops;
+
 	return flcn_ops->copy_from_dmem(flcn, src, dst, size, port);
 }
@@ -243,7 +294,13 @@ int nvgpu_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
 int nvgpu_falcon_copy_to_dmem(struct nvgpu_falcon *flcn,
 	u32 dst, u8 *src, u32 size, u8 port)
 {
-	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+	struct nvgpu_falcon_ops *flcn_ops;
 
+	if (flcn == NULL) {
+		return -EINVAL;
+	}
+
+	flcn_ops = &flcn->flcn_ops;
+
 	return flcn_ops->copy_to_dmem(flcn, dst, src, size, port);
 }
@@ -251,9 +308,15 @@ int nvgpu_falcon_copy_to_dmem(struct nvgpu_falcon *flcn,
 int nvgpu_falcon_copy_from_imem(struct nvgpu_falcon *flcn,
 	u32 src, u8 *dst, u32 size, u8 port)
 {
-	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+	struct nvgpu_falcon_ops *flcn_ops;
 	int status = -EINVAL;
 
+	if (flcn == NULL) {
+		return -EINVAL;
+	}
+
+	flcn_ops = &flcn->flcn_ops;
+
 	if (flcn_ops->copy_from_imem != NULL) {
 		status = flcn_ops->copy_from_imem(flcn, src, dst, size, port);
 	} else {
@@ -267,9 +330,15 @@ int nvgpu_falcon_copy_from_imem(struct nvgpu_falcon *flcn,
 int nvgpu_falcon_copy_to_imem(struct nvgpu_falcon *flcn,
 	u32 dst, u8 *src, u32 size, u8 port, bool sec, u32 tag)
 {
-	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+	struct nvgpu_falcon_ops *flcn_ops;
 	int status = -EINVAL;
 
+	if (flcn == NULL) {
+		return -EINVAL;
+	}
+
+	flcn_ops = &flcn->flcn_ops;
+
 	if (flcn_ops->copy_to_imem != NULL) {
 		status = flcn_ops->copy_to_imem(flcn, dst, src, size, port,
 			sec, tag);
@@ -327,21 +396,35 @@ static void falcon_print_mem(struct nvgpu_falcon *flcn, u32 src,
 
 void nvgpu_falcon_print_dmem(struct nvgpu_falcon *flcn, u32 src, u32 size)
 {
+	if (flcn == NULL) {
+		return;
+	}
+
 	nvgpu_info(flcn->g, " PRINT DMEM ");
 	falcon_print_mem(flcn, src, size, MEM_DMEM);
 }
 
 void nvgpu_falcon_print_imem(struct nvgpu_falcon *flcn, u32 src, u32 size)
 {
+	if (flcn == NULL) {
+		return;
+	}
+
 	nvgpu_info(flcn->g, " PRINT IMEM ");
 	falcon_print_mem(flcn, src, size, MEM_IMEM);
 }
 
 int nvgpu_falcon_bootstrap(struct nvgpu_falcon *flcn, u32 boot_vector)
 {
-	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+	struct nvgpu_falcon_ops *flcn_ops;
 	int status = -EINVAL;
 
+	if (flcn == NULL) {
+		return -EINVAL;
+	}
+
+	flcn_ops = &flcn->flcn_ops;
+
 	if (flcn_ops->bootstrap != NULL) {
 		status = flcn_ops->bootstrap(flcn, boot_vector);
 	} else {
@@ -354,9 +437,15 @@ int nvgpu_falcon_bootstrap(struct nvgpu_falcon *flcn, u32 boot_vector)
 
 u32 nvgpu_falcon_mailbox_read(struct nvgpu_falcon *flcn, u32 mailbox_index)
 {
-	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+	struct nvgpu_falcon_ops *flcn_ops;
 	u32 data = 0;
 
+	if (flcn == NULL) {
+		return 0;
+	}
+
+	flcn_ops = &flcn->flcn_ops;
+
 	if (flcn_ops->mailbox_read != NULL) {
 		data = flcn_ops->mailbox_read(flcn, mailbox_index);
 	} else {
@@ -370,7 +459,13 @@ u32 nvgpu_falcon_mailbox_read(struct nvgpu_falcon *flcn, u32 mailbox_index)
 void nvgpu_falcon_mailbox_write(struct nvgpu_falcon *flcn, u32 mailbox_index,
 	u32 data)
 {
-	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+	struct nvgpu_falcon_ops *flcn_ops;
 
+	if (flcn == NULL) {
+		return;
+	}
+
+	flcn_ops = &flcn->flcn_ops;
+
 	if (flcn_ops->mailbox_write != NULL) {
 		flcn_ops->mailbox_write(flcn, mailbox_index, data);
@@ -382,7 +477,13 @@ void nvgpu_falcon_mailbox_write(struct nvgpu_falcon *flcn, u32 mailbox_index,
 
 void nvgpu_falcon_dump_stats(struct nvgpu_falcon *flcn)
 {
-	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+	struct nvgpu_falcon_ops *flcn_ops;
 
+	if (flcn == NULL) {
+		return;
+	}
+
+	flcn_ops = &flcn->flcn_ops;
+
 	if (flcn_ops->dump_falcon_stats != NULL) {
 		flcn_ops->dump_falcon_stats(flcn);
@@ -395,9 +496,15 @@ void nvgpu_falcon_dump_stats(struct nvgpu_falcon *flcn)
 int nvgpu_falcon_bl_bootstrap(struct nvgpu_falcon *flcn,
 	struct nvgpu_falcon_bl_info *bl_info)
 {
-	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+	struct nvgpu_falcon_ops *flcn_ops;
 	int status = 0;
 
+	if (flcn == NULL) {
+		return -EINVAL;
+	}
+
+	flcn_ops = &flcn->flcn_ops;
+
 	if (flcn_ops->bl_bootstrap != NULL) {
 		status = flcn_ops->bl_bootstrap(flcn, bl_info);
 	}
@@ -412,7 +519,13 @@ int nvgpu_falcon_bl_bootstrap(struct nvgpu_falcon *flcn,
 
 void nvgpu_falcon_get_ctls(struct nvgpu_falcon *flcn, u32 *sctl, u32 *cpuctl)
 {
-	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+	struct nvgpu_falcon_ops *flcn_ops;
 
+	if (flcn == NULL) {
+		return;
+	}
+
+	flcn_ops = &flcn->flcn_ops;
+
 	if (flcn_ops->get_falcon_ctls != NULL) {
 		flcn_ops->get_falcon_ctls(flcn, sctl, cpuctl);
@@ -432,54 +545,77 @@ u32 nvgpu_falcon_get_id(struct nvgpu_falcon *flcn)
 	return flcn->flcn_id;
 }
 
-int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id)
+static struct nvgpu_falcon **falcon_get_instance(struct gk20a *g, u32 flcn_id)
 {
-	struct nvgpu_falcon *flcn = NULL;
-	struct gpu_ops *gops = &g->ops;
-	int err = 0;
+	struct nvgpu_falcon **flcn_p = NULL;
 
 	switch (flcn_id) {
 	case FALCON_ID_PMU:
-		flcn = &g->pmu_flcn;
-		flcn->flcn_id = flcn_id;
-		g->pmu.flcn = &g->pmu_flcn;
+		flcn_p = &g->pmu.flcn;
 		break;
 	case FALCON_ID_SEC2:
-		flcn = &g->sec2_flcn;
-		flcn->flcn_id = flcn_id;
-		g->sec2.flcn = &g->sec2_flcn;
+		flcn_p = &g->sec2.flcn;
 		break;
 	case FALCON_ID_FECS:
-		flcn = &g->fecs_flcn;
-		flcn->flcn_id = flcn_id;
+		flcn_p = &g->fecs_flcn;
 		break;
 	case FALCON_ID_GPCCS:
-		flcn = &g->gpccs_flcn;
-		flcn->flcn_id = flcn_id;
+		flcn_p = &g->gpccs_flcn;
 		break;
 	case FALCON_ID_NVDEC:
-		flcn = &g->nvdec_flcn;
-		flcn->flcn_id = flcn_id;
+		flcn_p = &g->nvdec_flcn;
 		break;
 	case FALCON_ID_MINION:
-		flcn = &g->minion_flcn;
-		flcn->flcn_id = flcn_id;
+		flcn_p = &g->minion_flcn;
 		break;
 	case FALCON_ID_GSPLITE:
-		flcn = &g->gsp_flcn;
-		flcn->flcn_id = flcn_id;
+		flcn_p = &g->gsp_flcn;
 		break;
 	default:
 		nvgpu_err(g, "Invalid/Unsupported falcon ID %x", flcn_id);
-		err = -ENODEV;
 		break;
 	};
 
-	if (err != 0) {
-		return err;
+	return flcn_p;
 }
 
+int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id)
+{
+	struct nvgpu_falcon **flcn_p = NULL, *flcn = NULL;
+	struct gpu_ops *gops = &g->ops;
+
+	flcn_p = falcon_get_instance(g, flcn_id);
+	if (flcn_p == NULL) {
+		return -ENODEV;
+	}
+
-	/* call to HAL method to assign flcn base & ops to selected falcon */
+	flcn = (struct nvgpu_falcon *)
+		nvgpu_kmalloc(g, sizeof(struct nvgpu_falcon));
+
+	if (flcn == NULL) {
+		return -ENOMEM;
+	}
+
+	flcn->flcn_id = flcn_id;
 	flcn->g = g;
+
+	*flcn_p = flcn;
+
+	/* call to HAL method to assign flcn base & ops to selected falcon */
 	return gops->falcon.falcon_hal_sw_init(flcn);
 }
+
+void nvgpu_falcon_sw_free(struct gk20a *g, u32 flcn_id)
+{
+	struct nvgpu_falcon **flcn_p = NULL;
+	struct gpu_ops *gops = &g->ops;
+
+	flcn_p = falcon_get_instance(g, flcn_id);
+	if ((flcn_p == NULL) || (*flcn_p == NULL)) {
+		return;
+	}
+
+	gops->falcon.falcon_hal_sw_free(*flcn_p);
+	nvgpu_kfree(g, *flcn_p);
+	*flcn_p = NULL;
+}
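The bulk of the falcon.c changes above apply one pattern: validate the object pointer before taking the address of its ops table, then guard each optional op. A condensed sketch with generic names, not the actual nvgpu types:

#include <errno.h>
#include <stddef.h>

struct ops { int (*reset)(void *obj); };	/* stand-in for nvgpu_falcon_ops */
struct obj { struct ops ops; };

static int obj_reset(struct obj *o)
{
	struct ops *ops;

	if (o == NULL) {
		return -EINVAL;	/* reject NULL before &o->ops is formed */
	}

	ops = &o->ops;

	if (ops->reset == NULL) {
		return -EINVAL;	/* optional op not wired up on this chip */
	}

	return ops->reset(o);
}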
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -756,3 +756,16 @@ int gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
 
 	return err;
 }
+
+void gk20a_falcon_hal_sw_free(struct nvgpu_falcon *flcn)
+{
+	struct gk20a *g = flcn->g;
+
+	if (flcn->is_falcon_supported) {
+		nvgpu_mutex_destroy(&flcn->copy_lock);
+		flcn->is_falcon_supported = false;
+	} else {
+		nvgpu_log_info(g, "falcon 0x%x not supported on %s",
+			flcn->flcn_id, g->name);
+	}
+}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -24,6 +24,7 @@
 
 void gk20a_falcon_ops(struct nvgpu_falcon *flcn);
 int gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn);
+void gk20a_falcon_hal_sw_free(struct nvgpu_falcon *flcn);
 void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn);
 void gk20a_falcon_get_ctls(struct nvgpu_falcon *flcn, u32 *sctl, u32 *cpuctl);
@@ -1,7 +1,7 @@
 /*
  * GV100 FB
  *
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -161,7 +161,7 @@ int gv100_fb_memory_unlock(struct gk20a *g)
 	g->ops.mc.enable(g, g->ops.mc.reset_mask(g, NVGPU_UNIT_NVDEC));
 
 	/* nvdec falcon reset */
-	nvgpu_falcon_reset(&g->nvdec_flcn);
+	nvgpu_falcon_reset(g->nvdec_flcn);
 
 	hsbin_hdr = (struct bin_hdr *)mem_unlock_fw->data;
 	fw_hdr = (struct acr_fw_header *)(mem_unlock_fw->data +
@@ -184,10 +184,10 @@ int gv100_fb_memory_unlock(struct gk20a *g)
 	}
 
 	/* Clear interrupts */
-	nvgpu_falcon_set_irq(&g->nvdec_flcn, false, 0x0, 0x0);
+	nvgpu_falcon_set_irq(g->nvdec_flcn, false, 0x0, 0x0);
 
 	/* Copy Non Secure IMEM code */
-	nvgpu_falcon_copy_to_imem(&g->nvdec_flcn, 0,
+	nvgpu_falcon_copy_to_imem(g->nvdec_flcn, 0,
 		(u8 *)&mem_unlock_ucode[
 			mem_unlock_ucode_header[OS_CODE_OFFSET] >> 2],
 		mem_unlock_ucode_header[OS_CODE_SIZE], 0, false,
@@ -196,34 +196,34 @@ int gv100_fb_memory_unlock(struct gk20a *g)
 	/* Put secure code after non-secure block */
 	sec_imem_dest = GET_NEXT_BLOCK(mem_unlock_ucode_header[OS_CODE_SIZE]);
 
-	nvgpu_falcon_copy_to_imem(&g->nvdec_flcn, sec_imem_dest,
+	nvgpu_falcon_copy_to_imem(g->nvdec_flcn, sec_imem_dest,
 		(u8 *)&mem_unlock_ucode[
 			mem_unlock_ucode_header[APP_0_CODE_OFFSET] >> 2],
 		mem_unlock_ucode_header[APP_0_CODE_SIZE], 0, true,
 		GET_IMEM_TAG(mem_unlock_ucode_header[APP_0_CODE_OFFSET]));
 
 	/* load DMEM: ensure that signatures are patched */
-	nvgpu_falcon_copy_to_dmem(&g->nvdec_flcn, 0, (u8 *)&mem_unlock_ucode[
+	nvgpu_falcon_copy_to_dmem(g->nvdec_flcn, 0, (u8 *)&mem_unlock_ucode[
 		mem_unlock_ucode_header[OS_DATA_OFFSET] >> 2],
 		mem_unlock_ucode_header[OS_DATA_SIZE], 0);
 
 	/* Write non-zero value to mailbox register which is updated by
 	 * mem_unlock bin to denote its return status.
 	 */
-	nvgpu_falcon_mailbox_write(&g->nvdec_flcn,
+	nvgpu_falcon_mailbox_write(g->nvdec_flcn,
 		FALCON_MAILBOX_0, 0xdeadbeef);
 
 	/* set BOOTVEC to start of non-secure code */
-	nvgpu_falcon_bootstrap(&g->nvdec_flcn, 0);
+	nvgpu_falcon_bootstrap(g->nvdec_flcn, 0);
 
 	/* wait for complete & halt */
-	nvgpu_falcon_wait_for_halt(&g->nvdec_flcn, MEM_UNLOCK_TIMEOUT);
+	nvgpu_falcon_wait_for_halt(g->nvdec_flcn, MEM_UNLOCK_TIMEOUT);
 
 	/* check mem unlock status */
-	val = nvgpu_falcon_mailbox_read(&g->nvdec_flcn, FALCON_MAILBOX_0);
+	val = nvgpu_falcon_mailbox_read(g->nvdec_flcn, FALCON_MAILBOX_0);
 	if (val != 0U) {
 		nvgpu_err(g, "memory unlock failed, err %x", val);
-		nvgpu_falcon_dump_stats(&g->nvdec_flcn);
+		nvgpu_falcon_dump_stats(g->nvdec_flcn);
 		err = -1;
 		goto exit;
 	}
@@ -101,6 +101,12 @@ int gk20a_prepare_poweroff(struct gk20a *g)
 	ret |= nvgpu_mm_suspend(g);
 	ret |= gk20a_fifo_suspend(g);
 
+	nvgpu_falcon_sw_free(g, FALCON_ID_FECS);
+	nvgpu_falcon_sw_free(g, FALCON_ID_GSPLITE);
+	nvgpu_falcon_sw_free(g, FALCON_ID_NVDEC);
+	nvgpu_falcon_sw_free(g, FALCON_ID_SEC2);
+	nvgpu_falcon_sw_free(g, FALCON_ID_PMU);
+
 	gk20a_ce_suspend(g);
 
 	/* Disable GPCPLL */
@@ -151,27 +157,27 @@ int gk20a_finalize_poweron(struct gk20a *g)
 	err = nvgpu_falcon_sw_init(g, FALCON_ID_PMU);
 	if (err != 0) {
 		nvgpu_err(g, "failed to sw init FALCON_ID_PMU");
-		goto done;
+		goto exit;
 	}
 	err = nvgpu_falcon_sw_init(g, FALCON_ID_SEC2);
 	if (err != 0) {
 		nvgpu_err(g, "failed to sw init FALCON_ID_SEC2");
-		goto done;
+		goto done_pmu;
 	}
 	err = nvgpu_falcon_sw_init(g, FALCON_ID_NVDEC);
 	if (err != 0) {
 		nvgpu_err(g, "failed to sw init FALCON_ID_NVDEC");
-		goto done;
+		goto done_sec2;
 	}
 	err = nvgpu_falcon_sw_init(g, FALCON_ID_GSPLITE);
 	if (err != 0) {
 		nvgpu_err(g, "failed to sw init FALCON_ID_GSPLITE");
-		goto done;
+		goto done_nvdec;
 	}
 	err = nvgpu_falcon_sw_init(g, FALCON_ID_FECS);
 	if (err != 0) {
 		nvgpu_err(g, "failed to sw init FALCON_ID_FECS");
-		goto done;
+		goto done_gsp;
 	}
 
 	if (g->ops.pmu.is_pmu_supported(g)) {
@@ -437,7 +443,19 @@ int gk20a_finalize_poweron(struct gk20a *g)
 		g->ops.fifo.channel_resume(g);
 	}
 
+	goto exit;
+
 done:
+	nvgpu_falcon_sw_free(g, FALCON_ID_FECS);
+done_gsp:
+	nvgpu_falcon_sw_free(g, FALCON_ID_GSPLITE);
+done_nvdec:
+	nvgpu_falcon_sw_free(g, FALCON_ID_NVDEC);
+done_sec2:
+	nvgpu_falcon_sw_free(g, FALCON_ID_SEC2);
+done_pmu:
+	nvgpu_falcon_sw_free(g, FALCON_ID_PMU);
+exit:
 	if (err != 0) {
 		g->power_on = false;
 	}
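The reworked error path in gk20a_finalize_poweron() above uses the common kernel staged-unwind idiom: each successfully initialized falcon gets its own label, and a failure jumps to the label that frees everything initialized so far. A minimal standalone sketch of the pattern, with hypothetical init_a/init_b/free_a names rather than nvgpu code:

#include <errno.h>

/* Hypothetical stand-ins for the per-falcon sw_init/sw_free calls. */
static int init_a(void) { return 0; }
static int init_b(void) { return -EINVAL; /* simulate a failure */ }
static void free_a(void) { }

/*
 * Staged unwind: a failure at step N jumps to the label that releases
 * steps N-1..1, mirroring the done/done_gsp/.../done_pmu chain above.
 */
static int init_all(void)
{
	int err;

	err = init_a();
	if (err != 0) {
		goto exit;	/* nothing initialized yet */
	}

	err = init_b();
	if (err != 0) {
		goto undo_a;	/* release only what already succeeded */
	}

	return 0;

undo_a:
	free_a();
exit:
	return err;
}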
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -470,7 +470,7 @@ static u32 gv100_nvlink_minion_load(struct gk20a *g)
 	}
 
 	/* nvdec falcon reset */
-	nvgpu_falcon_reset(&g->minion_flcn);
+	nvgpu_falcon_reset(g->minion_flcn);
 
 	/* Read ucode header */
 	minion_hdr->os_code_offset = minion_extract_word(nvgpu_minion_fw,
@@ -593,17 +593,17 @@ static u32 gv100_nvlink_minion_load(struct gk20a *g)
 		" - Ucode Data Size = %u", minion_hdr->ucode_data_size);
 
 	/* Clear interrupts */
-	nvgpu_falcon_set_irq(&g->minion_flcn, true, MINION_FALCON_INTR_MASK,
+	nvgpu_falcon_set_irq(g->minion_flcn, true, MINION_FALCON_INTR_MASK,
 		MINION_FALCON_INTR_DEST);
 
 	/* Copy Non Secure IMEM code */
-	nvgpu_falcon_copy_to_imem(&g->minion_flcn, 0,
+	nvgpu_falcon_copy_to_imem(g->minion_flcn, 0,
 		(u8 *)&ndev->minion_img[minion_hdr->os_code_offset],
 		minion_hdr->os_code_size, 0, false,
 		GET_IMEM_TAG(minion_hdr->os_code_offset));
 
 	/* Copy Non Secure DMEM code */
-	nvgpu_falcon_copy_to_dmem(&g->minion_flcn, 0,
+	nvgpu_falcon_copy_to_dmem(g->minion_flcn, 0,
 		(u8 *)&ndev->minion_img[minion_hdr->os_data_offset],
 		minion_hdr->os_data_size, 0);
 
@@ -615,21 +615,21 @@ static u32 gv100_nvlink_minion_load(struct gk20a *g)
 		u32 app_data_size = minion_hdr->app_data_sizes[app];
 
 		if (app_code_size)
-			nvgpu_falcon_copy_to_imem(&g->minion_flcn,
+			nvgpu_falcon_copy_to_imem(g->minion_flcn,
 				app_code_start,
 				(u8 *)&ndev->minion_img[app_code_start],
 				app_code_size, 0, true,
 				GET_IMEM_TAG(app_code_start));
 
 		if (app_data_size)
-			nvgpu_falcon_copy_to_dmem(&g->minion_flcn,
+			nvgpu_falcon_copy_to_dmem(g->minion_flcn,
 				app_data_start,
 				(u8 *)&ndev->minion_img[app_data_start],
 				app_data_size, 0);
 	}
 
 	/* set BOOTVEC to start of non-secure code */
-	nvgpu_falcon_bootstrap(&g->minion_flcn, 0x0);
+	nvgpu_falcon_bootstrap(g->minion_flcn, 0x0);
 
 	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
 		NVGPU_TIMER_CPU_TIMER);
@@ -2246,6 +2246,8 @@ int gv100_nvlink_reg_init(struct gk20a *g)
  */
 int gv100_nvlink_shutdown(struct gk20a *g)
 {
+	nvgpu_falcon_sw_free(g, FALCON_ID_MINION);
+
 	return 0;
 }
@@ -2661,18 +2663,18 @@ int gv100_nvlink_early_init(struct gk20a *g)
 	err = nvgpu_bios_get_lpwr_nvlink_table_hdr(g);
 	if (err != 0) {
 		nvgpu_err(g, "Failed to read LWPR_NVLINK_TABLE header\n");
-		goto nvlink_init_exit;
+		goto exit;
 	}
 
 	err = nvgpu_bios_get_nvlink_config_data(g);
 	if (err != 0) {
 		nvgpu_err(g, "failed to read nvlink vbios data");
-		goto nvlink_init_exit;
+		goto exit;
 	}
 
 	err = g->ops.nvlink.discover_ioctrl(g);
 	if (err != 0)
-		goto nvlink_init_exit;
+		goto exit;
 
 	/* Enable NVLINK in MC */
 	mc_reset_nvlink_mask = BIT32(g->nvlink.ioctrl_table[0].reset_enum);
@@ -2683,13 +2685,13 @@ int gv100_nvlink_early_init(struct gk20a *g)
 	err = g->ops.nvlink.discover_link(g);
 	if ((err != 0) || (g->nvlink.discovered_links == 0)) {
 		nvgpu_err(g, "No links available");
-		goto nvlink_init_exit;
+		goto exit;
 	}
 
 	err = nvgpu_falcon_sw_init(g, FALCON_ID_MINION);
 	if (err != 0) {
 		nvgpu_err(g, "failed to sw init FALCON_ID_MINION");
-		goto nvlink_init_exit;
+		goto exit;
 	}
 
 	g->nvlink.discovered_links &= ~g->nvlink.link_disable_mask;
@@ -2740,6 +2742,8 @@ int gv100_nvlink_early_init(struct gk20a *g)
 	gv100_nvlink_prog_alt_clk(g);
 
 nvlink_init_exit:
+	nvgpu_falcon_sw_free(g, FALCON_ID_MINION);
+exit:
 	return err;
 }
@@ -1408,7 +1408,7 @@ static void gm20b_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
 	hs_acr->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc);
 
 	/* set on which falcon ACR need to execute*/
-	hs_acr->acr_flcn = &g->pmu_flcn;
+	hs_acr->acr_flcn = g->pmu.flcn;
 	hs_acr->acr_flcn_setup_hw_and_bl_bootstrap =
 		gm20b_pmu_setup_hw_and_bl_bootstrap;
 }
@@ -1348,7 +1348,7 @@ static void nvgpu_gp106_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
 	hs_acr->ptr_bl_dmem_desc = &hs_acr->bl_dmem_desc_v1;
 	hs_acr->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1);
 
-	hs_acr->acr_flcn = &g->sec2_flcn;
+	hs_acr->acr_flcn = g->sec2.flcn;
 	hs_acr->acr_flcn_setup_hw_and_bl_bootstrap =
 		gp106_sec2_setup_hw_and_bl_bootstrap;
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -144,7 +144,7 @@ static void gv11b_acr_default_sw_init(struct gk20a *g, struct hs_acr *hs_acr)
 	hs_acr->ptr_bl_dmem_desc = &hs_acr->bl_dmem_desc_v1;
 	hs_acr->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1);
 
-	hs_acr->acr_flcn = &g->pmu_flcn;
+	hs_acr->acr_flcn = g->pmu.flcn;
 	hs_acr->acr_flcn_setup_hw_and_bl_bootstrap =
 		gm20b_pmu_setup_hw_and_bl_bootstrap;
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -78,7 +78,7 @@ static void nvgpu_tu104_acr_ahesasc_sw_init(struct gk20a *g,
 	acr_ahesasc->ptr_bl_dmem_desc = &acr_ahesasc->bl_dmem_desc_v1;
 	acr_ahesasc->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1);
 
-	acr_ahesasc->acr_flcn = &g->sec2_flcn;
+	acr_ahesasc->acr_flcn = g->sec2.flcn;
 	acr_ahesasc->acr_flcn_setup_hw_and_bl_bootstrap =
 		tu104_sec2_setup_hw_and_bl_bootstrap;
 }
@@ -102,7 +102,7 @@ static void nvgpu_tu104_acr_asb_sw_init(struct gk20a *g,
 	acr_asb->ptr_bl_dmem_desc = &acr_asb->bl_dmem_desc_v1;
 	acr_asb->bl_dmem_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1);
 
-	acr_asb->acr_flcn = &g->gsp_flcn;
+	acr_asb->acr_flcn = g->gsp_flcn;
 	acr_asb->acr_flcn_setup_hw_and_bl_bootstrap =
 		gv100_gsp_setup_hw_and_bl_bootstrap;
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -310,7 +310,7 @@ int nvgpu_init_pmu_support(struct gk20a *g)
 
 	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) {
 		/* Reset PMU engine */
-		err = nvgpu_falcon_reset(&g->pmu_flcn);
+		err = nvgpu_falcon_reset(g->pmu.flcn);
 
 		/* Bootstrap PMU from SEC2 RTOS*/
 		err = nvgpu_sec2_bootstrap_ls_falcons(g, &g->sec2,
@@ -324,7 +324,7 @@ int nvgpu_init_pmu_support(struct gk20a *g)
 		 * clear halt interrupt to avoid PMU-RTOS ucode
 		 * hitting breakpoint due to PMU halt
 		 */
-		err = nvgpu_falcon_clear_halt_intr_status(&g->pmu_flcn,
+		err = nvgpu_falcon_clear_halt_intr_status(g->pmu.flcn,
 			gk20a_get_gr_idle_timeout(g));
 		if (err != 0) {
 			goto exit;
@@ -1,8 +1,8 @@
 /*
  * GM20B PMU
  *
- * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
- *
+ * Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
+ *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
@@ -363,7 +363,7 @@ static int gm20b_bl_bootstrap(struct gk20a *g,
 		pwr_pmu_new_instblk_target_sys_coh_f() :
 		pwr_pmu_new_instblk_target_sys_ncoh_f())) ;
 
-	nvgpu_falcon_bl_bootstrap(&g->pmu_flcn, bl_info);
+	nvgpu_falcon_bl_bootstrap(g->pmu.flcn, bl_info);
 
 	return 0;
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -127,7 +127,7 @@ static int sec2_write_cmd(struct nvgpu_sec2 *sec2,
 	nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
 
 	do {
-		err = nvgpu_falcon_queue_push(&g->sec2_flcn, queue, cmd,
+		err = nvgpu_falcon_queue_push(g->sec2.flcn, queue, cmd,
 			cmd->hdr.size);
 		if ((err == -EAGAIN) && (nvgpu_timeout_expired(&timeout) == 0)) {
 			nvgpu_usleep_range(1000U, 2000U);
@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics
  *
- * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -103,7 +103,7 @@ void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
 {
 	unsigned int i;
 
-	nvgpu_falcon_dump_stats(&g->fecs_flcn);
+	nvgpu_falcon_dump_stats(g->fecs_flcn);
 
 	for (i = 0; i < g->ops.gr.fecs_ctxsw_mailbox_size(); i++) {
 		nvgpu_err(g, "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x",
@@ -1,7 +1,7 @@
 /*
  * GM20B Graphics
  *
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -732,6 +732,7 @@ static const struct gpu_ops gm20b_ops = {
 #endif
 	.falcon = {
 		.falcon_hal_sw_init = gk20a_falcon_hal_sw_init,
+		.falcon_hal_sw_free = gk20a_falcon_hal_sw_free,
 	},
 	.priv_ring = {
 		.enable_priv_ring = gm20b_priv_ring_enable,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -77,7 +77,7 @@ static int sec2_flcn_bl_bootstrap(struct gk20a *g,
 	data |= BIT32(3);
 	gk20a_writel(g, psec_falcon_engctl_r(), data);
 
-	err = nvgpu_falcon_bl_bootstrap(&g->sec2_flcn, bl_info);
+	err = nvgpu_falcon_bl_bootstrap(g->sec2.flcn, bl_info);
 
 	return err;
 }
@@ -90,7 +90,7 @@ int gp106_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
 
 	nvgpu_log_fn(g, " ");
 
-	nvgpu_falcon_reset(&g->sec2_flcn);
+	nvgpu_falcon_reset(g->sec2.flcn);
 
 	data = gk20a_readl(g, psec_fbif_ctl_r());
 	data |= psec_fbif_ctl_allow_phys_no_ctx_allow_f();
@@ -1,7 +1,7 @@
 /*
  * GP10B Tegra HAL interface
  *
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -807,6 +807,7 @@ static const struct gpu_ops gp10b_ops = {
 #endif
 	.falcon = {
 		.falcon_hal_sw_init = gk20a_falcon_hal_sw_init,
+		.falcon_hal_sw_free = gk20a_falcon_hal_sw_free,
 	},
 	.priv_ring = {
 		.enable_priv_ring = gm20b_priv_ring_enable,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -68,7 +68,7 @@ static int gsp_flcn_bl_bootstrap(struct gk20a *g,
 	data |= pgsp_falcon_engctl_switch_context_true_f();
 	gk20a_writel(g, pgsp_falcon_engctl_r(), data);
 
-	status = nvgpu_falcon_bl_bootstrap(&g->gsp_flcn, bl_info);
+	status = nvgpu_falcon_bl_bootstrap(g->gsp_flcn, bl_info);
 
 	return status;
 }
@@ -80,7 +80,7 @@ int gv100_gsp_setup_hw_and_bl_bootstrap(struct gk20a *g,
 	u32 data = 0;
 	int err = 0;
 
-	err = nvgpu_falcon_reset(&g->gsp_flcn);
+	err = nvgpu_falcon_reset(g->gsp_flcn);
 	if (err != 0) {
 		goto exit;
 	}
@@ -1,7 +1,7 @@
 /*
  * GV100 Tegra HAL interface
  *
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -1025,6 +1025,7 @@ static const struct gpu_ops gv100_ops = {
 	},
 	.falcon = {
 		.falcon_hal_sw_init = gv100_falcon_hal_sw_init,
+		.falcon_hal_sw_free = gk20a_falcon_hal_sw_free,
 	},
 	.priv_ring = {
 		.enable_priv_ring = gm20b_priv_ring_enable,
@@ -1,7 +1,7 @@
 /*
  * GV11B Tegra HAL interface
  *
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -943,6 +943,7 @@ static const struct gpu_ops gv11b_ops = {
 #endif
 	.falcon = {
 		.falcon_hal_sw_init = gk20a_falcon_hal_sw_init,
+		.falcon_hal_sw_free = gk20a_falcon_hal_sw_free,
 	},
 	.priv_ring = {
 		.enable_priv_ring = gm20b_priv_ring_enable,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -296,5 +296,6 @@ u32 nvgpu_falcon_queue_get_index(struct nvgpu_falcon_queue *queue);
 u32 nvgpu_falcon_queue_get_size(struct nvgpu_falcon_queue *queue);
 
 int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id);
+void nvgpu_falcon_sw_free(struct gk20a *g, u32 flcn_id);
 
 #endif /* NVGPU_FALCON_H */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * GK20A Graphics
  *
@@ -1409,6 +1409,7 @@ struct gpu_ops {
 	} xve;
 	struct {
 		int (*falcon_hal_sw_init)(struct nvgpu_falcon *flcn);
+		void (*falcon_hal_sw_free)(struct nvgpu_falcon *flcn);
 	} falcon;
 	struct {
 		void (*enable_priv_ring)(struct gk20a *g);
@@ -1606,13 +1607,11 @@ struct gk20a {
 	struct nvgpu_netlist_vars *netlist_vars;
 	bool netlist_valid;
 
-	struct nvgpu_falcon pmu_flcn;
-	struct nvgpu_falcon sec2_flcn;
-	struct nvgpu_falcon fecs_flcn;
-	struct nvgpu_falcon gpccs_flcn;
-	struct nvgpu_falcon nvdec_flcn;
-	struct nvgpu_falcon minion_flcn;
-	struct nvgpu_falcon gsp_flcn;
+	struct nvgpu_falcon *fecs_flcn;
+	struct nvgpu_falcon *gpccs_flcn;
+	struct nvgpu_falcon *nvdec_flcn;
+	struct nvgpu_falcon *minion_flcn;
+	struct nvgpu_falcon *gsp_flcn;
 	struct clk_gk20a clk;
 	struct fifo_gk20a fifo;
 	struct nvgpu_nvlink_dev nvlink;
@@ -1,7 +1,7 @@
 /*
  * TU104 Tegra HAL interface
  *
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -1060,6 +1060,7 @@ static const struct gpu_ops tu104_ops = {
 	},
 	.falcon = {
 		.falcon_hal_sw_init = tu104_falcon_hal_sw_init,
+		.falcon_hal_sw_free = gk20a_falcon_hal_sw_free,
 	},
 	.priv_ring = {
 		.enable_priv_ring = gm20b_priv_ring_enable,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -199,7 +199,7 @@ static int tu104_sec2_flcn_bl_bootstrap(struct gk20a *g,
 	data |= (1U << 3U);
 	gk20a_writel(g, psec_falcon_engctl_r(), data);
 
-	return nvgpu_falcon_bl_bootstrap(&g->sec2_flcn, bl_info);
+	return nvgpu_falcon_bl_bootstrap(g->sec2.flcn, bl_info);
 }
 
 int tu104_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
@@ -210,7 +210,7 @@ int tu104_sec2_setup_hw_and_bl_bootstrap(struct gk20a *g,
 
 	nvgpu_log_fn(g, " ");
 
-	nvgpu_falcon_reset(&g->sec2_flcn);
+	nvgpu_falcon_reset(g->sec2.flcn);
 
 	data = gk20a_readl(g, psec_fbif_ctl_r());
 	data |= psec_fbif_ctl_allow_phys_no_ctx_allow_f();
@@ -322,7 +322,7 @@ void tu104_sec2_enable_irq(struct nvgpu_sec2 *sec2, bool enable)
 	u32 intr_mask;
 	u32 intr_dest;
 
-	nvgpu_falcon_set_irq(&g->sec2_flcn, false, 0x0, 0x0);
+	nvgpu_falcon_set_irq(g->sec2.flcn, false, 0x0, 0x0);
 
 	if (enable) {
 		/* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */
@@ -355,7 +355,7 @@ void tu104_sec2_enable_irq(struct nvgpu_sec2 *sec2, bool enable)
 			psec_falcon_irqmset_swgen0_f(1) |
 			psec_falcon_irqmset_swgen1_f(1);
 
-		nvgpu_falcon_set_irq(&g->sec2_flcn, true, intr_mask, intr_dest);
+		nvgpu_falcon_set_irq(g->sec2.flcn, true, intr_mask, intr_dest);
 	}
 }
@@ -403,7 +403,7 @@ void tu104_sec2_isr(struct gk20a *g)
 
 	if ((intr & psec_falcon_irqstat_halt_true_f()) != 0U) {
 		nvgpu_err(g, "sec2 halt intr not implemented");
-		nvgpu_falcon_dump_stats(&g->sec2_flcn);
+		nvgpu_falcon_dump_stats(g->sec2.flcn);
 	}
 	if ((intr & psec_falcon_irqstat_exterr_true_f()) != 0U) {
 		nvgpu_err(g,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -615,7 +615,8 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 	},
 #endif
 	.falcon = {
-		.falcon_hal_sw_init = gk20a_falcon_hal_sw_init,
+		.falcon_hal_sw_init = NULL,
+		.falcon_hal_sw_free = NULL,
 	},
 	.priv_ring = {
 		.enable_priv_ring = NULL,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -696,7 +696,8 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 	},
 #endif
 	.falcon = {
-		.falcon_hal_sw_init = gk20a_falcon_hal_sw_init,
+		.falcon_hal_sw_init = NULL,
+		.falcon_hal_sw_free = NULL,
 	},
 	.priv_ring = {
 		.enable_priv_ring = NULL,