Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 10:34:43 +03:00)
gpu: nvgpu: falcon reset support
- Added the falcon-reset-dependent interface & HAL methods needed to perform falcon reset:
  - method to wait for idle
  - method to reset the falcon
  - method to set IRQs
  - method to read CPU status
- Updated the falcon ops pointers to point to the gk20a falcon HAL methods
- Added members to track whether the falcon and its interrupts are supported
- Added a falcon engine dependency ops member to support falcon-specific methods

JIRA NVGPU-99
JIRA NVGPU-101

Change-Id: I411477e5696a61ee73caebfdab625763b522c255
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1469453
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Committed by: mobile promotions
Parent: 5efa7c8d5e
Commit: be04b9b1b5
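For orientation, here is a minimal usage sketch of the new common falcon interface introduced by this change. The caller example_restart_pmu_falcon() is hypothetical and not part of the commit; it assumes nvgpu_flcn_sw_init() and the gk20a falcon HAL init (shown in the diff below) have already bound g->pmu_flcn to its ops.

/*
 * Hypothetical caller, for illustration only: reset the PMU falcon through
 * the new common layer and wait for it to report idle. Error codes follow
 * the -EINVAL/-EBUSY convention used by the functions in this change.
 */
static int example_restart_pmu_falcon(struct gk20a *g)
{
        struct nvgpu_falcon *flcn = &g->pmu_flcn;
        int err;

        /* Dispatches to flcn_ops.reset (gk20a_flcn_reset on gk20a). */
        err = nvgpu_flcn_reset(flcn);
        if (err)
                return err;

        /*
         * Polls flcn_ops.is_falcon_idle under the retry timeout configured
         * in nvgpu_flcn_wait_idle().
         */
        return nvgpu_flcn_wait_idle(flcn);
}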
@@ -16,6 +16,106 @@
 
 #include "gk20a/gk20a.h"
 
+int nvgpu_flcn_wait_idle(struct nvgpu_falcon *flcn)
+{
+        struct gk20a *g = flcn->g;
+        struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+        struct nvgpu_timeout timeout;
+        u32 idle_stat;
+
+        if (!flcn_ops->is_falcon_idle) {
+                nvgpu_warn(g, "Invalid op on falcon 0x%x ", flcn->flcn_id);
+                return -EINVAL;
+        }
+
+        nvgpu_timeout_init(g, &timeout, 2000, NVGPU_TIMER_RETRY_TIMER);
+
+        /* wait for falcon idle */
+        do {
+                idle_stat = flcn_ops->is_falcon_idle(flcn);
+
+                if (idle_stat)
+                        break;
+
+                if (nvgpu_timeout_expired_msg(&timeout,
+                        "waiting for falcon idle: 0x%08x", idle_stat))
+                        return -EBUSY;
+
+                nvgpu_usleep_range(100, 200);
+        } while (1);
+
+        return 0;
+}
+
+int nvgpu_flcn_reset(struct nvgpu_falcon *flcn)
+{
+        int status = -EINVAL;
+
+        if (flcn->flcn_ops.reset)
+                status = flcn->flcn_ops.reset(flcn);
+        else
+                nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
+                        flcn->flcn_id);
+
+        return status;
+}
+
+void nvgpu_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable,
+        u32 intr_mask, u32 intr_dest)
+{
+        struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+
+        if (flcn_ops->set_irq) {
+                flcn->intr_mask = intr_mask;
+                flcn->intr_dest = intr_dest;
+                flcn_ops->set_irq(flcn, enable);
+        } else
+                nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
+                        flcn->flcn_id);
+}
+
+bool nvgpu_flcn_get_mem_scrubbing_status(struct nvgpu_falcon *flcn)
+{
+        struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+        bool status = false;
+
+        if (flcn_ops->is_falcon_scrubbing_done)
+                status = flcn_ops->is_falcon_scrubbing_done(flcn);
+        else
+                nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
+                        flcn->flcn_id);
+
+        return status;
+}
+
+bool nvgpu_flcn_get_cpu_halted_status(struct nvgpu_falcon *flcn)
+{
+        struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+        bool status = false;
+
+        if (flcn_ops->is_falcon_cpu_halted)
+                status = flcn_ops->is_falcon_cpu_halted(flcn);
+        else
+                nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
+                        flcn->flcn_id);
+
+        return status;
+}
+
+bool nvgpu_flcn_get_idle_status(struct nvgpu_falcon *flcn)
+{
+        struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
+        bool status = false;
+
+        if (flcn_ops->is_falcon_idle)
+                status = flcn_ops->is_falcon_idle(flcn);
+        else
+                nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
+                        flcn->flcn_id);
+
+        return status;
+}
+
 void nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id)
 {
         struct nvgpu_falcon *flcn = NULL;

@@ -25,6 +125,7 @@ void nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id)
         case FALCON_ID_PMU:
                 flcn = &g->pmu_flcn;
                 flcn->flcn_id = flcn_id;
+                g->pmu.flcn = &g->pmu_flcn;
                 break;
         case FALCON_ID_SEC2:
                 flcn = &g->sec2_flcn;

@@ -37,7 +138,7 @@ void nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id)
         case FALCON_ID_GPCCS:
                 flcn = &g->gpccs_flcn;
                 flcn->flcn_id = flcn_id;
                 break;
-                break;
         default:
                 nvgpu_err(g, "Invalid/Unsupported falcon ID %x", flcn_id);
                 break;
@@ -10,56 +10,145 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  */
 #include <nvgpu/falcon.h>
 
 #include "gk20a/gk20a.h"
 
+#include <nvgpu/hw/gk20a/hw_falcon_gk20a.h>
+
+static int gk20a_flcn_reset(struct nvgpu_falcon *flcn)
+{
+        struct gk20a *g = flcn->g;
+        u32 base_addr = flcn->flcn_base;
+        u32 unit_status = 0;
+        int status = 0;
+
+        if (flcn->flcn_engine_dep_ops.reset_eng)
+                /* falcon & engine reset */
+                status = flcn->flcn_engine_dep_ops.reset_eng(g);
+        else {
+                /* do falcon CPU hard reset */
+                unit_status = gk20a_readl(g, base_addr +
+                        falcon_falcon_cpuctl_r());
+                gk20a_writel(g, base_addr + falcon_falcon_cpuctl_r(),
+                        (unit_status | falcon_falcon_cpuctl_hreset_f(1)));
+        }
+
+        return status;
+}
+
+static void gk20a_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable)
+{
+        struct gk20a *g = flcn->g;
+        u32 base_addr = flcn->flcn_base;
+
+        if (!flcn->is_interrupt_enabled) {
+                nvgpu_warn(g, "Interrupt not supported on flcn 0x%x ",
+                        flcn->flcn_id);
+                /* Keep interrupt disabled */
+                enable = false;
+        }
+
+        if (enable) {
+                gk20a_writel(g, base_addr + falcon_falcon_irqmset_r(),
+                        flcn->intr_mask);
+                gk20a_writel(g, base_addr + falcon_falcon_irqdest_r(),
+                        flcn->intr_dest);
+        } else
+                gk20a_writel(g, base_addr + falcon_falcon_irqmclr_r(),
+                        0xffffffff);
+}
+
+static bool gk20a_is_falcon_cpu_halted(struct nvgpu_falcon *flcn)
+{
+        struct gk20a *g = flcn->g;
+        u32 base_addr = flcn->flcn_base;
+
+        return (gk20a_readl(g, base_addr + falcon_falcon_cpuctl_r()) &
+                falcon_falcon_cpuctl_halt_intr_m() ?
+                true : false);
+}
+
+static bool gk20a_is_falcon_idle(struct nvgpu_falcon *flcn)
+{
+        struct gk20a *g = flcn->g;
+        u32 base_addr = flcn->flcn_base;
+        u32 unit_status = 0;
+        bool status = false;
+
+        unit_status = gk20a_readl(g,
+                base_addr + falcon_falcon_idlestate_r());
+
+        if (falcon_falcon_idlestate_falcon_busy_v(unit_status) == 0 &&
+                falcon_falcon_idlestate_ext_busy_v(unit_status) == 0)
+                status = true;
+        else
+                status = false;
+
+        return status;
+}
+
+static bool gk20a_is_falcon_scrubbing_done(struct nvgpu_falcon *flcn)
+{
+        struct gk20a *g = flcn->g;
+        u32 base_addr = flcn->flcn_base;
+        u32 unit_status = 0;
+        bool status = false;
+
+        unit_status = gk20a_readl(g,
+                base_addr + falcon_falcon_dmactl_r());
+
+        if (unit_status & (falcon_falcon_dmactl_dmem_scrubbing_m() |
+                falcon_falcon_dmactl_imem_scrubbing_m()))
+                status = false;
+        else
+                status = true;
+
+        return status;
+}
+
 static void gk20a_falcon_ops(struct nvgpu_falcon *flcn)
 {
         struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
-        struct nvgpu_falcon_version_ops *flcn_vops = &flcn->flcn_vops;
 
-        flcn_ops->reset = NULL;
-        flcn_ops->enable_irq = NULL;
-        flcn_ops->fbif_transcfg = NULL;
-        flcn_ops->read_hwcfg = NULL;
-        flcn_ops->write_hwcfg = NULL;
-        flcn_ops->copy_from_dmem = NULL;
-        flcn_ops->copy_to_dmem = NULL;
-        flcn_ops->dma_copy = NULL;
-        flcn_ops->mailbox_read = NULL;
-        flcn_ops->mailbox_write = NULL;
-        flcn_ops->get_unit_status = NULL;
-        flcn_ops->dump_falcon_stats = NULL;
-
-        flcn_vops->start_cpu_secure = NULL;
-        flcn_vops->write_dmatrfbase = NULL;
+        flcn_ops->reset = gk20a_flcn_reset;
+        flcn_ops->set_irq = gk20a_flcn_set_irq;
+        flcn_ops->is_falcon_cpu_halted = gk20a_is_falcon_cpu_halted;
+        flcn_ops->is_falcon_idle = gk20a_is_falcon_idle;
+        flcn_ops->is_falcon_scrubbing_done = gk20a_is_falcon_scrubbing_done;
 }
 
 static void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
 {
         struct gk20a *g = flcn->g;
 
         switch (flcn->flcn_id) {
         case FALCON_ID_PMU:
                 flcn->flcn_base = FALCON_PWR_BASE;
+                flcn->is_falcon_supported = true;
+                flcn->is_interrupt_enabled = true;
                 break;
         case FALCON_ID_SEC2:
                 flcn->flcn_base = FALCON_SEC_BASE;
                 break;
         case FALCON_ID_FECS:
                 flcn->flcn_base = FALCON_FECS_BASE;
+                flcn->is_falcon_supported = true;
+                flcn->is_interrupt_enabled = false;
                 break;
         case FALCON_ID_GPCCS:
                 flcn->flcn_base = FALCON_GPCCS_BASE;
+                flcn->is_falcon_supported = true;
+                flcn->is_interrupt_enabled = false;
                 break;
         default:
+                flcn->is_falcon_supported = false;
                 nvgpu_err(g, "Invalid flcn request");
                 break;
         }
 
         nvgpu_mutex_init(&flcn->copy_lock);
 
-        gk20a_falcon_ops(flcn);
+        if (flcn->is_falcon_supported) {
+                gk20a_falcon_ops(flcn);
+        } else
+                nvgpu_info(g, "flcn-Id 0x%x not supported on current chip",
+                        flcn->flcn_id);
 }
 
 void gk20a_falcon_init_hal(struct gpu_ops *gops)
@@ -14,6 +14,9 @@
 #ifndef __FALCON_H__
 #define __FALCON_H__
 
+#include <nvgpu/types.h>
+#include <nvgpu/lock.h>
+
 /*
  * Falcon Id Defines
  */

@@ -86,16 +89,6 @@ enum flcn_hwcfg_write {
         FALCON_ITF_EN
 };
 
-/*
- * Falcon sub unit Id Defines
- */
-enum flcn_unit_status {
-        IS_FALCON_IN_RESET = 0x0,
-        IS_FALCON_CPU_HALTED,
-        IS_FALCON_IDLE,
-        IS_FALCON_MEM_SURBBING_DONE
-};
-
 #define FALCON_MEM_SCRUBBING_TIMEOUT_MAX 1000
 #define FALCON_MEM_SCRUBBING_TIMEOUT_DEFAULT 10
 
@@ -128,16 +121,17 @@ struct nvgpu_falcon_version_ops {
         void (*write_dmatrfbase)(struct nvgpu_falcon *flcn, u32 addr);
 };
 
+/* ops which are falcon engine specific */
+struct nvgpu_falcon_engine_dependency_ops {
+        int (*reset_eng)(struct gk20a *g);
+};
+
 struct nvgpu_falcon_ops {
-        void (*reset)(struct nvgpu_falcon *flcn, bool enable);
-        void (*enable_irq)(struct nvgpu_falcon *flcn, bool enable);
-        void (*fbif_transcfg)(struct nvgpu_falcon *flcn);
-        u32 (*read_hwcfg)(struct nvgpu_falcon *flcn,
-                enum flcn_hwcfg_read cfg_type);
-        void (*write_hwcfg)(struct nvgpu_falcon *flcn,
-                enum flcn_hwcfg_write cfg_type, u32 cfg_data);
-        bool (*get_unit_status)(struct nvgpu_falcon *flcn,
-                enum flcn_unit_status unit_id);
+        int (*reset)(struct nvgpu_falcon *flcn);
+        void (*set_irq)(struct nvgpu_falcon *flcn, bool enable);
+        bool (*is_falcon_cpu_halted)(struct nvgpu_falcon *flcn);
+        bool (*is_falcon_idle)(struct nvgpu_falcon *flcn);
+        bool (*is_falcon_scrubbing_done)(struct nvgpu_falcon *flcn);
         int (*copy_from_dmem)(struct nvgpu_falcon *flcn, u32 src, u8 *dst,
                 u32 size, u8 port);
         int (*copy_to_dmem)(struct nvgpu_falcon *flcn, u32 dst, u8 *src,

@@ -159,20 +153,25 @@ struct nvgpu_falcon {
         u32 flcn_id;
         u32 flcn_base;
         u32 flcn_core_rev;
+        bool is_falcon_supported;
+        bool is_interrupt_enabled;
+        u32 intr_mask;
+        u32 intr_dest;
         bool isr_enabled;
         struct nvgpu_mutex isr_mutex;
         struct nvgpu_mutex copy_lock;
         struct nvgpu_falcon_ops flcn_ops;
         struct nvgpu_falcon_version_ops flcn_vops;
+        struct nvgpu_falcon_engine_dependency_ops flcn_engine_dep_ops;
 };
 
 int nvgpu_flcn_wait_idle(struct nvgpu_falcon *flcn);
 int nvgpu_flcn_enable_hw(struct nvgpu_falcon *flcn, bool enable);
 int nvgpu_flcn_reset(struct nvgpu_falcon *flcn);
-void nvgpu_flcn_enable_irq(struct nvgpu_falcon *flcn, bool enable);
-void nvgpu_flcn_fbif_transcfg(struct nvgpu_falcon *flcn);
-bool nvgpu_flcn_get_unit_status(struct nvgpu_falcon *flcn,
-        enum flcn_unit_status unit_id);
+void nvgpu_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable,
+        u32 intr_mask, u32 intr_dest);
+bool nvgpu_flcn_get_mem_scrubbing_status(struct nvgpu_falcon *flcn);
+bool nvgpu_flcn_get_cpu_halted_status(struct nvgpu_falcon *flcn);
+bool nvgpu_flcn_get_idle_status(struct nvgpu_falcon *flcn);
 int nvgpu_flcn_copy_from_mem(struct nvgpu_falcon *flcn,
         enum flcn_mem_type mem_type, u32 src, u8 *dst, u32 size, u8 port);
 int nvgpu_flcn_copy_to_mem(struct nvgpu_falcon *flcn,
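A closing, hedged sketch of how the engine dependency hook introduced above is meant to be used: gk20a_flcn_reset() prefers an engine-level reset when reset_eng is populated and falls back to a falcon CPU hard reset otherwise. The names example_pmu_engine_reset() and example_bind_pmu_engine_reset() are illustrative only; the real wiring belongs to engine/chip-specific code and is not part of this commit.

/* Illustrative engine-level reset hook (not from this change). */
static int example_pmu_engine_reset(struct gk20a *g)
{
        /* A chip-specific engine reset sequence would go here. */
        return 0;
}

static void example_bind_pmu_engine_reset(struct gk20a *g)
{
        struct nvgpu_falcon *flcn = &g->pmu_flcn;

        /*
         * With reset_eng set, gk20a_flcn_reset() calls it instead of writing
         * falcon_falcon_cpuctl_hreset_f(1) into falcon_falcon_cpuctl_r().
         */
        flcn->flcn_engine_dep_ops.reset_eng = example_pmu_engine_reset;
}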