gpu: nvgpu: make flcn read/write non chip specific

The current falcon-type-agnostic readl/writel helpers are named
gk20a_falcon_readl/writel and are static.
This change will:
* rename them to nvgpu_falcon_readl/writel,
* make them non-static,
* replace the corresponding usages.

JIRA NVGPU-5736

Change-Id: I825c55a1f7eb95d54584f20070984ddefa607fa1
Signed-off-by: smadhavan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2421149
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Authored by smadhavan on 2020-09-29 12:43:50 +05:30
Committed by Alex Waterman
parent c0b9ae2f17
commit 1a6a819709
3 changed files with 115 additions and 84 deletions
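
The change is mechanical at call sites: the chip-prefixed static helper is replaced by the common one. A minimal before/after sketch (illustrative only; the mailbox register is just an example offset):

	/* before: static helper local to the gk20a falcon code */
	data = gk20a_falcon_readl(flcn, falcon_falcon_mailbox0_r());

	/* after: common, non-static helper declared in the falcon header */
	data = nvgpu_falcon_readl(flcn, falcon_falcon_mailbox0_r());

Both variants resolve to nvgpu_readl()/nvgpu_writel() at flcn->flcn_base + offset, so behavior is unchanged.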


@@ -22,6 +22,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/timers.h>
 #include <nvgpu/falcon.h>
+#include <nvgpu/io.h>
 #include <nvgpu/static_analysis.h>
 #include "falcon_sw_gk20a.h"
@@ -47,6 +48,18 @@ static bool is_falcon_valid(struct nvgpu_falcon *flcn)
 	return true;
 }
 
+u32 nvgpu_falcon_readl(struct nvgpu_falcon *flcn, u32 offset)
+{
+	return nvgpu_readl(flcn->g,
+			nvgpu_safe_add_u32(flcn->flcn_base, offset));
+}
+
+void nvgpu_falcon_writel(struct nvgpu_falcon *flcn,
+		u32 offset, u32 val)
+{
+	nvgpu_writel(flcn->g, nvgpu_safe_add_u32(flcn->flcn_base, offset), val);
+}
+
 int nvgpu_falcon_reset(struct nvgpu_falcon *flcn)
 {
 	struct gk20a *g;

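With the helpers now exported, code outside the gk20a falcon file can perform base-relative register accesses directly. A hedged sketch of a caller (example_falcon_hreset is a hypothetical name; the read-modify-write mirrors gk20a_falcon_reset() in the next file):

	/* Hypothetical caller: hard-resets the falcon CPU via the common
	 * helpers; the register offset is added to flcn->flcn_base
	 * internally with nvgpu_safe_add_u32(). */
	static void example_falcon_hreset(struct nvgpu_falcon *flcn)
	{
		u32 cpuctl = nvgpu_falcon_readl(flcn, falcon_falcon_cpuctl_r());

		nvgpu_falcon_writel(flcn, falcon_falcon_cpuctl_r(),
				cpuctl | falcon_falcon_cpuctl_hreset_f(1));
	}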

@@ -19,7 +19,6 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include <nvgpu/io.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/falcon.h>
 #include <nvgpu/string.h>
@@ -39,31 +38,19 @@ u32 gk20a_falcon_imemc_blk_field(u32 blk)
 	return falcon_falcon_imemc_blk_f(blk);
 }
 
-static inline u32 gk20a_falcon_readl(struct nvgpu_falcon *flcn, u32 offset)
-{
-	return nvgpu_readl(flcn->g,
-			nvgpu_safe_add_u32(flcn->flcn_base, offset));
-}
-
-static inline void gk20a_falcon_writel(struct nvgpu_falcon *flcn,
-		u32 offset, u32 val)
-{
-	nvgpu_writel(flcn->g, nvgpu_safe_add_u32(flcn->flcn_base, offset), val);
-}
-
 void gk20a_falcon_reset(struct nvgpu_falcon *flcn)
 {
 	u32 unit_status = 0U;
 
 	/* do falcon CPU hard reset */
-	unit_status = gk20a_falcon_readl(flcn, falcon_falcon_cpuctl_r());
-	gk20a_falcon_writel(flcn, falcon_falcon_cpuctl_r(),
+	unit_status = nvgpu_falcon_readl(flcn, falcon_falcon_cpuctl_r());
+	nvgpu_falcon_writel(flcn, falcon_falcon_cpuctl_r(),
 			(unit_status | falcon_falcon_cpuctl_hreset_f(1)));
 }
 
 bool gk20a_is_falcon_cpu_halted(struct nvgpu_falcon *flcn)
 {
-	return ((gk20a_falcon_readl(flcn, falcon_falcon_cpuctl_r()) &
+	return ((nvgpu_falcon_readl(flcn, falcon_falcon_cpuctl_r()) &
 		falcon_falcon_cpuctl_halt_intr_m()) != 0U);
 }
@@ -72,7 +59,7 @@ bool gk20a_is_falcon_idle(struct nvgpu_falcon *flcn)
 	u32 unit_status = 0U;
 	bool status = false;
 
-	unit_status = gk20a_falcon_readl(flcn, falcon_falcon_idlestate_r());
+	unit_status = nvgpu_falcon_readl(flcn, falcon_falcon_idlestate_r());
 
 	if ((falcon_falcon_idlestate_falcon_busy_v(unit_status) == 0U) &&
 		(falcon_falcon_idlestate_ext_busy_v(unit_status) == 0U)) {
@@ -89,7 +76,7 @@ bool gk20a_is_falcon_scrubbing_done(struct nvgpu_falcon *flcn)
 	u32 unit_status = 0U;
 	bool status = false;
 
-	unit_status = gk20a_falcon_readl(flcn, falcon_falcon_dmactl_r());
+	unit_status = nvgpu_falcon_readl(flcn, falcon_falcon_dmactl_r());
 
 	if ((unit_status &
 		(falcon_falcon_dmactl_dmem_scrubbing_m() |
@@ -108,7 +95,7 @@ u32 gk20a_falcon_get_mem_size(struct nvgpu_falcon *flcn,
 	u32 mem_size = 0U;
 	u32 hwcfg_val = 0U;
 
-	hwcfg_val = gk20a_falcon_readl(flcn, falcon_falcon_hwcfg_r());
+	hwcfg_val = nvgpu_falcon_readl(flcn, falcon_falcon_hwcfg_r());
 
 	if (mem_type == MEM_DMEM) {
 		mem_size = falcon_falcon_hwcfg_dmem_size_v(hwcfg_val)
@@ -127,7 +114,7 @@ u8 gk20a_falcon_get_ports_count(struct nvgpu_falcon *flcn,
 	u8 ports = 0U;
 	u32 hwcfg1_val = 0U;
 
-	hwcfg1_val = gk20a_falcon_readl(flcn, falcon_falcon_hwcfg1_r());
+	hwcfg1_val = nvgpu_falcon_readl(flcn, falcon_falcon_hwcfg1_r());
 
 	if (mem_type == MEM_DMEM) {
 		ports = (u8) falcon_falcon_hwcfg1_dmem_ports_v(hwcfg1_val);
@@ -154,7 +141,7 @@ static void falcon_copy_to_dmem_unaligned_src(struct nvgpu_falcon *flcn,
 		nvgpu_memcpy((u8 *)&src_tmp[0], &src[offset],
 			sizeof(src_tmp));
 		for (i = 0; i < ARRAY_SIZE(src_tmp); i++) {
-			gk20a_falcon_writel(flcn,
+			nvgpu_falcon_writel(flcn,
 				falcon_falcon_dmemd_r(port),
 				src_tmp[i]);
 		}
@@ -170,7 +157,7 @@ static void falcon_copy_to_dmem_unaligned_src(struct nvgpu_falcon *flcn,
 		nvgpu_memcpy((u8 *)&src_tmp[0], &src[offset],
 			(u64)elems * elem_size);
 		for (i = 0; i < elems; i++) {
-			gk20a_falcon_writel(flcn,
+			nvgpu_falcon_writel(flcn,
 				falcon_falcon_dmemd_r(port),
 				src_tmp[i]);
 		}
@@ -195,7 +182,7 @@ int gk20a_falcon_copy_to_dmem(struct nvgpu_falcon *flcn,
 	dst &= addr_mask;
 
-	gk20a_falcon_writel(flcn, falcon_falcon_dmemc_r(port),
+	nvgpu_falcon_writel(flcn, falcon_falcon_dmemc_r(port),
 			dst | falcon_falcon_dmemc_aincw_f(1));
 
 	if (likely(nvgpu_mem_is_word_aligned(flcn->g, src))) {
@@ -203,7 +190,7 @@ NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 11_3), "TID-415")
 		src_u32 = (u32 *)src;
 
 		for (i = 0; i < words; i++) {
-			gk20a_falcon_writel(flcn, falcon_falcon_dmemd_r(port),
+			nvgpu_falcon_writel(flcn, falcon_falcon_dmemd_r(port),
 				src_u32[i]);
 		}
 	} else {
@@ -213,11 +200,11 @@ NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 11_3), "TID-415")
 	if (bytes > 0U) {
 		data = 0;
 		nvgpu_memcpy((u8 *)&data, &src[words << 2U], bytes);
-		gk20a_falcon_writel(flcn, falcon_falcon_dmemd_r(port), data);
+		nvgpu_falcon_writel(flcn, falcon_falcon_dmemd_r(port), data);
 	}
 
 	size = ALIGN(size, 4U);
-	data = gk20a_falcon_readl(flcn, falcon_falcon_dmemc_r(port)) &
+	data = nvgpu_falcon_readl(flcn, falcon_falcon_dmemc_r(port)) &
 		addr_mask;
 	if (data != (nvgpu_safe_add_u32(dst, size) & addr_mask)) {
 		nvgpu_warn(flcn->g, "copy failed. bytes written %d, expected %d",
@@ -246,12 +233,12 @@ static void falcon_copy_to_imem_unaligned_src(struct nvgpu_falcon *flcn,
 		for (i = 0; i < ARRAY_SIZE(src_tmp); i++) {
 			if ((j++ % 64U) == 0U) {
 				/* tag is always 256B aligned */
-				gk20a_falcon_writel(flcn,
+				nvgpu_falcon_writel(flcn,
 					falcon_falcon_imemt_r(port), tag);
 				tag = nvgpu_safe_add_u32(tag, 1U);
 			}
-			gk20a_falcon_writel(flcn,
+			nvgpu_falcon_writel(flcn,
 				falcon_falcon_imemd_r(port),
 				src_tmp[i]);
 		}
@@ -269,12 +256,12 @@ static void falcon_copy_to_imem_unaligned_src(struct nvgpu_falcon *flcn,
 		for (i = 0; i < elems; i++) {
 			if ((j++ % 64U) == 0U) {
 				/* tag is always 256B aligned */
-				gk20a_falcon_writel(flcn,
+				nvgpu_falcon_writel(flcn,
 					falcon_falcon_imemt_r(port), tag);
 				tag = nvgpu_safe_add_u32(tag, 1U);
 			}
-			gk20a_falcon_writel(flcn,
+			nvgpu_falcon_writel(flcn,
 				falcon_falcon_imemd_r(port),
 				src_tmp[i]);
 		}
@@ -282,7 +269,7 @@ static void falcon_copy_to_imem_unaligned_src(struct nvgpu_falcon *flcn,
 	/* WARNING : setting remaining bytes in block to 0x0 */
 	while ((j % 64U) != 0U) {
-		gk20a_falcon_writel(flcn,
+		nvgpu_falcon_writel(flcn,
 			falcon_falcon_imemd_r(port), 0);
 		j++;
 	}
@@ -305,7 +292,7 @@ int gk20a_falcon_copy_to_imem(struct nvgpu_falcon *flcn, u32 dst,
 	nvgpu_log_info(flcn->g, "upload %d words to 0x%x block %d, tag 0x%x",
 		words, dst, blk, tag);
 
-	gk20a_falcon_writel(flcn, falcon_falcon_imemc_r(port),
+	nvgpu_falcon_writel(flcn, falcon_falcon_imemc_r(port),
 		falcon_falcon_imemc_offs_f(dst >> 2) |
 		g->ops.falcon.imemc_blk_field(blk) |
 		/* Set Auto-Increment on write */
@@ -319,18 +306,18 @@ NVGPU_COV_WHITELIST(deviate, NVGPU_MISRA(Rule, 11_3), "TID-415")
 	for (i = 0U; i < words; i++) {
 		if ((i % 64U) == 0U) {
 			/* tag is always 256B aligned */
-			gk20a_falcon_writel(flcn,
+			nvgpu_falcon_writel(flcn,
 				falcon_falcon_imemt_r(port), tag);
 			tag = nvgpu_safe_add_u32(tag, 1U);
 		}
 
-		gk20a_falcon_writel(flcn, falcon_falcon_imemd_r(port),
+		nvgpu_falcon_writel(flcn, falcon_falcon_imemd_r(port),
 			src_u32[i]);
 	}
 
 	/* WARNING : setting remaining bytes in block to 0x0 */
 	while ((i % 64U) != 0U) {
-		gk20a_falcon_writel(flcn,
+		nvgpu_falcon_writel(flcn,
 			falcon_falcon_imemd_r(port), 0);
 		i++;
 	}
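
A note on the tag arithmetic in the IMEM hunks above: IMEM is written through an auto-incrementing port in 256-byte blocks, and each block carries a tag. One word is 4 bytes, so a block holds 256 / 4 = 64 words, which is why a new tag is programmed whenever the word index crosses a multiple of 64 (the "tag is always 256B aligned" comments). A minimal sketch of that relationship, reusing the variables from the diff and a hypothetical words_per_block constant:

	/* Illustrative only: one IMEM block = 256 B = 64 x 32-bit words,
	 * so the tag register is rewritten once per 64 data words. */
	const u32 words_per_block = 256U / (u32)sizeof(u32);	/* 64 */

	for (i = 0U; i < words; i++) {
		if ((i % words_per_block) == 0U) {
			nvgpu_falcon_writel(flcn,
				falcon_falcon_imemt_r(port), tag);
			tag = nvgpu_safe_add_u32(tag, 1U);
		}
		nvgpu_falcon_writel(flcn, falcon_falcon_imemd_r(port),
			src_u32[i]);
	}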
@@ -347,20 +334,20 @@ void gk20a_falcon_bootstrap(struct nvgpu_falcon *flcn,
 {
 	nvgpu_log_info(flcn->g, "boot vec 0x%x", boot_vector);
 
-	gk20a_falcon_writel(flcn, falcon_falcon_dmactl_r(),
+	nvgpu_falcon_writel(flcn, falcon_falcon_dmactl_r(),
 		falcon_falcon_dmactl_require_ctx_f(0));
 
-	gk20a_falcon_writel(flcn, falcon_falcon_bootvec_r(),
+	nvgpu_falcon_writel(flcn, falcon_falcon_bootvec_r(),
 		falcon_falcon_bootvec_vec_f(boot_vector));
 
-	gk20a_falcon_writel(flcn, falcon_falcon_cpuctl_r(),
+	nvgpu_falcon_writel(flcn, falcon_falcon_cpuctl_r(),
 		falcon_falcon_cpuctl_startcpu_f(1));
 }
 
 u32 gk20a_falcon_mailbox_read(struct nvgpu_falcon *flcn,
 	u32 mailbox_index)
 {
-	return gk20a_falcon_readl(flcn, (mailbox_index != 0U) ?
+	return nvgpu_falcon_readl(flcn, (mailbox_index != 0U) ?
 			falcon_falcon_mailbox1_r() :
 			falcon_falcon_mailbox0_r());
 }
@@ -368,7 +355,7 @@ u32 gk20a_falcon_mailbox_read(struct nvgpu_falcon *flcn,
 void gk20a_falcon_mailbox_write(struct nvgpu_falcon *flcn,
 	u32 mailbox_index, u32 data)
 {
-	gk20a_falcon_writel(flcn, (mailbox_index != 0U) ?
+	nvgpu_falcon_writel(flcn, (mailbox_index != 0U) ?
 			falcon_falcon_mailbox1_r() :
 			falcon_falcon_mailbox0_r(), data);
 }
@@ -377,10 +364,10 @@ void gk20a_falcon_set_irq(struct nvgpu_falcon *flcn, bool enable,
 	u32 intr_mask, u32 intr_dest)
 {
 	if (enable) {
-		gk20a_falcon_writel(flcn, falcon_falcon_irqmset_r(), intr_mask);
-		gk20a_falcon_writel(flcn, falcon_falcon_irqdest_r(), intr_dest);
+		nvgpu_falcon_writel(flcn, falcon_falcon_irqmset_r(), intr_mask);
+		nvgpu_falcon_writel(flcn, falcon_falcon_irqdest_r(), intr_dest);
 	} else {
-		gk20a_falcon_writel(flcn, falcon_falcon_irqmclr_r(),
+		nvgpu_falcon_writel(flcn, falcon_falcon_irqmclr_r(),
 				0xffffffffU);
 	}
 }
@@ -396,7 +383,7 @@ static void gk20a_falcon_dump_imblk(struct nvgpu_falcon *flcn)
 	g = flcn->g;
 
 	block_count = falcon_falcon_hwcfg_imem_size_v(
-				gk20a_falcon_readl(flcn,
+				nvgpu_falcon_readl(flcn,
 					falcon_falcon_hwcfg_r()));
 
 	/* block_count must be multiple of 8 */
@@ -406,11 +393,11 @@ static void gk20a_falcon_dump_imblk(struct nvgpu_falcon *flcn)
 	for (i = 0U; i < block_count; i += 8U) {
 		for (j = 0U; j < 8U; j++) {
-			gk20a_falcon_writel(flcn, falcon_falcon_imctl_debug_r(),
+			nvgpu_falcon_writel(flcn, falcon_falcon_imctl_debug_r(),
 				falcon_falcon_imctl_debug_cmd_f(0x2) |
 				falcon_falcon_imctl_debug_addr_blk_f(i + j));
 
-			data[j] = gk20a_falcon_readl(flcn,
+			data[j] = nvgpu_falcon_readl(flcn,
 					falcon_falcon_imstat_r());
 		}
@@ -430,24 +417,24 @@ static void gk20a_falcon_dump_pc_trace(struct nvgpu_falcon *flcn)
 	g = flcn->g;
 
-	if ((gk20a_falcon_readl(flcn, falcon_falcon_sctl_r()) & 0x02U) != 0U) {
+	if ((nvgpu_falcon_readl(flcn, falcon_falcon_sctl_r()) & 0x02U) != 0U) {
 		nvgpu_err(g, " falcon is in HS mode, PC TRACE dump not supported");
 		return;
 	}
 
 	trace_pc_count = falcon_falcon_traceidx_maxidx_v(
-				gk20a_falcon_readl(flcn,
+				nvgpu_falcon_readl(flcn,
 					falcon_falcon_traceidx_r()));
 	nvgpu_err(g,
 		"PC TRACE (TOTAL %d ENTRIES. entry 0 is the most recent branch):",
 		trace_pc_count);
 
 	for (i = 0; i < trace_pc_count; i++) {
-		gk20a_falcon_writel(flcn, falcon_falcon_traceidx_r(),
+		nvgpu_falcon_writel(flcn, falcon_falcon_traceidx_r(),
 			falcon_falcon_traceidx_idx_f(i));
 
 		pc = falcon_falcon_tracepc_pc_v(
-			gk20a_falcon_readl(flcn, falcon_falcon_tracepc_r()));
+			nvgpu_falcon_readl(flcn, falcon_falcon_tracepc_r()));
 		nvgpu_err(g, "FALCON_TRACEPC(%d) : %#010x", i, pc);
 	}
 }
@@ -470,92 +457,92 @@ void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn)
 	nvgpu_err(g, "FALCON ICD REGISTERS DUMP");
 
 	for (i = 0U; i < 4U; i++) {
-		gk20a_falcon_writel(flcn,
+		nvgpu_falcon_writel(flcn,
 			falcon_falcon_icd_cmd_r(),
 			falcon_falcon_icd_cmd_opc_rreg_f() |
 			falcon_falcon_icd_cmd_idx_f(FALCON_REG_PC));
 		nvgpu_err(g, "FALCON_REG_PC : 0x%x",
-			gk20a_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
+			nvgpu_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
 
-		gk20a_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
+		nvgpu_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
 			falcon_falcon_icd_cmd_opc_rreg_f() |
 			falcon_falcon_icd_cmd_idx_f(FALCON_REG_SP));
 		nvgpu_err(g, "FALCON_REG_SP : 0x%x",
-			gk20a_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
+			nvgpu_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
 	}
 
-	gk20a_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
+	nvgpu_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
 		falcon_falcon_icd_cmd_opc_rreg_f() |
 		falcon_falcon_icd_cmd_idx_f(FALCON_REG_IMB));
 	nvgpu_err(g, "FALCON_REG_IMB : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
 
-	gk20a_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
+	nvgpu_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
 		falcon_falcon_icd_cmd_opc_rreg_f() |
 		falcon_falcon_icd_cmd_idx_f(FALCON_REG_DMB));
 	nvgpu_err(g, "FALCON_REG_DMB : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
 
-	gk20a_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
+	nvgpu_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
 		falcon_falcon_icd_cmd_opc_rreg_f() |
 		falcon_falcon_icd_cmd_idx_f(FALCON_REG_CSW));
 	nvgpu_err(g, "FALCON_REG_CSW : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
 
-	gk20a_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
+	nvgpu_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
 		falcon_falcon_icd_cmd_opc_rreg_f() |
 		falcon_falcon_icd_cmd_idx_f(FALCON_REG_CTX));
 	nvgpu_err(g, "FALCON_REG_CTX : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
 
-	gk20a_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
+	nvgpu_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
 		falcon_falcon_icd_cmd_opc_rreg_f() |
 		falcon_falcon_icd_cmd_idx_f(FALCON_REG_EXCI));
 	nvgpu_err(g, "FALCON_REG_EXCI : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
 
 	for (i = 0U; i < 6U; i++) {
-		gk20a_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
+		nvgpu_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
 			falcon_falcon_icd_cmd_opc_rreg_f() |
 			falcon_falcon_icd_cmd_idx_f(
 				falcon_falcon_icd_cmd_opc_rstat_f()));
 		nvgpu_err(g, "FALCON_REG_RSTAT[%d] : 0x%x", i,
-			gk20a_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
+			nvgpu_falcon_readl(flcn, falcon_falcon_icd_rdata_r()));
 	}
 
 	nvgpu_err(g, " FALCON REGISTERS DUMP");
 	nvgpu_err(g, "falcon_falcon_os_r : %d",
-		gk20a_falcon_readl(flcn, falcon_falcon_os_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_os_r()));
 	nvgpu_err(g, "falcon_falcon_cpuctl_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_cpuctl_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_cpuctl_r()));
 	nvgpu_err(g, "falcon_falcon_idlestate_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_idlestate_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_idlestate_r()));
 	nvgpu_err(g, "falcon_falcon_mailbox0_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_mailbox0_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_mailbox0_r()));
 	nvgpu_err(g, "falcon_falcon_mailbox1_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_mailbox1_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_mailbox1_r()));
 	nvgpu_err(g, "falcon_falcon_irqstat_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_irqstat_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_irqstat_r()));
 	nvgpu_err(g, "falcon_falcon_irqmode_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_irqmode_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_irqmode_r()));
 	nvgpu_err(g, "falcon_falcon_irqmask_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_irqmask_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_irqmask_r()));
 	nvgpu_err(g, "falcon_falcon_irqdest_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_irqdest_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_irqdest_r()));
 	nvgpu_err(g, "falcon_falcon_debug1_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_debug1_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_debug1_r()));
 	nvgpu_err(g, "falcon_falcon_debuginfo_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_debuginfo_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_debuginfo_r()));
 	nvgpu_err(g, "falcon_falcon_bootvec_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_bootvec_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_bootvec_r()));
 	nvgpu_err(g, "falcon_falcon_hwcfg_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_hwcfg_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_hwcfg_r()));
 	nvgpu_err(g, "falcon_falcon_engctl_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_engctl_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_engctl_r()));
 	nvgpu_err(g, "falcon_falcon_curctx_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_curctx_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_curctx_r()));
 	nvgpu_err(g, "falcon_falcon_nxtctx_r : 0x%x",
-		gk20a_falcon_readl(flcn, falcon_falcon_nxtctx_r()));
+		nvgpu_falcon_readl(flcn, falcon_falcon_nxtctx_r()));
 
 	/*
 	 * Common Falcon code accesses each engine's falcon registers
 	 * using engine's falcon base address + offset.
@@ -564,10 +551,10 @@ void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn)
 	 * the mask 0xFFF
 	 */
 	nvgpu_err(g, "falcon_falcon_exterrstat_r : 0x%x",
-		gk20a_falcon_readl(flcn,
+		nvgpu_falcon_readl(flcn,
 			(falcon_falcon_exterrstat_r() & 0x0FFF)));
 	nvgpu_err(g, "falcon_falcon_exterraddr_r : 0x%x",
-		gk20a_falcon_readl(flcn,
+		nvgpu_falcon_readl(flcn,
 			(falcon_falcon_exterraddr_r() & 0x0FFF)));
 }
 #endif
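
The ICD dump above repeats one pattern: write an rreg command selecting a falcon-internal register, then read the result back from ICD_RDATA. If it were ever factored out, it could look like this hedged sketch (falcon_icd_rreg is a hypothetical helper, not part of this change):

	/* Hypothetical helper: issue an ICD read-register command for
	 * reg_idx (e.g. FALCON_REG_PC) and return the captured data. */
	static u32 falcon_icd_rreg(struct nvgpu_falcon *flcn, u32 reg_idx)
	{
		nvgpu_falcon_writel(flcn, falcon_falcon_icd_cmd_r(),
			falcon_falcon_icd_cmd_opc_rreg_f() |
			falcon_falcon_icd_cmd_idx_f(reg_idx));

		return nvgpu_falcon_readl(flcn, falcon_falcon_icd_rdata_r());
	}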


@@ -237,6 +237,37 @@ struct nvgpu_falcon {
 	struct nvgpu_falcon_engine_dependency_ops flcn_engine_dep_ops;
 };
 
+/**
+ * @brief Read a falcon register.
+ *
+ * @param flcn [in] The falcon.
+ * @param offset [in] Offset of the register.
+ *
+ * This function is called to read a register at a common falcon offset.
+ *
+ * Steps:
+ * - Read and return data from the register at \a offset from the base of
+ *   \a flcn.
+ *
+ * @return register data.
+ */
+u32 nvgpu_falcon_readl(struct nvgpu_falcon *flcn, u32 offset);
+
+/**
+ * @brief Write a falcon register.
+ *
+ * @param flcn [in] The falcon.
+ * @param offset [in] Offset of the register.
+ * @param val [in] Data to be written to the register.
+ *
+ * This function is called to write to a register at a common falcon offset.
+ *
+ * Steps:
+ * - Write \a val to the register at \a offset from the base of \a flcn.
+ */
+void nvgpu_falcon_writel(struct nvgpu_falcon *flcn,
+	u32 offset, u32 val);
+
 /**
  * @brief Reset the falcon CPU or Engine.
  *