gpu: nvgpu: gr: basic falcon hal functions

Created gr falcon hal unit by moving the following hal functions
from gr to gr falcon (a short sketch of the resulting sub-unit
follows the list):
u32 (*fecs_base_addr)(void);
u32 (*gpccs_base_addr)(void);
void (*dump_stats)(struct gk20a *g);
u32 (*fecs_ctxsw_mailbox_size)(void);
u32 (*get_fecs_ctx_state_store_major_rev_id)(struct gk20a *g);
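
For reference, a minimal sketch of the new gr.falcon sub-unit inside
struct gpu_ops and of a typical call-site change; surrounding gpu_ops
fields are elided and the helper name dump_ctxsw_falcon_stats() is
only illustrative, not part of this change:

struct gpu_ops {
	struct {
		/* ... other gr hals ... */
		struct {
			u32 (*fecs_base_addr)(void);
			u32 (*gpccs_base_addr)(void);
			void (*dump_stats)(struct gk20a *g);
			u32 (*fecs_ctxsw_mailbox_size)(void);
			u32 (*get_fecs_ctx_state_store_major_rev_id)(struct gk20a *g);
		} falcon;
	} gr;
	/* ... */
};

/* Illustrative only: callers move from the flat gr namespace to gr.falcon. */
static void dump_ctxsw_falcon_stats(struct gk20a *g)
{
	/* was: g->ops.gr.dump_gr_falcon_stats(g); */
	g->ops.gr.falcon.dump_stats(g);
}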

Modified chip hals to populate these new functions; the related code
now refers to the gr falcon hals.

Modified the hw register headers to add the following definitions for
the fecs/gpccs falcon base addresses on gm20b/gp10b/gv11b/tu104
(register offsets shown below):
static inline u32 gr_fecs_irqsset_r(void);
static inline u32 gr_gpcs_gpccs_irqsset_r(void);
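
These resolve to the FECS and GPCCS falcon register bases; the
definitions added to the hw headers in this change are of the form:

static inline u32 gr_fecs_irqsset_r(void)
{
	return 0x00409000U;
}
static inline u32 gr_gpcs_gpccs_irqsset_r(void)
{
	return 0x0041a000U;
}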

Created base gm20b hals for fecs/gpccs_base_addr and removed the
redundant gp106-related hals.
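
A sketch of how the common falcon sw init now resolves these base
addresses through the new hals; the helper below is hypothetical (the
actual change patches the FALCON_ID_FECS/FALCON_ID_GPCCS cases in the
gk20a and gp106 falcon sw init):

static u32 nvgpu_ctxsw_falcon_base(struct gk20a *g, u32 falcon_id)
{
	if (falcon_id == FALCON_ID_FECS) {
		return g->ops.gr.falcon.fecs_base_addr();
	} else if (falcon_id == FALCON_ID_GPCCS) {
		return g->ops.gr.falcon.gpccs_base_addr();
	}
	return 0U;	/* unknown falcon id */
}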

JIRA NVGPU-1881

Change-Id: I16e820cc1c89223f57988f1e5723fd8fdcbfe89d
Signed-off-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2081245
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Seshendra Gadagottu
2019-03-25 14:56:09 -07:00
committed by mobile promotions
parent e4313b3a15
commit b82f2075ae
25 changed files with 212 additions and 94 deletions

View File

@@ -200,6 +200,7 @@ nvgpu-y += \
hal/therm/therm_gp10b.o \
hal/therm/therm_gp106.o \
hal/therm/therm_gv11b.o \
hal/gr/falcon/gr_falcon_gm20b.o \
hal/ltc/ltc_gm20b.o \
hal/ltc/ltc_gp10b.o \
hal/ltc/ltc_gv11b.o \

View File

@@ -335,6 +335,7 @@ srcs += common/sim.c \
hal/gr/intr/gr_intr_gv11b.c \
hal/gr/intr/gr_intr_tu104.c \
hal/gr/hwpm_map/hwpm_map_gv100.c \
hal/gr/falcon/gr_falcon_gm20b.c \
hal/gr/zbc/zbc_gm20b.c \
hal/gr/zbc/zbc_gp10b.c \
hal/gr/zbc/zbc_gv11b.c \

View File

@@ -56,12 +56,12 @@ void gk20a_falcon_sw_init(struct nvgpu_falcon *flcn)
flcn->is_interrupt_enabled = true;
break;
case FALCON_ID_FECS:
flcn->flcn_base = g->ops.gr.fecs_falcon_base_addr();
flcn->flcn_base = g->ops.gr.falcon.fecs_base_addr();
flcn->is_falcon_supported = true;
flcn->is_interrupt_enabled = false;
break;
case FALCON_ID_GPCCS:
flcn->flcn_base = g->ops.gr.gpccs_falcon_base_addr();
flcn->flcn_base = g->ops.gr.falcon.gpccs_base_addr();
flcn->is_falcon_supported = true;
flcn->is_interrupt_enabled = false;
break;

View File

@@ -62,12 +62,12 @@ void gp106_falcon_sw_init(struct nvgpu_falcon *flcn)
flcn->is_interrupt_enabled = false;
break;
case FALCON_ID_FECS:
flcn->flcn_base = g->ops.gr.fecs_falcon_base_addr();
flcn->flcn_base = g->ops.gr.falcon.fecs_base_addr();
flcn->is_falcon_supported = true;
flcn->is_interrupt_enabled = false;
break;
case FALCON_ID_GPCCS:
flcn->flcn_base = g->ops.gr.gpccs_falcon_base_addr();
flcn->flcn_base = g->ops.gr.falcon.gpccs_base_addr();
flcn->is_falcon_supported = true;
flcn->is_interrupt_enabled = false;
break;

View File

@@ -126,7 +126,8 @@ static int nvgpu_netlist_init_ctx_vars_fw(struct gk20a *g)
} else {
net = NETLIST_SLOT_A;
max = MAX_NETLIST;
major_v_hw = g->ops.gr.get_fecs_ctx_state_store_major_rev_id(g);
major_v_hw =
g->ops.gr.falcon.get_fecs_ctx_state_store_major_rev_id(g);
netlist_vars->dynamic = true;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -52,5 +52,5 @@ void nvgpu_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu)
nvgpu_err(g, "elpg state: %d", pmu->pmu_pg.elpg_stat);
/* PMU may crash due to FECS crash. Dump FECS status */
g->ops.gr.dump_gr_falcon_stats(g);
g->ops.gr.falcon.dump_stats(g);
}

View File

@@ -654,7 +654,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
mmfault_info.access_type, mmfault_info.inst_ptr);
if (ctxsw) {
g->ops.gr.dump_gr_falcon_stats(g);
g->ops.gr.falcon.dump_stats(g);
nvgpu_err(g, " gr_status_r: 0x%x",
gk20a_readl(g, gr_status_r()));
}

View File

@@ -183,19 +183,6 @@ static void gr_report_ctxsw_error(struct gk20a *g, u32 err_type, u32 chid,
}
}
void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
{
unsigned int i;
nvgpu_falcon_dump_stats(&g->fecs_flcn);
for (i = 0; i < g->ops.gr.fecs_ctxsw_mailbox_size(); i++) {
nvgpu_err(g, "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x",
i, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(i)));
}
}
static void gr_gk20a_load_falcon_dmem(struct gk20a *g)
{
u32 i, ucode_u32_size;
@@ -430,14 +417,14 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
nvgpu_err(g,
"timeout waiting on mailbox=%d value=0x%08x",
mailbox_id, reg);
g->ops.gr.dump_gr_falcon_stats(g);
g->ops.gr.falcon.dump_stats(g);
gk20a_gr_debug_dump(g);
return -1;
} else if (check == WAIT_UCODE_ERROR) {
nvgpu_err(g,
"ucode method failed on mailbox=%d value=0x%08x",
mailbox_id, reg);
g->ops.gr.dump_gr_falcon_stats(g);
g->ops.gr.falcon.dump_stats(g);
return -1;
}
@@ -2663,7 +2650,7 @@ int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch,
/* currently, recovery is not initiated */
nvgpu_err(g, "fecs watchdog triggered for channel %u, "
"cannot ctxsw anymore !!", chid);
g->ops.gr.dump_gr_falcon_stats(g);
g->ops.gr.falcon.dump_stats(g);
} else if ((gr_fecs_intr &
gr_fecs_host_int_status_ctxsw_intr_f(CTXSW_INTR0)) != 0U) {
u32 mailbox_value = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(6));
@@ -2712,7 +2699,7 @@ int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch,
nvgpu_err(g,
"unhandled fecs error interrupt 0x%08x for channel %u",
gr_fecs_intr, chid);
g->ops.gr.dump_gr_falcon_stats(g);
g->ops.gr.falcon.dump_stats(g);
}
gk20a_writel(g, gr_fecs_host_int_clear_r(), gr_fecs_intr);
@@ -5860,18 +5847,3 @@ u32 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g)
return global_esr_mask;
}
u32 gk20a_gr_get_fecs_ctx_state_store_major_rev_id(struct gk20a *g)
{
return nvgpu_readl(g, gr_fecs_ctx_state_store_major_rev_id_r());
}
u32 gr_gk20a_fecs_falcon_base_addr(void)
{
return gr_fecs_irqsset_r();
}
u32 gr_gk20a_gpccs_falcon_base_addr(void)
{
return gr_gpcs_gpccs_irqsset_r();
}

View File

@@ -210,8 +210,6 @@ struct gr_gk20a {
u32 max_ctxsw_ring_buffer_size;
};
void gk20a_fecs_dump_falcon_stats(struct gk20a *g);
struct gk20a_ctxsw_ucode_segment {
u32 offset;
u32 size;
@@ -491,7 +489,4 @@ void gk20a_gr_destroy_ctx_buffer(struct gk20a *g,
int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
struct gr_ctx_buffer_desc *desc, size_t size);
u32 gk20a_gr_get_fecs_ctx_state_store_major_rev_id(struct gk20a *g);
u32 gr_gk20a_fecs_falcon_base_addr(void);
u32 gr_gk20a_gpccs_falcon_base_addr(void);
#endif /*__GR_GK20A_H__*/

View File

@@ -58,6 +58,7 @@
#include "hal/fifo/ctxsw_timeout_gk20a.h"
#include "hal/gr/zbc/zbc_gm20b.h"
#include "hal/gr/zcull/zcull_gm20b.h"
#include "hal/gr/falcon/gr_falcon_gm20b.h"
#include "hal/gr/init/gr_init_gm20b.h"
#include "hal/gr/intr/gr_intr_gm20b.h"
#include "hal/gr/config/gr_config_gm20b.h"
@@ -251,8 +252,6 @@ static const struct gpu_ops gm20b_ops = {
.get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs,
.get_sm_dsm_perf_ctrl_regs = gr_gm20b_get_sm_dsm_perf_ctrl_regs,
.set_hww_esr_report_mask = gr_gm20b_set_hww_esr_report_mask,
.fecs_falcon_base_addr = gr_gk20a_fecs_falcon_base_addr,
.gpccs_falcon_base_addr = gr_gk20a_gpccs_falcon_base_addr,
.falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode,
.set_gpc_tpc_mask = gr_gm20b_set_gpc_tpc_mask,
@@ -313,15 +312,11 @@ static const struct gpu_ops gm20b_ops = {
.decode_priv_addr = gr_gk20a_decode_priv_addr,
.create_priv_addr_table = gr_gk20a_create_priv_addr_table,
.split_fbpa_broadcast_addr = gr_gk20a_split_fbpa_broadcast_addr,
.fecs_ctxsw_mailbox_size = gr_fecs_ctxsw_mailbox__size_1_v,
.alloc_global_ctx_buffers = gr_gk20a_alloc_global_ctx_buffers,
.commit_global_ctx_buffers = gr_gk20a_commit_global_ctx_buffers,
.get_offset_in_gpccs_segment =
gr_gk20a_get_offset_in_gpccs_segment,
.set_debug_mode = gm20b_gr_set_debug_mode,
.dump_gr_falcon_stats = gk20a_fecs_dump_falcon_stats,
.get_fecs_ctx_state_store_major_rev_id =
gk20a_gr_get_fecs_ctx_state_store_major_rev_id,
.init_gfxp_rtv_cb = NULL,
.log_mme_exception = NULL,
.halt_pipe = gr_gk20a_halt_pipe,
@@ -479,6 +474,15 @@ static const struct gpu_ops gm20b_ops = {
gm20b_gr_intr_enable_gpc_exceptions,
.enable_exceptions = gm20b_gr_intr_enable_exceptions,
},
.falcon = {
.fecs_base_addr = gm20b_gr_falcon_fecs_base_addr,
.gpccs_base_addr = gm20b_gr_falcon_gpccs_base_addr,
.dump_stats = gm20b_gr_falcon_fecs_dump_stats,
.fecs_ctxsw_mailbox_size =
gm20b_gr_falcon_get_fecs_ctxsw_mailbox_size,
.get_fecs_ctx_state_store_major_rev_id =
gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id,
},
},
.fb = {
.init_hw = gm20b_fb_init_hw,

View File

@@ -224,13 +224,3 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
fail:
return err;
}
u32 gr_gp106_fecs_falcon_base_addr(void)
{
return gr_fecs_irqsset_r();
}
u32 gr_gp106_gpccs_falcon_base_addr(void)
{
return gr_gpcs_gpccs_irqsset_r();
}

View File

@@ -1,7 +1,7 @@
/*
* GP106 GPU GR
*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -39,7 +39,5 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
struct vm_gk20a *vm, u32 class,
u32 graphics_preempt_mode,
u32 compute_preempt_mode);
u32 gr_gp106_fecs_falcon_base_addr(void);
u32 gr_gp106_gpccs_falcon_base_addr(void);
#endif /* NVGPU_GR_GP106_H */

View File

@@ -70,6 +70,7 @@
#include "hal/gr/config/gr_config_gm20b.h"
#include "hal/gr/zbc/zbc_gp10b.h"
#include "hal/gr/zcull/zcull_gm20b.h"
#include "hal/gr/falcon/gr_falcon_gm20b.h"
#include "hal/gr/init/gr_init_gm20b.h"
#include "hal/gr/init/gr_init_gp10b.h"
#include "hal/gr/intr/gr_intr_gm20b.h"
@@ -275,8 +276,6 @@ static const struct gpu_ops gp10b_ops = {
.get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs,
.get_sm_dsm_perf_ctrl_regs = gr_gm20b_get_sm_dsm_perf_ctrl_regs,
.set_hww_esr_report_mask = gr_gm20b_set_hww_esr_report_mask,
.fecs_falcon_base_addr = gr_gk20a_fecs_falcon_base_addr,
.gpccs_falcon_base_addr = gr_gk20a_gpccs_falcon_base_addr,
.falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode,
.set_gpc_tpc_mask = gr_gp10b_set_gpc_tpc_mask,
@@ -347,15 +346,11 @@ static const struct gpu_ops gp10b_ops = {
.decode_priv_addr = gr_gk20a_decode_priv_addr,
.create_priv_addr_table = gr_gk20a_create_priv_addr_table,
.split_fbpa_broadcast_addr = gr_gk20a_split_fbpa_broadcast_addr,
.fecs_ctxsw_mailbox_size = gr_fecs_ctxsw_mailbox__size_1_v,
.alloc_global_ctx_buffers = gr_gk20a_alloc_global_ctx_buffers,
.commit_global_ctx_buffers = gr_gk20a_commit_global_ctx_buffers,
.get_offset_in_gpccs_segment =
gr_gk20a_get_offset_in_gpccs_segment,
.set_debug_mode = gm20b_gr_set_debug_mode,
.dump_gr_falcon_stats = gk20a_fecs_dump_falcon_stats,
.get_fecs_ctx_state_store_major_rev_id =
gk20a_gr_get_fecs_ctx_state_store_major_rev_id,
.init_gfxp_rtv_cb = NULL,
.log_mme_exception = NULL,
.get_ctx_spill_size = gp10b_gr_get_ctx_spill_size,
@@ -556,6 +551,15 @@ static const struct gpu_ops gp10b_ops = {
gm20b_gr_intr_enable_gpc_exceptions,
.enable_exceptions = gm20b_gr_intr_enable_exceptions,
},
.falcon = {
.fecs_base_addr = gm20b_gr_falcon_fecs_base_addr,
.gpccs_base_addr = gm20b_gr_falcon_gpccs_base_addr,
.dump_stats = gm20b_gr_falcon_fecs_dump_stats,
.fecs_ctxsw_mailbox_size =
gm20b_gr_falcon_get_fecs_ctxsw_mailbox_size,
.get_fecs_ctx_state_store_major_rev_id =
gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id,
},
},
.fb = {
.init_hw = gm20b_fb_init_hw,

View File

@@ -70,6 +70,7 @@
#include "hal/gr/intr/gr_intr_gv11b.h"
#include "hal/gr/zcull/zcull_gm20b.h"
#include "hal/gr/zcull/zcull_gv11b.h"
#include "hal/gr/falcon/gr_falcon_gm20b.h"
#include "hal/gr/hwpm_map/hwpm_map_gv100.h"
#include "hal/falcon/falcon_gk20a.h"
#include "hal/gsp/gsp_gv100.h"
@@ -387,8 +388,6 @@ static const struct gpu_ops gv100_ops = {
.get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs,
.get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs,
.set_hww_esr_report_mask = gv11b_gr_set_hww_esr_report_mask,
.fecs_falcon_base_addr = gr_gp106_fecs_falcon_base_addr,
.gpccs_falcon_base_addr = gr_gp106_gpccs_falcon_base_addr,
.falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode,
.set_gpc_tpc_mask = gr_gv100_set_gpc_tpc_mask,
@@ -473,16 +472,12 @@ static const struct gpu_ops gv100_ops = {
.decode_priv_addr = gr_gv11b_decode_priv_addr,
.create_priv_addr_table = gr_gv11b_create_priv_addr_table,
.split_fbpa_broadcast_addr = gr_gv100_split_fbpa_broadcast_addr,
.fecs_ctxsw_mailbox_size = gr_fecs_ctxsw_mailbox__size_1_v,
.alloc_global_ctx_buffers = gr_gk20a_alloc_global_ctx_buffers,
.commit_global_ctx_buffers = gr_gk20a_commit_global_ctx_buffers,
.get_nonpes_aware_tpc = gr_gv11b_get_nonpes_aware_tpc,
.get_offset_in_gpccs_segment =
gr_gk20a_get_offset_in_gpccs_segment,
.set_debug_mode = gm20b_gr_set_debug_mode,
.dump_gr_falcon_stats = gk20a_fecs_dump_falcon_stats,
.get_fecs_ctx_state_store_major_rev_id =
gk20a_gr_get_fecs_ctx_state_store_major_rev_id,
.init_gfxp_rtv_cb = NULL,
.log_mme_exception = NULL,
.get_ctx_spill_size = gp10b_gr_get_ctx_spill_size,
@@ -699,6 +694,15 @@ static const struct gpu_ops gv100_ops = {
gv11b_gr_intr_enable_gpc_exceptions,
.enable_exceptions = gv11b_gr_intr_enable_exceptions,
},
.falcon = {
.fecs_base_addr = gm20b_gr_falcon_fecs_base_addr,
.gpccs_base_addr = gm20b_gr_falcon_gpccs_base_addr,
.dump_stats = gm20b_gr_falcon_fecs_dump_stats,
.fecs_ctxsw_mailbox_size =
gm20b_gr_falcon_get_fecs_ctxsw_mailbox_size,
.get_fecs_ctx_state_store_major_rev_id =
gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id,
},
},
.fb = {
.init_hw = gv11b_fb_init_hw,

View File

@@ -60,6 +60,7 @@
#include "hal/fifo/ctxsw_timeout_gv11b.h"
#include "hal/gr/fecs_trace/fecs_trace_gm20b.h"
#include "hal/gr/fecs_trace/fecs_trace_gv11b.h"
#include "hal/gr/falcon/gr_falcon_gm20b.h"
#include "hal/gr/config/gr_config_gm20b.h"
#include "hal/gr/zbc/zbc_gp10b.h"
#include "hal/gr/zbc/zbc_gv11b.h"
@@ -338,8 +339,6 @@ static const struct gpu_ops gv11b_ops = {
.get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs,
.get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs,
.set_hww_esr_report_mask = gv11b_gr_set_hww_esr_report_mask,
.fecs_falcon_base_addr = gr_gk20a_fecs_falcon_base_addr,
.gpccs_falcon_base_addr = gr_gk20a_gpccs_falcon_base_addr,
.falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode,
.set_gpc_tpc_mask = gr_gv11b_set_gpc_tpc_mask,
@@ -433,16 +432,12 @@ static const struct gpu_ops gv11b_ops = {
.decode_priv_addr = gr_gv11b_decode_priv_addr,
.create_priv_addr_table = gr_gv11b_create_priv_addr_table,
.split_fbpa_broadcast_addr = gr_gk20a_split_fbpa_broadcast_addr,
.fecs_ctxsw_mailbox_size = gr_fecs_ctxsw_mailbox__size_1_v,
.alloc_global_ctx_buffers = gr_gk20a_alloc_global_ctx_buffers,
.commit_global_ctx_buffers = gr_gk20a_commit_global_ctx_buffers,
.get_nonpes_aware_tpc = gr_gv11b_get_nonpes_aware_tpc,
.get_offset_in_gpccs_segment =
gr_gk20a_get_offset_in_gpccs_segment,
.set_debug_mode = gm20b_gr_set_debug_mode,
.dump_gr_falcon_stats = gk20a_fecs_dump_falcon_stats,
.get_fecs_ctx_state_store_major_rev_id =
gk20a_gr_get_fecs_ctx_state_store_major_rev_id,
.init_gfxp_rtv_cb = NULL,
.log_mme_exception = NULL,
.get_ctx_spill_size = gv11b_gr_get_ctx_spill_size,
@@ -659,6 +654,15 @@ static const struct gpu_ops gv11b_ops = {
gv11b_gr_intr_enable_gpc_exceptions,
.enable_exceptions = gv11b_gr_intr_enable_exceptions,
},
.falcon = {
.fecs_base_addr = gm20b_gr_falcon_fecs_base_addr,
.gpccs_base_addr = gm20b_gr_falcon_gpccs_base_addr,
.dump_stats = gm20b_gr_falcon_fecs_dump_stats,
.fecs_ctxsw_mailbox_size =
gm20b_gr_falcon_get_fecs_ctxsw_mailbox_size,
.get_fecs_ctx_state_store_major_rev_id =
gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id,
},
},
.fb = {
.init_hw = gv11b_fb_init_hw,

View File

@@ -172,7 +172,7 @@ void gk20a_fifo_intr_handle_chsw_error(struct gk20a *g)
nvgpu_report_host_error(g, 0,
GPU_HOST_PFIFO_CHSW_ERROR, intr);
nvgpu_err(g, "chsw: %08x", intr);
g->ops.gr.dump_gr_falcon_stats(g);
g->ops.gr.falcon.dump_stats(g);
nvgpu_writel(g, fifo_intr_chsw_error_r(), intr);
}

View File

@@ -0,0 +1,61 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/io.h>
#include <nvgpu/debug.h>
#include "gr_falcon_gm20b.h"
#include <nvgpu/hw/gm20b/hw_gr_gm20b.h>
u32 gm20b_gr_falcon_fecs_base_addr(void)
{
return gr_fecs_irqsset_r();
}
u32 gm20b_gr_falcon_gpccs_base_addr(void)
{
return gr_gpcs_gpccs_irqsset_r();
}
void gm20b_gr_falcon_fecs_dump_stats(struct gk20a *g)
{
unsigned int i;
nvgpu_falcon_dump_stats(&g->fecs_flcn);
for (i = 0; i < g->ops.gr.falcon.fecs_ctxsw_mailbox_size(); i++) {
nvgpu_err(g, "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x",
i, nvgpu_readl(g, gr_fecs_ctxsw_mailbox_r(i)));
}
}
u32 gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id(struct gk20a *g)
{
return nvgpu_readl(g, gr_fecs_ctx_state_store_major_rev_id_r());
}
u32 gm20b_gr_falcon_get_fecs_ctxsw_mailbox_size(void)
{
return gr_fecs_ctxsw_mailbox__size_1_v();
}

View File

@@ -0,0 +1,36 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_GR_FALCON_GM20B_H
#define NVGPU_GR_FALCON_GM20B_H
#include <nvgpu/types.h>
struct gk20a;
u32 gm20b_gr_falcon_fecs_base_addr(void);
u32 gm20b_gr_falcon_gpccs_base_addr(void);
void gm20b_gr_falcon_fecs_dump_stats(struct gk20a *g);
u32 gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id(struct gk20a *g);
u32 gm20b_gr_falcon_get_fecs_ctxsw_mailbox_size(void);
#endif /* NVGPU_GR_FALCON_GM20B_H */

View File

@@ -453,7 +453,6 @@ struct gpu_ops {
u32 num_fbpas,
u32 *priv_addr_table,
u32 *priv_addr_table_index);
u32 (*fecs_ctxsw_mailbox_size)(void);
int (*init_sw_bundle64)(struct gk20a *g);
int (*alloc_global_ctx_buffers)(struct gk20a *g);
int (*commit_global_ctx_buffers)(struct gk20a *g,
@@ -464,8 +463,6 @@ struct gpu_ops {
u32 num_ppcs, u32 reg_list_ppc_count,
u32 *__offset_in_segment);
void (*set_debug_mode)(struct gk20a *g, bool enable);
void (*dump_gr_falcon_stats)(struct gk20a *g);
u32 (*get_fecs_ctx_state_store_major_rev_id)(struct gk20a *g);
int (*init_gfxp_rtv_cb)(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm);
void (*log_mme_exception)(struct gk20a *g);
@@ -576,6 +573,14 @@ struct gpu_ops {
int (*init_sm_id_table)(struct nvgpu_gr_config *gr_config);
} config;
struct {
u32 (*fecs_base_addr)(void);
u32 (*gpccs_base_addr)(void);
void (*dump_stats)(struct gk20a *g);
u32 (*fecs_ctxsw_mailbox_size)(void);
u32 (*get_fecs_ctx_state_store_major_rev_id)
(struct gk20a *g);
} falcon;
#ifdef CONFIG_GK20A_CTXSW_TRACE
struct {
int (*init)(struct gk20a *g);
@@ -740,8 +745,6 @@ struct gpu_ops {
} intr;
u32 (*get_ctxsw_checksum_mismatch_mailbox_val)(void);
u32 (*fecs_falcon_base_addr)(void);
u32 (*gpccs_falcon_base_addr)(void);
struct {
int (*report_ecc_parity_err)(struct gk20a *g,

View File

@@ -854,10 +854,18 @@ static inline u32 gr_fecs_bootvec_vec_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_fecs_irqsset_r(void)
{
return 0x00409000U;
}
static inline u32 gr_fecs_falcon_hwcfg_r(void)
{
return 0x00409108U;
}
static inline u32 gr_gpcs_gpccs_irqsset_r(void)
{
return 0x0041a000U;
}
static inline u32 gr_gpcs_gpccs_falcon_hwcfg_r(void)
{
return 0x0041a108U;

View File

@@ -1034,10 +1034,18 @@ static inline u32 gr_fecs_bootvec_vec_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_fecs_irqsset_r(void)
{
return 0x00409000U;
}
static inline u32 gr_fecs_falcon_hwcfg_r(void)
{
return 0x00409108U;
}
static inline u32 gr_gpcs_gpccs_irqsset_r(void)
{
return 0x0041a000U;
}
static inline u32 gr_gpcs_gpccs_falcon_hwcfg_r(void)
{
return 0x0041a108U;

View File

@@ -1298,10 +1298,18 @@ static inline u32 gr_fecs_bootvec_vec_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_fecs_irqsset_r(void)
{
return 0x00409000U;
}
static inline u32 gr_fecs_falcon_hwcfg_r(void)
{
return 0x00409108U;
}
static inline u32 gr_gpcs_gpccs_irqsset_r(void)
{
return 0x0041a000U;
}
static inline u32 gr_gpcs_gpccs_falcon_hwcfg_r(void)
{
return 0x0041a108U;

View File

@@ -1870,10 +1870,18 @@ static inline u32 gr_fecs_bootvec_vec_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_fecs_irqsset_r(void)
{
return 0x00409000U;
}
static inline u32 gr_fecs_falcon_hwcfg_r(void)
{
return 0x00409108U;
}
static inline u32 gr_gpcs_gpccs_irqsset_r(void)
{
return 0x0041a000U;
}
static inline u32 gr_gpcs_gpccs_falcon_hwcfg_r(void)
{
return 0x0041a108U;

View File

@@ -1354,10 +1354,18 @@ static inline u32 gr_fecs_bootvec_vec_f(u32 v)
{
return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_fecs_irqsset_r(void)
{
return 0x00409000U;
}
static inline u32 gr_fecs_falcon_hwcfg_r(void)
{
return 0x00409108U;
}
static inline u32 gr_gpcs_gpccs_irqsset_r(void)
{
return 0x0041a000U;
}
static inline u32 gr_gpcs_gpccs_falcon_hwcfg_r(void)
{
return 0x0041a108U;

View File

@@ -62,6 +62,7 @@
#include "hal/fifo/ctxsw_timeout_gv11b.h"
#include "hal/gr/fecs_trace/fecs_trace_gm20b.h"
#include "hal/gr/fecs_trace/fecs_trace_gv11b.h"
#include "hal/gr/falcon/gr_falcon_gm20b.h"
#include "hal/gr/config/gr_config_gm20b.h"
#include "hal/gr/config/gr_config_gv100.h"
#include "hal/gr/zbc/zbc_gp10b.h"
@@ -407,8 +408,6 @@ static const struct gpu_ops tu104_ops = {
.get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs,
.get_sm_dsm_perf_ctrl_regs = gr_tu104_get_sm_dsm_perf_ctrl_regs,
.set_hww_esr_report_mask = gv11b_gr_set_hww_esr_report_mask,
.fecs_falcon_base_addr = gr_gp106_fecs_falcon_base_addr,
.gpccs_falcon_base_addr = gr_gp106_gpccs_falcon_base_addr,
.falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode,
.set_gpc_tpc_mask = gr_gv100_set_gpc_tpc_mask,
@@ -502,16 +501,12 @@ static const struct gpu_ops tu104_ops = {
.create_priv_addr_table = gr_gv11b_create_priv_addr_table,
.split_fbpa_broadcast_addr = gr_gv100_split_fbpa_broadcast_addr,
.init_sw_bundle64 = gr_tu104_init_sw_bundle64,
.fecs_ctxsw_mailbox_size = gr_fecs_ctxsw_mailbox__size_1_v,
.alloc_global_ctx_buffers = gr_gk20a_alloc_global_ctx_buffers,
.commit_global_ctx_buffers = gr_gk20a_commit_global_ctx_buffers,
.get_nonpes_aware_tpc = gr_gv11b_get_nonpes_aware_tpc,
.get_offset_in_gpccs_segment =
gr_tu104_get_offset_in_gpccs_segment,
.set_debug_mode = gm20b_gr_set_debug_mode,
.dump_gr_falcon_stats = gk20a_fecs_dump_falcon_stats,
.get_fecs_ctx_state_store_major_rev_id =
gk20a_gr_get_fecs_ctx_state_store_major_rev_id,
.log_mme_exception = gr_tu104_log_mme_exception,
.get_ctx_spill_size = gp10b_gr_get_ctx_spill_size,
.get_ctx_pagepool_size = gp10b_gr_get_ctx_pagepool_size,
@@ -731,6 +726,15 @@ static const struct gpu_ops tu104_ops = {
tu104_gr_intr_enable_gpc_exceptions,
.enable_exceptions = gv11b_gr_intr_enable_exceptions,
},
.falcon = {
.fecs_base_addr = gm20b_gr_falcon_fecs_base_addr,
.gpccs_base_addr = gm20b_gr_falcon_gpccs_base_addr,
.dump_stats = gm20b_gr_falcon_fecs_dump_stats,
.fecs_ctxsw_mailbox_size =
gm20b_gr_falcon_get_fecs_ctxsw_mailbox_size,
.get_fecs_ctx_state_store_major_rev_id =
gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id,
},
},
.fb = {
.init_hw = gv11b_fb_init_hw,