gpu: nvgpu: enable Orin support in safety build

Most of the Orin chip-specific code is compiled out of the safety build
with CONFIG_NVGPU_NON_FUSA and CONFIG_NVGPU_HAL_NON_FUSA. Remove the
config protection from the Orin/GA10B specific code. Currently all of
this code is enabled; code not required for safety will be compiled out
later in a separate activity.

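The pattern is the one visible in the nvgpu_acr_init() hunk below: a
chip case that used to be compiled only into non-safety builds is now
built unconditionally.

Before (GA10B compiled out of the safety build):

	#if defined(CONFIG_NVGPU_NON_FUSA)
	case NVGPU_GPUID_GA10B:
		nvgpu_ga10b_acr_sw_init(g, g->acr);
		break;
	#endif /* CONFIG_NVGPU_NON_FUSA */

After (GA10B always built):

	case NVGPU_GPUID_GA10B:
		nvgpu_ga10b_acr_sw_init(g, g->acr);
		break;
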
Other noteworthy changes in this patch related to the safety build:

- In ga10b_ce_request_idle(), add a log print to dump num_pce so that
  the compiler does not complain about the unused variable num_pce (see
  the sketch after this list).
- In ga10b_fifo_ctxsw_timeout_isr(), guard the variables active_eng_id
  and recover with CONFIG_NVGPU_KERNEL_MODE_SUBMIT to fix unused-variable
  compilation errors (also sketched below).
- Compile out the HAL gops.pbdma.force_ce_split() from the safety build,
  since this HAL is GA100-specific and not required for GA10B.
- Compile out gr_ga100_process_context_buffer_priv_segment() under
  CONFIG_NVGPU_DEBUGGER.
- Compile out VAB support with CONFIG_NVGPU_HAL_NON_FUSA.
- In ga10b_gr_intr_handle_sw_method(), guard the left_shift_by_2
  variable with the appropriate configs to fix an unused-variable
  compilation error.
- In ga10b_intr_isr_stall_host2soc_3(), guard the ELPG function calls
  with CONFIG_NVGPU_POWER_PG.
- In ga10b_pmu_handle_swgen1_irq(), move the whole function body under
  CONFIG_NVGPU_FALCON_DEBUG to fix unused-variable compilation errors.
- Add the below TU104-specific files to the safety build, since some of
  the code in those files is required for GA10B. Unnecessary code will
  be compiled out later:
	hal/gr/init/gr_init_tu104.c
	hal/class/class_tu104.c
	hal/mc/mc_tu104.c
	hal/fifo/usermode_tu104.c
	hal/gr/falcon/gr_falcon_tu104.c
- Compile out the GA10B-specific debugger/profiler related files from
  the safety build.
- Temporarily disable CONFIG_NVGPU_FALCON_DEBUG in the safety debug
  build to work around compilation errors seen when this config is
  enabled. The config will be re-enabled in the safety debug build
  later.
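
A rough sketch of the two unused-variable fixes above (simplified; the
surrounding code is illustrative, not the actual nvgpu functions):

	/*
	 * ga10b_ce_request_idle(): when the only consumer of num_pce is
	 * compiled out, a log print keeps the variable referenced.
	 */
	u32 num_pce = g->ops.ce.get_num_pce(g);

	nvgpu_log_info(g, "num_pce = %u", num_pce);

	/*
	 * ga10b_fifo_ctxsw_timeout_isr(): declare the variables only when
	 * kernel mode submit support is built in, so that safety builds
	 * never see them as unused.
	 */
	#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
	u32 active_eng_id;
	bool recover = false;
	#endif /* CONFIG_NVGPU_KERNEL_MODE_SUBMIT */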

Jira NVGPU-7276

Change-Id: I35f2489830ac083d52504ca411c3f1d96e72fc48
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2627048
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>

View File

@@ -32,15 +32,11 @@
#include "acr_sw_gp10b.h"
#endif
#include "acr_sw_gv11b.h"
#ifdef CONFIG_NVGPU_DGPU
#include "acr_sw_tu104.h"
#endif
#ifdef CONFIG_NVGPU_NON_FUSA
#include "acr_sw_ga10b.h"
#ifdef CONFIG_NVGPU_DGPU
#include "acr_sw_tu104.h"
#include "acr_sw_ga100.h"
#endif
#endif
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#include <nvgpu_next_acr.h>
@@ -151,11 +147,9 @@ int nvgpu_acr_init(struct gk20a *g)
case NVGPU_GPUID_GV11B:
nvgpu_gv11b_acr_sw_init(g, g->acr);
break;
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_GPUID_GA10B:
nvgpu_ga10b_acr_sw_init(g, g->acr);
break;
#endif /* CONFIG_NVGPU_NON_FUSA */
#ifdef CONFIG_NVGPU_DGPU
case NVGPU_GPUID_TU104:
nvgpu_tu104_acr_sw_init(g, g->acr);

View File

@@ -105,7 +105,7 @@ int nvgpu_acr_lsf_pmu_ucode_details(struct gk20a *g, void *lsf_ucode_img)
exit:
return err;
}
#if defined(CONFIG_NVGPU_NON_FUSA)
s32 nvgpu_acr_lsf_pmu_ncore_ucode_details(struct gk20a *g, void *lsf_ucode_img)
{
struct lsf_ucode_desc *lsf_desc = NULL;
@@ -163,7 +163,6 @@ exit:
return err;
}
#endif
#endif
int nvgpu_acr_lsf_fecs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
{
@@ -171,9 +170,7 @@ int nvgpu_acr_lsf_fecs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
u32 ver = nvgpu_safe_add_u32(g->params.gpu_arch,
g->params.gpu_impl);
struct lsf_ucode_desc *lsf_desc = NULL;
#if defined(CONFIG_NVGPU_NON_FUSA)
struct lsf_ucode_desc_wrapper *lsf_desc_wrapper = NULL;
#endif
struct nvgpu_firmware *fecs_sig = NULL;
struct flcn_ucode_img *p_img =
(struct flcn_ucode_img *)lsf_ucode_img;
@@ -187,7 +184,6 @@ int nvgpu_acr_lsf_fecs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
fecs_sig = nvgpu_request_firmware(g, GM20B_FECS_UCODE_SIG,
NVGPU_REQUEST_FIRMWARE_NO_WARN);
break;
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_GPUID_GA10B:
if (!nvgpu_is_enabled(g, NVGPU_PKC_LS_SIG_ENABLED)) {
fecs_sig = nvgpu_request_firmware(g,
@@ -199,7 +195,6 @@ int nvgpu_acr_lsf_fecs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
NVGPU_REQUEST_FIRMWARE_NO_WARN);
}
break;
#endif
#ifdef CONFIG_NVGPU_DGPU
case NVGPU_GPUID_TU104:
fecs_sig = nvgpu_request_firmware(g, TU104_FECS_UCODE_SIG,
@@ -234,7 +229,6 @@ int nvgpu_acr_lsf_fecs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
min_t(size_t, sizeof(*lsf_desc), fecs_sig->size));
lsf_desc->falcon_id = FALCON_ID_FECS;
#if defined(CONFIG_NVGPU_NON_FUSA)
} else {
lsf_desc_wrapper =
nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_wrapper));
@@ -246,7 +240,6 @@ int nvgpu_acr_lsf_fecs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
min_t(size_t, sizeof(*lsf_desc_wrapper), fecs_sig->size));
lsf_desc_wrapper->lsf_ucode_desc_v2.falcon_id = FALCON_ID_FECS;
#endif
}
p_img->desc = nvgpu_kzalloc(g, sizeof(struct ls_falcon_ucode_desc));
@@ -286,11 +279,9 @@ int nvgpu_acr_lsf_fecs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
if (!nvgpu_is_enabled(g, NVGPU_PKC_LS_SIG_ENABLED)) {
p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
#if defined(CONFIG_NVGPU_NON_FUSA)
} else {
p_img->lsf_desc_wrapper =
(struct lsf_ucode_desc_wrapper *)lsf_desc_wrapper;
#endif
}
nvgpu_acr_dbg(g, "fecs fw loaded\n");
@@ -301,10 +292,8 @@ int nvgpu_acr_lsf_fecs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
free_lsf_desc:
if (!nvgpu_is_enabled(g, NVGPU_PKC_LS_SIG_ENABLED)) {
nvgpu_kfree(g, lsf_desc);
#if defined(CONFIG_NVGPU_NON_FUSA)
} else {
nvgpu_kfree(g, lsf_desc_wrapper);
#endif
}
rel_sig:
nvgpu_release_firmware(g, fecs_sig);
@@ -316,9 +305,7 @@ int nvgpu_acr_lsf_gpccs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
u32 tmp_size;
u32 ver = nvgpu_safe_add_u32(g->params.gpu_arch, g->params.gpu_impl);
struct lsf_ucode_desc *lsf_desc = NULL;
#if defined(CONFIG_NVGPU_NON_FUSA)
struct lsf_ucode_desc_wrapper *lsf_desc_wrapper = NULL;
#endif
struct nvgpu_firmware *gpccs_sig = NULL;
struct flcn_ucode_img *p_img =
(struct flcn_ucode_img *)lsf_ucode_img;
@@ -340,7 +327,6 @@ int nvgpu_acr_lsf_gpccs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
gpccs_sig = nvgpu_request_firmware(g, T18x_GPCCS_UCODE_SIG,
NVGPU_REQUEST_FIRMWARE_NO_WARN);
break;
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_GPUID_GA10B:
if (!nvgpu_is_enabled(g, NVGPU_PKC_LS_SIG_ENABLED)) {
gpccs_sig = nvgpu_request_firmware(g,
@@ -352,7 +338,6 @@ int nvgpu_acr_lsf_gpccs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
NVGPU_REQUEST_FIRMWARE_NO_WARN);
}
break;
#endif
#ifdef CONFIG_NVGPU_DGPU
case NVGPU_GPUID_TU104:
gpccs_sig = nvgpu_request_firmware(g, TU104_GPCCS_UCODE_SIG,
@@ -387,7 +372,6 @@ int nvgpu_acr_lsf_gpccs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
nvgpu_memcpy((u8 *)lsf_desc, gpccs_sig->data,
min_t(size_t, sizeof(*lsf_desc), gpccs_sig->size));
lsf_desc->falcon_id = FALCON_ID_GPCCS;
#if defined(CONFIG_NVGPU_NON_FUSA)
} else {
lsf_desc_wrapper =
nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_wrapper));
@@ -398,7 +382,6 @@ int nvgpu_acr_lsf_gpccs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
nvgpu_memcpy((u8 *)lsf_desc_wrapper, gpccs_sig->data,
min_t(size_t, sizeof(*lsf_desc_wrapper), gpccs_sig->size));
lsf_desc_wrapper->lsf_ucode_desc_v2.falcon_id = FALCON_ID_GPCCS;
#endif
}
nvgpu_acr_dbg(g, "gpccs fw copied to desc buffer\n");

View File

@@ -87,10 +87,8 @@ struct flcn_ucode_img {
u32 data_size;
struct lsf_ucode_desc *lsf_desc;
bool is_next_core_img;
#if defined(CONFIG_NVGPU_NON_FUSA)
struct lsf_ucode_desc_wrapper *lsf_desc_wrapper;
struct falcon_next_core_ucode_desc *ndesc;
#endif
};
struct lsfm_managed_ucode_img {
@@ -144,10 +142,8 @@ struct ls_flcn_mgr {
int nvgpu_acr_prepare_ucode_blob(struct gk20a *g);
#ifdef CONFIG_NVGPU_LS_PMU
int nvgpu_acr_lsf_pmu_ucode_details(struct gk20a *g, void *lsf_ucode_img);
#if defined(CONFIG_NVGPU_NON_FUSA)
s32 nvgpu_acr_lsf_pmu_ncore_ucode_details(struct gk20a *g, void *lsf_ucode_img);
#endif
#endif
int nvgpu_acr_lsf_fecs_ucode_details(struct gk20a *g, void *lsf_ucode_img);
int nvgpu_acr_lsf_gpccs_ucode_details(struct gk20a *g, void *lsf_ucode_img);
#ifdef CONFIG_NVGPU_DGPU

View File

@@ -31,10 +31,8 @@
#include <nvgpu/acr.h>
#include <nvgpu/bug.h>
#include <nvgpu/soc.h>
#if defined(CONFIG_NVGPU_FALCON_NON_FUSA) && defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include <nvgpu/riscv.h>
#include <nvgpu/io.h>
#endif
#include "acr_bootstrap.h"
#include "acr_priv.h"
@@ -266,7 +264,6 @@ err_free_ucode:
return err;
}
#if defined(CONFIG_NVGPU_FALCON_NON_FUSA) && defined(CONFIG_NVGPU_HAL_NON_FUSA)
#define RISCV_BR_COMPLETION_TIMEOUT_NON_SILICON_MS 10000 /*in msec */
#define RISCV_BR_COMPLETION_TIMEOUT_SILICON_MS 100 /*in msec */
@@ -409,4 +406,3 @@ exit:
return err;
}
#endif

View File

@@ -113,11 +113,9 @@ struct hs_acr {
struct flcn_acr_desc *acr_dmem_desc;
};
#if defined(CONFIG_NVGPU_NON_FUSA)
struct nvgpu_mem acr_falcon2_sysmem_desc;
struct flcn2_acr_desc acr_sysmem_desc;
struct nvgpu_mem ls_pmu_desc;
#endif
/* Falcon used to execute ACR ucode */
struct nvgpu_falcon *acr_flcn;
@@ -134,9 +132,6 @@ int nvgpu_acr_wait_for_completion(struct gk20a *g, struct hs_acr *acr_desc,
int nvgpu_acr_bootstrap_hs_ucode(struct gk20a *g, struct nvgpu_acr *acr,
struct hs_acr *acr_desc);
#if defined(CONFIG_NVGPU_FALCON_NON_FUSA) && defined(CONFIG_NVGPU_HAL_NON_FUSA)
int nvgpu_acr_bootstrap_hs_ucode_riscv(struct gk20a *g, struct nvgpu_acr *acr);
#endif
#endif /* ACR_BOOTSTRAP_H */

View File

@@ -195,7 +195,6 @@ void nvgpu_cic_mon_intr_stall_handle(struct gk20a *g)
(void)nvgpu_cic_rm_broadcast_last_irq_stall(g);
}
#ifdef CONFIG_NVGPU_NON_FUSA
void nvgpu_cic_mon_intr_enable(struct gk20a *g)
{
unsigned long flags = 0;
@@ -272,4 +271,3 @@ bool nvgpu_cic_mon_intr_get_unit_info(struct gk20a *g, u32 unit, u32 *subtree,
return true;
}
#endif

View File

@@ -29,10 +29,7 @@
#ifdef CONFIG_NVGPU_DGPU
#include "falcon_sw_tu104.h"
#endif
#ifdef CONFIG_NVGPU_NON_FUSA
#include "falcon_sw_ga10b.h"
#endif /* CONFIG_NVGPU_NON_FUSA */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#include <nvgpu_next_falcon.h>
@@ -394,7 +391,6 @@ u32 nvgpu_falcon_get_id(struct nvgpu_falcon *flcn)
return flcn->flcn_id;
}
#if defined(CONFIG_NVGPU_NON_FUSA)
bool nvgpu_falcon_is_falcon2_enabled(struct nvgpu_falcon *flcn)
{
return flcn->is_falcon2_enabled ? true : false;
@@ -405,7 +401,6 @@ bool nvgpu_falcon_is_feature_supported(struct nvgpu_falcon *flcn,
{
return nvgpu_test_bit(feature, (void *)&flcn->fuse_settings);
}
#endif
struct nvgpu_falcon *nvgpu_falcon_get_instance(struct gk20a *g, u32 flcn_id)
{
@@ -455,9 +450,6 @@ static int falcon_sw_chip_init(struct gk20a *g, struct nvgpu_falcon *flcn)
case NVGPU_GPUID_GP10B:
gk20a_falcon_sw_init(flcn);
break;
case NVGPU_GPUID_GA10B:
ga10b_falcon_sw_init(flcn);
break;
#ifdef CONFIG_NVGPU_DGPU
case NVGPU_GPUID_TU104:
case NVGPU_GPUID_GA100:
@@ -465,6 +457,9 @@ static int falcon_sw_chip_init(struct gk20a *g, struct nvgpu_falcon *flcn)
break;
#endif /* CONFIG_NVGPU_DGPU */
#endif /* CONFIG_NVGPU_NON_FUSA */
case NVGPU_GPUID_GA10B:
ga10b_falcon_sw_init(flcn);
break;
case NVGPU_GPUID_GV11B:
gk20a_falcon_sw_init(flcn);
break;
@@ -561,6 +556,18 @@ void nvgpu_falcon_set_irq(struct nvgpu_falcon *flcn, bool enable,
g->ops.falcon.set_irq(flcn, enable, intr_mask, intr_dest);
}
int nvgpu_falcon_get_mem_size(struct nvgpu_falcon *flcn,
enum falcon_mem_type type, u32 *size)
{
if (!is_falcon_valid(flcn)) {
return -EINVAL;
}
*size = flcn->g->ops.falcon.get_mem_size(flcn, type);
return 0;
}
#ifdef CONFIG_NVGPU_DGPU
int nvgpu_falcon_copy_from_emem(struct nvgpu_falcon *flcn,
u32 src, u8 *dst, u32 size, u8 port)
@@ -642,18 +649,6 @@ int nvgpu_falcon_bootstrap(struct nvgpu_falcon *flcn, u32 boot_vector)
return 0;
}
int nvgpu_falcon_get_mem_size(struct nvgpu_falcon *flcn,
enum falcon_mem_type type, u32 *size)
{
if (!is_falcon_valid(flcn)) {
return -EINVAL;
}
*size = flcn->g->ops.falcon.get_mem_size(flcn, type);
return 0;
}
int nvgpu_falcon_clear_halt_intr_status(struct nvgpu_falcon *flcn,
unsigned int timeout)
{

View File

@@ -707,7 +707,6 @@ u32 nvgpu_engine_get_mask_on_id(struct gk20a *g, u32 id, bool is_tsg)
}
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
int nvgpu_engine_init_one_dev_extra(struct gk20a *g,
const struct nvgpu_device *dev)
{
@@ -752,7 +751,6 @@ int nvgpu_engine_init_one_dev_extra(struct gk20a *g,
return 0;
}
#endif
static int nvgpu_engine_init_one_dev(struct nvgpu_fifo *f,
const struct nvgpu_device *dev)
@@ -781,7 +779,6 @@ static int nvgpu_engine_init_one_dev(struct nvgpu_fifo *f,
}
}
#if defined(CONFIG_NVGPU_NON_FUSA)
{
/*
* Fill Ampere+ device fields.
@@ -791,7 +788,6 @@ static int nvgpu_engine_init_one_dev(struct nvgpu_fifo *f,
return err;
}
}
#endif
f->host_engines[dev->engine_id] = dev;
f->active_engines[f->num_engines] = dev;

View File

@@ -861,7 +861,6 @@ void nvgpu_runlist_cleanup_sw(struct gk20a *g)
f->max_runlists = 0;
}
#if defined(CONFIG_NVGPU_NON_FUSA)
static void nvgpu_runlist_init_engine_info(struct gk20a *g,
struct nvgpu_runlist *runlist,
const struct nvgpu_device *dev)
@@ -919,7 +918,6 @@ static u32 nvgpu_runlist_get_pbdma_mask(struct gk20a *g,
}
return pbdma_mask;
}
#endif /* CONFIG_NVGPU_NON_FUSA */
void nvgpu_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f)
{
@@ -943,13 +941,11 @@ void nvgpu_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f)
if (dev->runlist_id == runlist->id) {
runlist->eng_bitmask |= BIT32(dev->engine_id);
#ifdef CONFIG_NVGPU_NON_FUSA
/*
* Populate additional runlist fields on
* Ampere+ chips.
*/
nvgpu_runlist_init_engine_info(g, runlist, dev);
#endif /* CONFIG_NVGPU_NON_FUSA */
}
}
@@ -966,12 +962,10 @@ void nvgpu_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f)
runlist->id,
&runlist->pbdma_bitmask);
}
#ifdef CONFIG_NVGPU_NON_FUSA
else {
runlist->pbdma_bitmask =
nvgpu_runlist_get_pbdma_mask(g, runlist);
}
#endif /* CONFIG_NVGPU_NON_FUSA */
nvgpu_log(g, gpu_dbg_info, " Active engine bitmask: 0x%x", runlist->eng_bitmask);
nvgpu_log(g, gpu_dbg_info, " PBDMA bitmask: 0x%x", runlist->pbdma_bitmask);
}

View File

@@ -139,7 +139,6 @@ int nvgpu_gr_fs_state_init(struct gk20a *g, struct nvgpu_gr_config *config)
return err;
}
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
int nvgpu_gr_init_sm_id_early_config(struct gk20a *g, struct nvgpu_gr_config *config)
{
u32 tpc_index, gpc_index;
@@ -169,5 +168,3 @@ int nvgpu_gr_init_sm_id_early_config(struct gk20a *g, struct nvgpu_gr_config *co
return err;
}
#endif

View File

@@ -223,11 +223,9 @@ static int gr_init_setup_hw(struct gk20a *g, struct nvgpu_gr *gr)
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, " ");
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.gr.init.eng_config != NULL) {
g->ops.gr.init.eng_config(g);
}
#endif
g->ops.gr.init.gpc_mmu(g);
@@ -565,10 +563,8 @@ static int gr_init_prepare_hw_impl(struct gk20a *g)
sw_non_ctx_load->l[i].value);
}
#if defined(CONFIG_NVGPU_NON_FUSA)
nvgpu_gr_init_reset_enable_hw_non_ctx_local(g);
nvgpu_gr_init_reset_enable_hw_non_ctx_global(g);
#endif
nvgpu_log_info(g, "end: netlist: sw_non_ctx_load: register writes");
err = g->ops.gr.falcon.wait_mem_scrubbing(g);
@@ -621,7 +617,6 @@ static int gr_reset_engine(struct gk20a *g)
return err;
}
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.gr.init.reset_gpcs != NULL) {
err = g->ops.gr.init.reset_gpcs(g);
if (err != 0) {
@@ -629,7 +624,6 @@ static int gr_reset_engine(struct gk20a *g)
return err;
}
}
#endif
err = g->ops.mc.enable_dev(g, dev, true);
if (err != 0) {
@@ -797,7 +791,6 @@ int nvgpu_gr_reset(struct gk20a *g)
}
#endif
#if defined(CONFIG_NVGPU_NON_FUSA)
static int gr_init_sm_id_config_early(struct gk20a *g, struct nvgpu_gr *gr)
{
int err;
@@ -811,7 +804,6 @@ static int gr_init_sm_id_config_early(struct gk20a *g, struct nvgpu_gr *gr)
return 0;
}
#endif
static int gr_init_ctxsw_falcon_support(struct gk20a *g, struct nvgpu_gr *gr)
{
@@ -853,7 +845,6 @@ static int gr_init_support_impl(struct gk20a *g)
}
}
#if defined(CONFIG_NVGPU_NON_FUSA)
/*
* Move sm id programming before loading ctxsw and gpccs firmwares. This
* is the actual sequence expected by ctxsw ucode.
@@ -862,7 +853,6 @@ static int gr_init_support_impl(struct gk20a *g)
if (err != 0) {
return err;
}
#endif
err = gr_init_ctxsw_falcon_support(g, gr);
if (err != 0) {
@@ -1214,7 +1204,6 @@ u32 nvgpu_gr_get_tpc_num(struct gk20a *g, u32 addr)
return 0;
}
#ifdef CONFIG_NVGPU_NON_FUSA
void nvgpu_gr_init_reset_enable_hw_non_ctx_local(struct gk20a *g)
{
u32 i = 0U;
@@ -1268,4 +1257,3 @@ void nvgpu_gr_init_reset_enable_hw_non_ctx_global(struct gk20a *g)
return;
}
#endif

View File

@@ -576,21 +576,17 @@ static int nvgpu_gr_obj_ctx_commit_hw_state(struct gk20a *g,
if (err != 0) {
goto restore_fe_go_idle;
}
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.gr.init.auto_go_idle != NULL) {
g->ops.gr.init.auto_go_idle(g, false);
}
#endif
err = nvgpu_gr_obj_ctx_alloc_sw_bundle(g);
if (err != 0) {
goto restore_fe_go_idle;
}
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.gr.init.auto_go_idle != NULL) {
g->ops.gr.init.auto_go_idle(g, true);
}
#endif
/* restore fe_go_idle */
g->ops.gr.init.fe_go_idle_timeout(g, true);
@@ -617,11 +613,9 @@ static int nvgpu_gr_obj_ctx_commit_hw_state(struct gk20a *g,
restore_fe_go_idle:
/* restore fe_go_idle */
g->ops.gr.init.fe_go_idle_timeout(g, true);
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.gr.init.auto_go_idle != NULL) {
g->ops.gr.init.auto_go_idle(g, true);
}
#endif
clean_up:
return err;

View File

@@ -897,6 +897,32 @@ u32 *nvgpu_netlist_get_gpccs_data_list(struct gk20a *g)
return g->netlist_vars->ucode.gpccs.data.l;
}
struct netlist_av_list *nvgpu_netlist_get_sw_non_ctx_local_compute_load_av_list(
struct gk20a *g)
{
return &g->netlist_vars->sw_non_ctx_local_compute_load;
}
struct netlist_av_list *nvgpu_netlist_get_sw_non_ctx_global_compute_load_av_list(
struct gk20a *g)
{
return &g->netlist_vars->sw_non_ctx_global_compute_load;
}
#ifdef CONFIG_NVGPU_GRAPHICS
struct netlist_av_list *nvgpu_netlist_get_sw_non_ctx_local_gfx_load_av_list(
struct gk20a *g)
{
return &g->netlist_vars->sw_non_ctx_local_gfx_load;
}
struct netlist_av_list *nvgpu_netlist_get_sw_non_ctx_global_gfx_load_av_list(
struct gk20a *g)
{
return &g->netlist_vars->sw_non_ctx_global_gfx_load;
}
#endif /* CONFIG_NVGPU_GRAPHICS */
#ifdef CONFIG_NVGPU_DEBUGGER
struct netlist_aiv_list *nvgpu_netlist_get_sys_ctxsw_regs(struct gk20a *g)
{
@@ -1304,31 +1330,4 @@ u32 nvgpu_netlist_get_sys_ctxsw_regs_count(struct gk20a *g)
return count;
}
#endif /* CONFIG_NVGPU_DEBUGGER */
struct netlist_av_list *nvgpu_netlist_get_sw_non_ctx_local_compute_load_av_list(
struct gk20a *g)
{
return &g->netlist_vars->sw_non_ctx_local_compute_load;
}
struct netlist_av_list *nvgpu_netlist_get_sw_non_ctx_global_compute_load_av_list(
struct gk20a *g)
{
return &g->netlist_vars->sw_non_ctx_global_compute_load;
}
#ifdef CONFIG_NVGPU_GRAPHICS
struct netlist_av_list *nvgpu_netlist_get_sw_non_ctx_local_gfx_load_av_list(
struct gk20a *g)
{
return &g->netlist_vars->sw_non_ctx_local_gfx_load;
}
struct netlist_av_list *nvgpu_netlist_get_sw_non_ctx_global_gfx_load_av_list(
struct gk20a *g)
{
return &g->netlist_vars->sw_non_ctx_global_gfx_load;
}
#endif /* CONFIG_NVGPU_GRAPHICS */
#endif

View File

@@ -136,14 +136,12 @@ struct nvgpu_netlist_vars {
struct netlist_av_list sw_method_init;
struct netlist_aiv_list sw_ctx_load;
struct netlist_av_list sw_non_ctx_load;
#if defined(CONFIG_NVGPU_NON_FUSA)
struct netlist_av_list sw_non_ctx_local_compute_load;
struct netlist_av_list sw_non_ctx_global_compute_load;
#ifdef CONFIG_NVGPU_GRAPHICS
struct netlist_av_list sw_non_ctx_local_gfx_load;
struct netlist_av_list sw_non_ctx_global_gfx_load;
#endif /* CONFIG_NVGPU_GRAPHICS */
#endif
struct netlist_av_list sw_veid_bundle_init;
#ifdef CONFIG_NVGPU_DEBUGGER
struct {

View File

@@ -1033,11 +1033,9 @@ int nvgpu_pmu_pg_init(struct gk20a *g, struct nvgpu_pmu *pmu,
nvgpu_gv11b_pg_sw_init(g, *pg_p);
break;
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_GPUID_GA10B:
nvgpu_ga10b_pg_sw_init(g, *pg_p);
break;
#endif /* CONFIG_NVGPU_NON_FUSA */
default:
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)

View File

@@ -110,11 +110,9 @@ void nvgpu_cg_blcg_fifo_load_enable(struct gk20a *g)
if (g->ops.cg.blcg_fifo_load_gating_prod != NULL) {
g->ops.cg.blcg_fifo_load_gating_prod(g, true);
}
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.cg.blcg_runlist_load_gating_prod != NULL) {
g->ops.cg.blcg_runlist_load_gating_prod(g, true);
}
#endif
done:
nvgpu_mutex_release(&g->cg_pg_lock);
}
@@ -188,7 +186,6 @@ static void nvgpu_cg_slcg_priring_load_prod(struct gk20a *g, bool enable)
if (g->ops.cg.slcg_priring_load_gating_prod != NULL) {
g->ops.cg.slcg_priring_load_gating_prod(g, enable);
}
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.cg.slcg_rs_ctrl_fbp_load_gating_prod != NULL) {
g->ops.cg.slcg_rs_ctrl_fbp_load_gating_prod(g, enable);
}
@@ -207,7 +204,6 @@ static void nvgpu_cg_slcg_priring_load_prod(struct gk20a *g, bool enable)
if (g->ops.cg.slcg_rs_sys_load_gating_prod != NULL) {
g->ops.cg.slcg_rs_sys_load_gating_prod(g, enable);
}
#endif
}
@@ -236,11 +232,9 @@ void nvgpu_cg_slcg_fifo_load_enable(struct gk20a *g)
if (g->ops.cg.slcg_fifo_load_gating_prod != NULL) {
g->ops.cg.slcg_fifo_load_gating_prod(g, true);
}
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.cg.slcg_runlist_load_gating_prod != NULL) {
g->ops.cg.slcg_runlist_load_gating_prod(g, true);
}
#endif
done:
nvgpu_mutex_release(&g->cg_pg_lock);
}
@@ -507,11 +501,9 @@ void nvgpu_cg_elcg_set_elcg_enabled(struct gk20a *g, bool enable)
nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_RUN);
}
}
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.cg.elcg_ce_load_gating_prod != NULL) {
g->ops.cg.elcg_ce_load_gating_prod(g, g->elcg_enabled);
}
#endif
nvgpu_mutex_release(&g->cg_pg_lock);
}
@@ -554,11 +546,9 @@ void nvgpu_cg_blcg_set_blcg_enabled(struct gk20a *g, bool enable)
if (g->ops.cg.blcg_gr_load_gating_prod != NULL) {
g->ops.cg.blcg_gr_load_gating_prod(g, enable);
}
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.cg.blcg_runlist_load_gating_prod != NULL) {
g->ops.cg.blcg_runlist_load_gating_prod(g, enable);
}
#endif
if (g->ops.cg.blcg_ltc_load_gating_prod != NULL) {
g->ops.cg.blcg_ltc_load_gating_prod(g, enable);
}
@@ -615,14 +605,12 @@ void nvgpu_cg_slcg_set_slcg_enabled(struct gk20a *g, bool enable)
if (g->ops.cg.slcg_fifo_load_gating_prod != NULL) {
g->ops.cg.slcg_fifo_load_gating_prod(g, enable);
}
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.cg.slcg_runlist_load_gating_prod != NULL) {
g->ops.cg.slcg_runlist_load_gating_prod(g, enable);
}
if (g->ops.cg.slcg_timer_load_gating_prod != NULL) {
g->ops.cg.slcg_timer_load_gating_prod(g, enable);
}
#endif
if (g->ops.cg.slcg_gr_load_gating_prod != NULL) {
g->ops.cg.slcg_gr_load_gating_prod(g, enable);
}
@@ -657,11 +645,9 @@ void nvgpu_cg_elcg_ce_load_enable(struct gk20a *g)
if (!g->elcg_enabled) {
goto done;
}
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.cg.elcg_ce_load_gating_prod != NULL) {
g->ops.cg.elcg_ce_load_gating_prod(g, true);
}
#endif
done:
nvgpu_mutex_release(&g->cg_pg_lock);
}