gpu: nvgpu: remove usage of CONFIG_NVGPU_NEXT

The CONFIG_NVGPU_NEXT config is no longer required now that ga10b and
ga100 sources have been collapsed. However, the ga100 and ga10b sources
are not safety certified, so mark them as NON_FUSA by replacing
CONFIG_NVGPU_NEXT with CONFIG_NVGPU_NON_FUSA.

Move CONFIG_NVGPU_MIG to Makefile.linux.config and enable MIG support
by default on standard build.

Jira NVGPU-4771

Change-Id: Idc5861fe71d9d510766cf242c6858e2faf97d7d0
Signed-off-by: Antony Clince Alex <aalex@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2547092
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Antony Clince Alex
2021-06-18 13:02:17 +00:00
committed by mobile promotions
parent ff75647d59
commit c7d43f5292
76 changed files with 182 additions and 200 deletions

View File

@@ -788,14 +788,6 @@ nvgpu-$(CONFIG_NVGPU_HAL_NON_FUSA) += \
endif
ifeq ($(CONFIG_ARCH_TEGRA_23x_SOC),y)
CONFIG_NVGPU_NEXT := y
ccflags-y += -DCONFIG_NVGPU_NEXT
# Multi Instance GPU support
CONFIG_NVGPU_MIG := y
endif
ifeq ($(CONFIG_NVGPU_NEXT),y)
ifeq ($(CONFIG_NVGPU_HAL_NON_FUSA),y)
nvgpu-y += \
common/fifo/nvgpu_next_engines.o \
@@ -939,4 +931,3 @@ nvgpu-y += \
os/linux/platform_ga10b_tegra.o \
os/linux/nvgpu_next_ioctl_prof.o \
os/linux/nvlink/hal/ga10b_mssnvlink.o
endif

View File

@@ -32,6 +32,9 @@ CONFIG_NVGPU_RECOVERY := y
# Support for compression
CONFIG_NVGPU_COMPRESSION := y
# Enable MIG Support
CONFIG_NVGPU_MIG := y
# Enable support for extraction of comptags for CDE.
ifeq ($(CONFIG_NVGPU_COMPRESSION),y)
CONFIG_NVGPU_SUPPORT_CDE := y

View File

@@ -204,12 +204,6 @@ NVGPU_COMMON_CFLAGS += -DCONFIG_MSSNVLINK0_RST_CONTROL
CONFIG_NVGPU_DGPU := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_DGPU
# Enable nvgpu_next for normal build
ifneq ($(NV_BUILD_CONFIGURATION_IS_EXTERNAL), 1)
CONFIG_NVGPU_NEXT := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_NEXT
endif
CONFIG_NVGPU_VPR := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_VPR

View File

@@ -710,7 +710,6 @@ ifeq ($(CONFIG_NVGPU_TPC_POWERGATE),1)
srcs += hal/tpc/tpc_gv11b.c
endif
ifeq ($(CONFIG_NVGPU_NEXT),1)
ifeq ($(CONFIG_NVGPU_HAL_NON_FUSA),1)
srcs += \
common/fifo/nvgpu_next_engines.c \
@@ -806,8 +805,6 @@ srcs += \
hal/cbc/cbc_ga10b.c
endif
endif
ifeq ($(CONFIG_NVGPU_IGPU_VIRT),1)
srcs += \
hal/vgpu/init/vgpu_hal_ga10b.c

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -36,7 +36,7 @@
#include "acr_sw_tu104.h"
#endif
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "nvgpu_next_gpuid.h"
#endif
@@ -145,7 +145,7 @@ int nvgpu_acr_init(struct gk20a *g)
case NVGPU_GPUID_GV11B:
nvgpu_gv11b_acr_sw_init(g, g->acr);
break;
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_GPUID:
nvgpu_next_acr_sw_init(g, g->acr);
break;
@@ -154,7 +154,7 @@ int nvgpu_acr_init(struct gk20a *g)
case NVGPU_GPUID_TU104:
nvgpu_tu104_acr_sw_init(g, g->acr);
break;
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_DGPU_GPUID:
nvgpu_next_dgpu_acr_sw_init(g, g->acr);
break;

View File

@@ -33,7 +33,7 @@
#include "acr_wpr.h"
#include "acr_priv.h"
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "nvgpu_next_gpuid.h"
#endif
@@ -54,7 +54,7 @@
#endif
#ifdef CONFIG_NVGPU_LS_PMU
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#define PMU_NVRISCV_WPR_RSVD_BYTES (0x8000)
#endif
@@ -106,7 +106,7 @@ int nvgpu_acr_lsf_pmu_ucode_details(struct gk20a *g, void *lsf_ucode_img)
exit:
return err;
}
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
s32 nvgpu_acr_lsf_pmu_ncore_ucode_details(struct gk20a *g, void *lsf_ucode_img)
{
struct lsf_ucode_desc *lsf_desc;
@@ -162,7 +162,7 @@ int nvgpu_acr_lsf_fecs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
switch (ver) {
case NVGPU_GPUID_GV11B:
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_GPUID:
#endif
fecs_sig = nvgpu_request_firmware(g, GM20B_FECS_UCODE_SIG,
@@ -174,7 +174,7 @@ int nvgpu_acr_lsf_fecs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
NVGPU_REQUEST_FIRMWARE_NO_SOC);
break;
#endif
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_DGPU_GPUID:
fecs_sig = nvgpu_request_firmware(g, NEXT_DGPU_FECS_UCODE_SIG,
NVGPU_REQUEST_FIRMWARE_NO_SOC);
@@ -272,7 +272,7 @@ int nvgpu_acr_lsf_gpccs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
switch (ver) {
case NVGPU_GPUID_GV11B:
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_GPUID:
#endif
gpccs_sig = nvgpu_request_firmware(g, T18x_GPCCS_UCODE_SIG,
@@ -284,7 +284,7 @@ int nvgpu_acr_lsf_gpccs_ucode_details(struct gk20a *g, void *lsf_ucode_img)
NVGPU_REQUEST_FIRMWARE_NO_SOC);
break;
#endif
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_DGPU_GPUID:
gpccs_sig = nvgpu_request_firmware(g, NEXT_DGPU_GPCCS_UCODE_SIG,
NVGPU_REQUEST_FIRMWARE_NO_SOC);
@@ -775,7 +775,7 @@ static int lsf_gen_wpr_requirements(struct gk20a *g,
pnode->lsb_header.app_data_size =
pnode->lsb_header.data_size;
}
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
/* Falcon image is cleanly partitioned between a code and
* data section where we don't need extra reserved space.
* NVRISCV image has no clear partition for code and data

View File

@@ -87,7 +87,7 @@ struct flcn_ucode_img {
u32 data_size;
struct lsf_ucode_desc *lsf_desc;
bool is_next_core_img;
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
struct falcon_next_core_ucode_desc *ndesc;
#endif
};
@@ -140,7 +140,7 @@ struct ls_flcn_mgr {
int nvgpu_acr_prepare_ucode_blob(struct gk20a *g);
#ifdef CONFIG_NVGPU_LS_PMU
int nvgpu_acr_lsf_pmu_ucode_details(struct gk20a *g, void *lsf_ucode_img);
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
s32 nvgpu_acr_lsf_pmu_ncore_ucode_details(struct gk20a *g, void *lsf_ucode_img);
#endif
#endif

View File

@@ -122,7 +122,7 @@ static void acr_ucode_patch_sig(struct gk20a *g,
unsigned int *p_dbg_sig, unsigned int *p_patch_loc,
unsigned int *p_patch_ind, u32 sig_size)
{
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
struct nvgpu_acr *acr = g->acr;
#endif
unsigned int i, j, *p_sig;
@@ -137,7 +137,7 @@ static void acr_ucode_patch_sig(struct gk20a *g,
nvgpu_info(g, "DEBUG MODE\n");
}
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (acr->get_versioned_sig != NULL) {
p_sig = acr->get_versioned_sig(g, acr, p_sig, &sig_size);
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,7 +24,7 @@
#define ACR_BOOTSTRAP_H
#include "nvgpu_acr_interface.h"
#ifdef CONFIG_NVGPU_NEXT
#ifdef CONFIG_NVGPU_NON_FUSA
#include "common/acr/nvgpu_next_acr_bootstrap.h"
#endif
@@ -115,7 +115,7 @@ struct hs_acr {
struct flcn_acr_desc *acr_dmem_desc;
};
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
struct nvgpu_mem acr_falcon2_sysmem_desc;
struct flcn2_acr_desc acr_sysmem_desc;
struct nvgpu_mem ls_pmu_desc;

View File

@@ -47,7 +47,7 @@ int nvgpu_ce_init_support(struct gk20a *g)
nvgpu_cg_blcg_ce_load_enable(g);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
nvgpu_cg_elcg_ce_load_enable(g);
#endif

View File

@@ -30,7 +30,7 @@
#include "falcon_sw_tu104.h"
#endif
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "nvgpu_next_gpuid.h"
#endif
@@ -401,7 +401,7 @@ u32 nvgpu_falcon_get_id(struct nvgpu_falcon *flcn)
return flcn->flcn_id;
}
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
bool nvgpu_falcon_is_falcon2_enabled(struct nvgpu_falcon *flcn)
{
return flcn->is_falcon2_enabled ? true : false;
@@ -462,7 +462,7 @@ static int falcon_sw_chip_init(struct gk20a *g, struct nvgpu_falcon *flcn)
case NVGPU_GPUID_GP10B:
gk20a_falcon_sw_init(flcn);
break;
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_GPUID:
nvgpu_next_falcon_sw_init(flcn);
break;
@@ -473,7 +473,7 @@ static int falcon_sw_chip_init(struct gk20a *g, struct nvgpu_falcon *flcn)
break;
#ifdef CONFIG_NVGPU_DGPU
case NVGPU_GPUID_TU104:
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_DGPU_GPUID:
#endif
tu104_falcon_sw_init(flcn);

View File

@@ -733,7 +733,7 @@ static int nvgpu_engine_init_one_dev(struct nvgpu_fifo *f,
}
}
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
{
int err = nvgpu_next_engine_init_one_dev(g, dev);
if (err != 0) {

View File

@@ -47,9 +47,7 @@
#include <nvgpu/mc.h>
#include <nvgpu/cic.h>
#include <nvgpu/device.h>
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#include <nvgpu/engines.h>
#endif
#include <nvgpu/grmgr.h>
#include "gr_priv.h"
@@ -225,7 +223,7 @@ static int gr_init_setup_hw(struct gk20a *g, struct nvgpu_gr *gr)
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, " ");
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.gr.init.eng_config != NULL) {
g->ops.gr.init.eng_config(g);
}
@@ -562,7 +560,7 @@ static int gr_init_prepare_hw_impl(struct gk20a *g)
sw_non_ctx_load->l[i].value);
}
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
nvgpu_next_gr_init_reset_enable_hw_non_ctx_local(g);
nvgpu_next_gr_init_reset_enable_hw_non_ctx_global(g);
#endif
@@ -618,7 +616,7 @@ static int gr_reset_engine(struct gk20a *g)
return err;
}
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.gr.init.reset_gpcs != NULL) {
err = g->ops.gr.init.reset_gpcs(g);
if (err != 0) {
@@ -796,7 +794,7 @@ int nvgpu_gr_reset(struct gk20a *g)
}
#endif
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
static int gr_init_sm_id_config_early(struct gk20a *g, struct nvgpu_gr *gr)
{
int err;
@@ -852,7 +850,7 @@ static int gr_init_support_impl(struct gk20a *g)
}
}
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
/*
* Move sm id programming before loading ctxsw and gpccs firmwares. This
* is the actual sequence expected by ctxsw ucode.
@@ -1154,7 +1152,7 @@ void nvgpu_gr_sw_ready(struct gk20a *g, bool enable)
}
}
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
#ifdef CONFIG_NVGPU_NON_FUSA
/* Wait until GR is initialized */
void nvgpu_gr_wait_initialized(struct gk20a *g)
{

View File

@@ -125,7 +125,7 @@ static int gr_intr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
if (pending_tpc.tex_exception) {
nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
"GPC%d TPC%d: TEX exception pending", gpc, tpc);
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
#ifdef CONFIG_NVGPU_NON_FUSA
if (g->ops.gr.intr.handle_tex_exception != NULL) {
g->ops.gr.intr.handle_tex_exception(g, gpc, tpc);
}

View File

@@ -158,7 +158,7 @@ int nvgpu_gr_setup_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num,
c->obj_class = class_num;
#ifndef CONFIG_NVGPU_HAL_NON_FUSA
#ifndef CONFIG_NVGPU_NON_FUSA
/*
* Only compute and graphics classes need object context.
* Return success for valid non-compute and non-graphics classes.

View File

@@ -557,7 +557,7 @@ static int nvgpu_gr_obj_ctx_commit_hw_state(struct gk20a *g,
if (err != 0) {
goto restore_fe_go_idle;
}
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.gr.init.auto_go_idle != NULL) {
g->ops.gr.init.auto_go_idle(g, false);
}
@@ -567,7 +567,7 @@ static int nvgpu_gr_obj_ctx_commit_hw_state(struct gk20a *g,
goto restore_fe_go_idle;
}
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.gr.init.auto_go_idle != NULL) {
g->ops.gr.init.auto_go_idle(g, true);
}
@@ -598,7 +598,7 @@ static int nvgpu_gr_obj_ctx_commit_hw_state(struct gk20a *g,
restore_fe_go_idle:
/* restore fe_go_idle */
g->ops.gr.init.fe_go_idle_timeout(g, true);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.gr.init.auto_go_idle != NULL) {
g->ops.gr.init.auto_go_idle(g, true);
}

View File

@@ -121,7 +121,7 @@ struct nvgpu_mem *nvgpu_gr_subctx_get_ctx_header(struct nvgpu_gr_subctx *subctx)
return &subctx->ctx_header;
}
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
#ifdef CONFIG_NVGPU_NON_FUSA
void nvgpu_gr_subctx_set_patch_ctx(struct gk20a *g,
struct nvgpu_gr_subctx *subctx, struct nvgpu_gr_ctx *gr_ctx)
{

View File

@@ -40,7 +40,7 @@ int nvgpu_init_gr_manager(struct gk20a *g)
u32 ffs_bit = 0U;
u32 index;
const struct nvgpu_device *gr_dev = NULL;
#ifdef CONFIG_NVGPU_NEXT
#ifdef CONFIG_NVGPU_NON_FUSA
if (g->ops.grmgr.load_timestamp_prod != NULL) {
g->ops.grmgr.load_timestamp_prod(g);
}
@@ -186,7 +186,7 @@ int nvgpu_init_gr_manager(struct gk20a *g)
return 0;
}
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_MIG)
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_MIG)
static void nvgpu_grmgr_acquire_gr_syspipe(struct gk20a *g, u32 gr_syspipe_id)
{
g->mig.recursive_ref_count = nvgpu_safe_add_u32(
@@ -234,7 +234,7 @@ int nvgpu_grmgr_config_gr_remap_window(struct gk20a *g,
u32 gr_syspipe_id, bool enable)
{
int err = 0;
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_MIG)
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_MIG)
if (nvgpu_grmgr_is_multi_gr_enabled(g)) {
/*
* GR remap window enable/disable sequence for a GR

View File

@@ -36,9 +36,7 @@
#include <nvgpu/types.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/static_analysis.h>
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#include <nvgpu/errata.h>
#endif
#ifdef CONFIG_NVGPU_TRACE
#define nvgpu_gmmu_dbg(g, attrs, fmt, args...) \
@@ -948,7 +946,7 @@ u64 nvgpu_gmmu_map_locked(struct vm_gk20a *vm,
buffer_offset & (ctag_granularity - U64(1)));
}
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
attrs.cbc_comptagline_mode =
g->ops.fb.is_comptagline_mode_enabled != NULL ?
g->ops.fb.is_comptagline_mode_enabled(g) : true;
@@ -956,7 +954,7 @@ u64 nvgpu_gmmu_map_locked(struct vm_gk20a *vm,
#endif
attrs.l3_alloc = ((flags & NVGPU_VM_MAP_L3_ALLOC) != 0U);
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (nvgpu_is_errata_present(g, NVGPU_ERRATA_3288192) &&
(attrs.l3_alloc)) {
nvgpu_gmmu_dbg_v(g, &attrs,
@@ -1032,7 +1030,7 @@ void nvgpu_gmmu_unmap_locked(struct vm_gk20a *vm,
.aperture = APERTURE_INVALID,
};
#ifdef CONFIG_NVGPU_COMPRESSION
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
attrs.cbc_comptagline_mode =
g->ops.fb.is_comptagline_mode_enabled != NULL ?
g->ops.fb.is_comptagline_mode_enabled(g) : true;

View File

@@ -136,7 +136,7 @@ static void nvgpu_remove_mm_support(struct mm_gk20a *mm)
nvgpu_dma_free(g, &mm->mmu_wr_mem);
nvgpu_dma_free(g, &mm->mmu_rd_mem);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (nvgpu_fb_vab_teardown_hal(g) != 0) {
nvgpu_err(g, "failed to teardown VAB");
}
@@ -567,7 +567,7 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
}
}
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (nvgpu_fb_vab_init_hal(g) != 0) {
nvgpu_err(g, "failed to init VAB");
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -31,7 +31,7 @@
#include <nvgpu/netlist.h>
#include <nvgpu/string.h>
#include <nvgpu/static_analysis.h>
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "nvgpu/nvgpu_next_netlist.h"
#endif
@@ -214,7 +214,7 @@ static bool nvgpu_netlist_handle_sw_bundles_region_id(struct gk20a *g,
break;
default:
handled = false;
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
handled = nvgpu_next_netlist_handle_sw_bundles_region_id(g,
region_id, src, size, netlist_vars, &err);
#endif
@@ -369,7 +369,7 @@ static bool nvgpu_netlist_handle_debugger_region_id(struct gk20a *g,
break;
default:
handled = false;
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
handled = nvgpu_next_netlist_handle_debugger_region_id(g,
region_id, src, size, netlist_vars, &err);
#endif
@@ -549,7 +549,7 @@ clean_up:
nvgpu_kfree(g, netlist_vars->sw_method_init.l);
nvgpu_kfree(g, netlist_vars->sw_ctx_load.l);
nvgpu_kfree(g, netlist_vars->sw_non_ctx_load.l);
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
nvgpu_next_netlist_deinit_ctx_vars(g);
#endif
#ifdef CONFIG_NVGPU_DEBUGGER
@@ -576,7 +576,7 @@ clean_up:
nvgpu_kfree(g, netlist_vars->ctxsw_regs.pm_rop.l);
nvgpu_kfree(g, netlist_vars->ctxsw_regs.pm_ucgpc.l);
nvgpu_kfree(g, netlist_vars->ctxsw_regs.etpc.l);
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
nvgpu_next_netlist_deinit_ctxsw_regs(g);
#endif
nvgpu_kfree(g, netlist_vars->ctxsw_regs.pm_cau.l);
@@ -652,7 +652,7 @@ void nvgpu_netlist_deinit_ctx_vars(struct gk20a *g)
nvgpu_kfree(g, netlist_vars->sw_method_init.l);
nvgpu_kfree(g, netlist_vars->sw_ctx_load.l);
nvgpu_kfree(g, netlist_vars->sw_non_ctx_load.l);
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
nvgpu_next_netlist_deinit_ctx_vars(g);
#endif
#ifdef CONFIG_NVGPU_DEBUGGER
@@ -678,7 +678,7 @@ void nvgpu_netlist_deinit_ctx_vars(struct gk20a *g)
nvgpu_kfree(g, netlist_vars->ctxsw_regs.perf_pma.l);
nvgpu_kfree(g, netlist_vars->ctxsw_regs.pm_rop.l);
nvgpu_kfree(g, netlist_vars->ctxsw_regs.pm_ucgpc.l);
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
nvgpu_next_netlist_deinit_ctxsw_regs(g);
#endif
nvgpu_kfree(g, netlist_vars->ctxsw_regs.pm_cau.l);
@@ -912,7 +912,7 @@ u32 nvgpu_netlist_get_ppc_ctxsw_regs_count(struct gk20a *g)
{
u32 count = nvgpu_netlist_get_ppc_ctxsw_regs(g)->count;
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (count == 0U) {
count = nvgpu_next_netlist_get_ppc_ctxsw_regs_count(g);
}
@@ -924,7 +924,7 @@ u32 nvgpu_netlist_get_gpc_ctxsw_regs_count(struct gk20a *g)
{
u32 count = nvgpu_netlist_get_gpc_ctxsw_regs(g)->count;
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (count == 0U) {
count = nvgpu_next_netlist_get_gpc_ctxsw_regs_count(g);
}
@@ -936,7 +936,7 @@ u32 nvgpu_netlist_get_tpc_ctxsw_regs_count(struct gk20a *g)
{
u32 count = nvgpu_netlist_get_tpc_ctxsw_regs(g)->count;
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (count == 0U) {
count = nvgpu_next_netlist_get_tpc_ctxsw_regs_count(g);
}
@@ -948,7 +948,7 @@ u32 nvgpu_netlist_get_etpc_ctxsw_regs_count(struct gk20a *g)
{
u32 count = nvgpu_netlist_get_etpc_ctxsw_regs(g)->count;
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (count == 0U) {
count = nvgpu_next_netlist_get_etpc_ctxsw_regs_count(g);
}
@@ -1011,7 +1011,7 @@ void nvgpu_netlist_print_ctxsw_reg_info(struct gk20a *g)
nvgpu_netlist_get_perf_gpc_control_ctxsw_regs(g)->count);
nvgpu_log_info(g, "GRCTX_REG_LIST_PERF_PMA_CONTROL_COUNT :%d",
nvgpu_netlist_get_perf_pma_control_ctxsw_regs(g)->count);
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
nvgpu_next_netlist_print_ctxsw_reg_info(g);
#endif
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -25,7 +25,7 @@
#include <nvgpu/types.h>
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "common/netlist/nvgpu_next_netlist_priv.h"
#endif
@@ -120,7 +120,7 @@ struct nvgpu_netlist_vars {
struct netlist_av_list sw_method_init;
struct netlist_aiv_list sw_ctx_load;
struct netlist_av_list sw_non_ctx_load;
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
struct nvgpu_next_netlist_vars nvgpu_next;
#endif
struct netlist_av_list sw_veid_bundle_init;
@@ -154,7 +154,7 @@ struct nvgpu_netlist_vars {
struct netlist_aiv_list perf_fbp_control;
struct netlist_aiv_list perf_gpc_control;
struct netlist_aiv_list perf_pma_control;
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
struct nvgpu_next_ctxsw_regs nvgpu_next;
#endif
} ctxsw_regs;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -71,7 +71,7 @@ int nvgpu_pmu_ns_fw_bootstrap(struct gk20a *g, struct nvgpu_pmu *pmu)
g->ops.pmu.setup_apertures(g);
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (nvgpu_is_enabled(g, NVGPU_PMU_NEXT_CORE_ENABLED)) {
nvgpu_pmu_next_core_rtos_args_setup(g, pmu);
} else

View File

@@ -1170,7 +1170,7 @@ static int pmu_prepare_ns_ucode_blob_v1(struct gk20a *g)
nvgpu_mem_wr_n(g, &pmu->fw->ucode, 0, ucode_image,
rtos_fw->fw_image->size);
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
/* alloc boot args */
err = nvgpu_pmu_next_core_rtos_args_allocate(g, pmu);
if (err != 0) {

View File

@@ -34,7 +34,7 @@
#include "lsfm_sw_tu104.h"
#endif
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "nvgpu_next_gpuid.h"
#endif
@@ -168,7 +168,7 @@ int nvgpu_pmu_lsfm_init(struct gk20a *g, struct nvgpu_pmu_lsfm **lsfm)
nvgpu_tu104_lsfm_sw_init(g, *lsfm);
break;
#endif
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_GPUID:
nvgpu_gv100_lsfm_sw_init(g, *lsfm);
break;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -34,7 +34,7 @@
#include <nvgpu/pmu/pmuif/nvgpu_cmdif.h>
#include <nvgpu/kmem.h>
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "nvgpu_next_gpuid.h"
#endif
@@ -55,7 +55,7 @@ static u8 get_perfmon_id(struct nvgpu_pmu *pmu)
break;
case NVGPU_GPUID_GP10B:
case NVGPU_GPUID_GV11B:
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_GPUID:
#endif
unit_id = PMU_UNIT_PERFMON_T18X;
@@ -144,7 +144,7 @@ int nvgpu_pmu_initialize_perfmon(struct gk20a *g, struct nvgpu_pmu *pmu,
case NVGPU_GPUID_GV11B:
nvgpu_gv11b_perfmon_sw_init(g, *perfmon_ptr);
break;
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_GPUID:
nvgpu_next_perfmon_sw_init(g, *perfmon_ptr);
break;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -40,7 +40,7 @@
#include "pg_sw_gp10b.h"
#include "pmu_pg.h"
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "nvgpu_next_gpuid.h"
#endif
@@ -880,7 +880,7 @@ int nvgpu_pmu_pg_init(struct gk20a *g, struct nvgpu_pmu *pmu,
nvgpu_gv11b_pg_sw_init(g, *pg_p);
break;
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_GPUID:
nvgpu_next_pg_sw_init(g, *pg_p);
break;

View File

@@ -182,7 +182,7 @@ int nvgpu_pmu_early_init(struct gk20a *g)
pmu->g = g;
pmu->flcn = &g->pmu_flcn;
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (nvgpu_falcon_is_falcon2_enabled(&g->pmu_flcn)) {
nvgpu_set_enabled(g, NVGPU_PMU_NEXT_CORE_ENABLED, true);
}

View File

@@ -54,7 +54,7 @@
#include <nvgpu/sec2/lsfm.h>
#endif
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#define PMU_PRIV_LOCKDOWN_RELEASE_POLLING_US (1U)
#endif
@@ -238,7 +238,7 @@ void nvgpu_pmu_rtos_cmdline_args_init(struct gk20a *g, struct nvgpu_pmu *pmu)
}
}
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
void nvgpu_pmu_next_core_rtos_args_setup(struct gk20a *g,
struct nvgpu_pmu *pmu)
{
@@ -393,7 +393,7 @@ int nvgpu_pmu_rtos_init(struct gk20a *g)
g->ops.pmu.setup_apertures(g);
}
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (nvgpu_is_enabled(g, NVGPU_PMU_NEXT_CORE_ENABLED)) {
err = nvgpu_pmu_next_core_rtos_args_allocate(g, g->pmu);
if (err != 0) {
@@ -413,7 +413,7 @@ int nvgpu_pmu_rtos_init(struct gk20a *g)
nvgpu_pmu_enable_irq(g, true);
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (nvgpu_is_enabled(g, NVGPU_PMU_NEXT_CORE_ENABLED)) {
g->ops.falcon.bootstrap(g->pmu->flcn, 0U);
err = nvgpu_pmu_wait_for_priv_lockdown_release(g,
@@ -436,7 +436,7 @@ int nvgpu_pmu_rtos_init(struct gk20a *g)
if (err != 0) {
goto exit;
}
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (nvgpu_is_enabled(g, NVGPU_PMU_NEXT_CORE_ENABLED)) {
err = nvgpu_pmu_wait_for_priv_lockdown_release(g,
g->pmu->flcn, U32_MAX);

View File

@@ -110,7 +110,7 @@ void nvgpu_cg_blcg_fifo_load_enable(struct gk20a *g)
if (g->ops.cg.blcg_fifo_load_gating_prod != NULL) {
g->ops.cg.blcg_fifo_load_gating_prod(g, true);
}
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.cg.blcg_runlist_load_gating_prod != NULL) {
g->ops.cg.blcg_runlist_load_gating_prod(g, true);
}
@@ -188,7 +188,7 @@ static void nvgpu_cg_slcg_priring_load_prod(struct gk20a *g, bool enable)
if (g->ops.cg.slcg_priring_load_gating_prod != NULL) {
g->ops.cg.slcg_priring_load_gating_prod(g, enable);
}
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.cg.slcg_rs_ctrl_fbp_load_gating_prod != NULL) {
g->ops.cg.slcg_rs_ctrl_fbp_load_gating_prod(g, enable);
}
@@ -236,7 +236,7 @@ void nvgpu_cg_slcg_fifo_load_enable(struct gk20a *g)
if (g->ops.cg.slcg_fifo_load_gating_prod != NULL) {
g->ops.cg.slcg_fifo_load_gating_prod(g, true);
}
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.cg.slcg_runlist_load_gating_prod != NULL) {
g->ops.cg.slcg_runlist_load_gating_prod(g, true);
}
@@ -290,7 +290,7 @@ done:
nvgpu_mutex_release(&g->cg_pg_lock);
}
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
void nvgpu_cg_slcg_timer_load_enable(struct gk20a *g)
{
nvgpu_log_fn(g, " ");
@@ -507,7 +507,7 @@ void nvgpu_cg_elcg_set_elcg_enabled(struct gk20a *g, bool enable)
nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_RUN);
}
}
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.cg.elcg_ce_load_gating_prod != NULL) {
g->ops.cg.elcg_ce_load_gating_prod(g, g->elcg_enabled);
}
@@ -554,7 +554,7 @@ void nvgpu_cg_blcg_set_blcg_enabled(struct gk20a *g, bool enable)
if (g->ops.cg.blcg_gr_load_gating_prod != NULL) {
g->ops.cg.blcg_gr_load_gating_prod(g, enable);
}
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.cg.blcg_runlist_load_gating_prod != NULL) {
g->ops.cg.blcg_runlist_load_gating_prod(g, enable);
}
@@ -615,7 +615,7 @@ void nvgpu_cg_slcg_set_slcg_enabled(struct gk20a *g, bool enable)
if (g->ops.cg.slcg_fifo_load_gating_prod != NULL) {
g->ops.cg.slcg_fifo_load_gating_prod(g, enable);
}
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.cg.slcg_runlist_load_gating_prod != NULL) {
g->ops.cg.slcg_runlist_load_gating_prod(g, enable);
}
@@ -657,7 +657,7 @@ void nvgpu_cg_elcg_ce_load_enable(struct gk20a *g)
if (!g->elcg_enabled) {
goto done;
}
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
if (g->ops.cg.elcg_ce_load_gating_prod != NULL) {
g->ops.cg.elcg_ce_load_gating_prod(g, true);
}

View File

@@ -51,7 +51,7 @@ static void prepare_resource_reservation(struct gk20a *g,
nvgpu_err(g, "Failed to reset PERFMON unit");
}
nvgpu_cg_slcg_perf_load_enable(g, false);
#ifdef CONFIG_NVGPU_NEXT
#ifdef CONFIG_NVGPU_NON_FUSA
/*
* By default, disable the PMASYS legacy mode for
* NVGPU_NEXT.

View File

@@ -38,7 +38,7 @@
#include <nvgpu/gr/gr_instances.h>
#include <nvgpu/grmgr.h>
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "nvgpu_next_gpuid.h"
#endif
@@ -451,7 +451,7 @@ static int nvgpu_profiler_quiesce_hwpm_streamout_resident(struct gk20a *g,
goto fail;
}
#ifdef CONFIG_NVGPU_NEXT
#ifdef CONFIG_NVGPU_NON_FUSA
NVGPU_NEXT_PROFILER_QUIESCE(g);
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -29,7 +29,7 @@
int nvgpu_ptimer_init(struct gk20a *g)
{
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
nvgpu_cg_slcg_timer_load_enable(g);
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,7 +24,7 @@
#include <nvgpu/sim.h>
#include <nvgpu/netlist.h>
#include <nvgpu/log.h>
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "nvgpu/nvgpu_next_sim.h"
#endif
@@ -341,11 +341,11 @@ int nvgpu_init_sim_netlist_ctx_vars(struct gk20a *g)
goto fail;
}
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
nvgpu_next_init_sim_netlist_ctxsw_regs(g);
#endif
#endif /* CONFIG_NVGPU_DEBUGGER */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
nvgpu_next_init_sim_netlist_ctx_vars(g);
#endif
@@ -780,7 +780,7 @@ fail:
nvgpu_kfree(g, sw_method_init->l);
nvgpu_kfree(g, sw_ctx_load->l);
nvgpu_kfree(g, sw_non_ctx_load->l);
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
nvgpu_next_init_sim_netlist_ctx_vars_free(g);
#endif
nvgpu_kfree(g, sw_veid_bundle_init->l);
@@ -811,7 +811,7 @@ fail:
nvgpu_kfree(g, perf_fbp_control_ctxsw_regs->l);
nvgpu_kfree(g, perf_gpc_control_ctxsw_regs->l);
nvgpu_kfree(g, perf_pma_control_ctxsw_regs->l);
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
nvgpu_next_init_sim_netlist_ctxsw_regs_free(g);
#endif
#endif /* CONFIG_NVGPU_DEBUGGER */

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -31,7 +31,7 @@
#include "bios_sw_gv100.h"
#include "bios_sw_tu104.h"
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "nvgpu_next_gpuid.h"
#endif
@@ -73,7 +73,7 @@ bool nvgpu_bios_check_dgpu(struct gk20a *g, u32 ver)
case NVGPU_GPUID_GV100:
case NVGPU_GPUID_TU104:
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_DGPU_GPUID:
#endif
is_supported = true;
@@ -197,7 +197,7 @@ int nvgpu_bios_sw_init(struct gk20a *g)
nvgpu_tu104_bios_sw_init(g, g->bios);
break;
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#if defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_DGPU_GPUID:
/*
* TODO

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -63,7 +63,7 @@ int gp10b_engine_init_ce_info(struct nvgpu_fifo *f)
}
}
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
{
int err = nvgpu_next_engine_init_one_dev(g, dev);
if (err != 0) {

View File

@@ -1,7 +1,7 @@
/*
* NVIDIA GPU HAL interface.
*
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -36,7 +36,7 @@
#include "hal_tu104.h"
#endif
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "nvgpu_next_gpuid.h"
#endif
@@ -66,7 +66,7 @@ int nvgpu_init_hal(struct gk20a *g)
return -ENODEV;
}
break;
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
case NVGPU_NEXT_GPUID:
if (NVGPU_NEXT_INIT_HAL(g) != 0) {
return -ENODEV;
@@ -87,7 +87,7 @@ int nvgpu_init_hal(struct gk20a *g)
return -ENODEV;
}
break;
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_DGPU)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_DGPU)
case NVGPU_NEXT_DGPU_GPUID:
if (NVGPU_NEXT_DGPU_INIT_HAL(g) != 0) {
return -ENODEV;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -29,7 +29,7 @@
#include "pramin_tu104.h"
#endif
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "nvgpu_next_gpuid.h"
#endif
@@ -46,7 +46,7 @@ void nvgpu_pramin_ops_init(struct gk20a *g)
g->ops.pramin.data032_r = gv100_pramin_data032_r;
break;
case NVGPU_GPUID_TU104:
#ifdef CONFIG_NVGPU_NEXT
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
case NVGPU_NEXT_DGPU_GPUID:
#endif
g->ops.pramin.data032_r = tu104_pramin_data032_r;

View File

@@ -26,7 +26,7 @@
#include <nvgpu/vgpu/vgpu.h>
#include <nvgpu/vgpu/os_init_hal_vgpu.h>
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "nvgpu_next_gpuid.h"
#endif
@@ -43,7 +43,7 @@ int vgpu_init_hal(struct gk20a *g)
case NVGPU_GPUID_GV11B:
err = vgpu_gv11b_init_hal(g);
break;
#ifdef CONFIG_NVGPU_NEXT
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
case NVGPU_NEXT_GPUID:
err = NVGPU_NEXT_VGPU_INIT_HAL(g);
break;

View File

@@ -25,7 +25,7 @@
#include <nvgpu/log.h>
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_cic.h"
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@
#include <nvgpu/types.h>
#include <nvgpu/list.h>
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_device.h"
#endif
@@ -153,7 +153,7 @@ struct nvgpu_device {
u32 pbdma_id;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
/* nvgpu next device info additions */
struct nvgpu_device_next next;
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -172,7 +172,7 @@ struct nvgpu_ecc {
struct nvgpu_ecc_stat **sm_icache_ecc_corrected_err_count;
/** SM icache uncorrected error count. */
struct nvgpu_ecc_stat **sm_icache_ecc_uncorrected_err_count;
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/gr/nvgpu_next_gr_ecc.h"
#endif
@@ -225,7 +225,7 @@ struct nvgpu_ecc {
struct nvgpu_ecc_stat *mmu_fillunit_ecc_corrected_err_count;
/** hubmmu fillunit uncorrected error count. */
struct nvgpu_ecc_stat *mmu_fillunit_ecc_uncorrected_err_count;
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_ecc.h"
#endif
} fb;

View File

@@ -23,7 +23,7 @@
#ifndef NVGPU_ENGINE_STATUS_H
#define NVGPU_ENGINE_STATUS_H
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_engine_status.h"
#endif
@@ -98,7 +98,7 @@ enum nvgpu_engine_status_ctx_status {
struct nvgpu_engine_status_info {
/** Engine status h/w register's read value. */
u32 reg_data;
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
/* nvgpu next engine status additions */
struct nvgpu_next_engine_status_info nvgpu_next;

View File

@@ -31,7 +31,7 @@
#include <nvgpu/types.h>
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_engines.h"
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -34,7 +34,7 @@ struct gk20a;
*/
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_errata.h"
#else
#define ERRATA_FLAGS_NEXT

View File

@@ -178,7 +178,7 @@
/**
* Falcon/Falcon2 fuse settings bit
*/
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#define FCD (0U)
#define FENEN (1U)
#define NVRISCV_BRE_EN (2U)
@@ -243,7 +243,7 @@ struct nvgpu_falcon {
bool is_falcon2_enabled;
/** Indicates if the falcon interrupts are enabled. */
bool is_interrupt_enabled;
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
/** Fuse settings */
unsigned long fuse_settings;
#endif
@@ -656,7 +656,7 @@ void nvgpu_falcon_sw_free(struct gk20a *g, u32 flcn_id);
void nvgpu_falcon_set_irq(struct nvgpu_falcon *flcn, bool enable,
u32 intr_mask, u32 intr_dest);
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
bool nvgpu_falcon_is_falcon2_enabled(struct nvgpu_falcon *flcn);
bool nvgpu_falcon_is_feature_supported(struct nvgpu_falcon *flcn,
u32 feature);

View File

@@ -23,7 +23,7 @@
#ifndef NVGPU_FB_H
#define NVGPU_FB_H
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_fb.h"
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@ struct gk20a;
#include <nvgpu/types.h>
#include <nvgpu/errno.h>
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_fuse.h"
#endif

View File

@@ -146,7 +146,7 @@ struct gops_ce {
void (*ce_app_destroy)(struct gk20a *g);
#endif
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_ce.h"
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -61,7 +61,7 @@ struct gops_cg {
void (*blcg_pmu_load_gating_prod)(struct gk20a *g, bool prod);
void (*blcg_xbar_load_gating_prod)(struct gk20a *g, bool prod);
void (*blcg_hshub_load_gating_prod)(struct gk20a *g, bool prod);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_cg.h"
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -102,7 +102,7 @@ struct gops_perf {
void (*disable_all_perfmons)(struct gk20a *g);
int (*wait_for_idle_pmm_routers)(struct gk20a *g);
int (*wait_for_idle_pma)(struct gk20a *g);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_perf.h"
#endif
};

View File

@@ -53,7 +53,7 @@ struct gops_falcon {
void (*set_bcr)(struct nvgpu_falcon *flcn);
void (*dump_brom_stats)(struct nvgpu_falcon *flcn);
u32 (*get_brom_retcode)(struct nvgpu_falcon *flcn);
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
bool (*is_priv_lockdown)(struct nvgpu_falcon *flcn);
#endif
u32 (*dmemc_blk_mask)(void);

View File

@@ -161,7 +161,7 @@ struct gops_fb_ecc {
u32 *uncorrected_error_mask);
};
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_fb_vab.h"
#endif
@@ -440,7 +440,7 @@ struct gops_fb {
u32 invalidate_replay_val);
#endif
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_fb.h"
#endif

View File

@@ -214,7 +214,7 @@ struct gops_fifo {
u32 exception_mask);
#endif
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_fifo.h"
#endif

View File

@@ -32,7 +32,7 @@
*/
struct gk20a;
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
struct nvgpu_fuse_feature_override_ecc;
#endif
@@ -218,12 +218,12 @@ struct gops_fuse {
int (*read_ucode_version)(struct gk20a *g, u32 falcon_id,
u32 *ucode_version);
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
int (*fetch_falcon_fuse_settings)(struct gk20a *g, u32 falcon_id,
unsigned long *fuse_settings);
#endif
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_fuse.h"
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -453,7 +453,7 @@ struct gops_gr_intr {
void (*flush_channel_tlb)(struct gk20a *g);
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_gr_intr.h"
#endif
int (*handle_fecs_error)(struct gk20a *g,
@@ -834,7 +834,7 @@ struct gops_gr_init {
#endif
bool (*is_allowed_sw_bundle)(struct gk20a *g,
u32 bundle_addr, u32 bundle_value, int *context);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_gr_init.h"
#endif
/** @endcond */
@@ -966,7 +966,7 @@ struct gops_gr_ctxsw_prog {
struct nvgpu_mem *ctx_mem, u64 addr,
u32 aperture_mask);
#endif
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_gr_ctxsw_prog.h"
#endif
};
@@ -1282,7 +1282,7 @@ struct gops_gr {
struct gops_gr_zbc zbc;
struct gops_gr_zcull zcull;
#endif /* CONFIG_NVGPU_GRAPHICS */
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_gr.h"
#endif
/** @endcond */

View File

@@ -76,7 +76,7 @@ struct gops_grmgr {
*/
void (*get_gpcgrp_count)(struct gk20a *g);
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_MIG)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_MIG)
#include "include/nvgpu/nvgpu_next_gops_grmgr.h"
#endif
};

View File

@@ -52,7 +52,7 @@ struct gops_ltc_intr {
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
void (*configure)(struct gk20a *g);
void (*en_illegal_compstat)(struct gk20a *g, bool enable);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_ltc_intr.h"
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
@@ -160,7 +160,7 @@ struct gops_ltc {
u32 (*pri_is_lts_tstg_addr)(struct gk20a *g, u32 addr);
int (*set_l2_sector_promotion)(struct gk20a *g, struct nvgpu_tsg *tsg,
u32 policy);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_ltc.h"
#endif
#endif

View File

@@ -24,9 +24,7 @@
#include <nvgpu/types.h>
#include <nvgpu/mc.h>
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#include <nvgpu/engines.h>
#endif
/**
* @file
@@ -260,7 +258,7 @@ struct gops_mc {
void (*fbpa_isr)(struct gk20a *g);
#endif
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_mc.h"
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -89,7 +89,7 @@ struct gops_pbdma {
struct nvgpu_channel_dump_info *info);
void (*dump_status)(struct gk20a *g,
struct nvgpu_debug_context *o);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_pbdma.h"
#endif
};

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -146,7 +146,7 @@ struct gops_priv_ring {
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_priv_ring.h"
#endif

View File

@@ -89,7 +89,7 @@ struct gops_runlist {
bool wait_preempt);
void (*init_enginfo)(struct gk20a *g, struct nvgpu_fifo *f);
u32 (*get_tsg_max_timeslice)(void);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_gops_runlist.h"
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -52,7 +52,7 @@ struct nvgpu_gr_config;
*/
int nvgpu_gr_fs_state_init(struct gk20a *g, struct nvgpu_gr_config *config);
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/gr/nvgpu_next_fs_state.h"
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -352,7 +352,7 @@ int nvgpu_gr_reset(struct gk20a *g);
#endif
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/gr/nvgpu_next_gr.h"
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -121,7 +121,7 @@
struct gk20a;
struct nvgpu_device;
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_mc.h"
#endif
@@ -216,7 +216,7 @@ struct nvgpu_mc {
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
struct nvgpu_next_mc nvgpu_next;
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -456,7 +456,7 @@ struct mm_gk20a {
/** GMMU debug read buffer. */
struct nvgpu_mem mmu_rd_mem;
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_mm.h"
#endif
};

View File

@@ -348,7 +348,7 @@ u32 *nvgpu_netlist_get_gpccs_inst_list(struct gk20a *g);
u32 *nvgpu_netlist_get_gpccs_data_list(struct gk20a *g);
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include <nvgpu/nvgpu_next_netlist.h>
#endif

View File

@@ -814,7 +814,7 @@ void nvgpu_report_mmu_err(struct gk20a *g, u32 hw_unit,
void gr_intr_report_ctxsw_error(struct gk20a *g, u32 err_type, u32 chid,
u32 mailbox_value);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
#include "include/nvgpu/nvgpu_next_err.h"
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -35,7 +35,7 @@
struct gk20a;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "include/nvgpu/nvgpu_next_pbdma.h"
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -447,7 +447,7 @@ int nvgpu_pmu_rtos_init(struct gk20a *g);
int nvgpu_pmu_destroy(struct gk20a *g, struct nvgpu_pmu *pmu);
void nvgpu_pmu_rtos_cmdline_args_init(struct gk20a *g, struct nvgpu_pmu *pmu);
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
void nvgpu_pmu_next_core_rtos_args_setup(struct gk20a *g,
struct nvgpu_pmu *pmu);
s32 nvgpu_pmu_next_core_rtos_args_allocate(struct gk20a *g,

View File

@@ -490,7 +490,7 @@ void nvgpu_cg_blcg_set_blcg_enabled(struct gk20a *g, bool enable);
void nvgpu_cg_slcg_gr_perf_ltc_load_enable(struct gk20a *g);
void nvgpu_cg_slcg_gr_perf_ltc_load_disable(struct gk20a *g);
void nvgpu_cg_slcg_set_slcg_enabled(struct gk20a *g, bool enable);
#if defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA)
void nvgpu_cg_slcg_timer_load_enable(struct gk20a *g);
#endif
#endif

View File

@@ -34,7 +34,7 @@
*/
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include <nvgpu/nvgpu_next_runlist.h>
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
@@ -102,7 +102,7 @@ struct nvgpu_runlist {
struct nvgpu_mutex runlist_lock;
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
/* nvgpu next runlist info additions */
struct nvgpu_next_runlist nvgpu_next;
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -27,7 +27,7 @@
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/gk20a.h>
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include <nvgpu/nvgpu_next_sim.h>
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

View File

@@ -40,7 +40,7 @@
#include "ioctl.h"
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "os/linux/nvgpu_next_ioctl_prof.h"
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
@@ -849,7 +849,7 @@ long nvgpu_prof_fops_ioctl(struct file *filp, unsigned int cmd,
break;
default:
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
#if defined(CONFIG_NVGPU_NON_FUSA)
err = nvgpu_next_prof_fops_ioctl(prof, cmd, (void *)buf);
#else
nvgpu_err(g, "unrecognized profiler ioctl cmd: 0x%x", cmd);

View File

@@ -86,7 +86,7 @@
#include "cde.h"
#endif
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NON_FUSA)
#include "nvgpu_next_gpuid.h"
#endif
@@ -668,7 +668,7 @@ static struct of_device_id tegra_gk20a_of_match[] = {
{ .compatible = "nvidia,gv11b-vgpu",
.data = &gv11b_vgpu_tegra_platform},
#endif
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) && defined(CONFIG_NVGPU_NON_FUSA)
{ .compatible = NVGPU_NEXT_COMPATIBLE,
.data = &NVGPU_NEXT_PLATFORM},
#ifdef CONFIG_NVGPU_GR_VIRTUALIZATION

View File

@@ -19,7 +19,6 @@
#include <linux/of_platform.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/nvmap.h>
#include <linux/reset.h>
#include <linux/iommu.h>
#include <linux/hashtable.h>
@@ -63,6 +62,7 @@ static int ga10b_tegra_get_clocks(struct device *dev)
void ga10b_tegra_scale_init(struct device *dev)
{
#ifdef CONFIG_TEGRA_BWMG
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a_scale_profile *profile = platform->g->scale_profile;
@@ -72,16 +72,19 @@ void ga10b_tegra_scale_init(struct device *dev)
platform->g->emc3d_ratio = EMC3D_GA10B_RATIO;
gp10b_tegra_scale_init(dev);
#endif
}
static void ga10b_tegra_scale_exit(struct device *dev)
{
#ifdef CONFIG_TEGRA_BWMGR
struct gk20a_platform *platform = gk20a_get_platform(dev);
struct gk20a_scale_profile *profile = platform->g->scale_profile;
if (profile)
tegra_bwmgr_unregister(
(struct tegra_bwmgr_client *)profile->private_data);
#endif
}
static int ga10b_tegra_probe(struct device *dev)