gpu: nvgpu: remove nvgpu_next files

Remove all nvgpu_next files and move the code into corresponding
nvgpu files.

Merge the nvgpu-next-*.yaml files into the corresponding nvgpu-*.yaml files.

Jira NVGPU-4771

Change-Id: I595311be3c7bbb4f6314811e68712ff01763801e
Signed-off-by: Antony Clince Alex <aalex@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2547557
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Antony Clince Alex
2021-06-19 10:17:51 +00:00
committed by mobile promotions
parent c7d43f5292
commit f9cac0c64d
126 changed files with 2351 additions and 4554 deletions

View File

@@ -31,9 +31,6 @@
#include <nvgpu/netlist.h>
#include <nvgpu/string.h>
#include <nvgpu/static_analysis.h>
#if defined(CONFIG_NVGPU_NON_FUSA)
#include "nvgpu/nvgpu_next_netlist.h"
#endif
#include "netlist_priv.h"
#include "netlist_defs.h"
@@ -1074,4 +1071,333 @@ void nvgpu_netlist_vars_set_regs_base_index(struct gk20a *g, u32 index)
{
g->netlist_vars->regs_base_index = index;
}
#ifdef CONFIG_NVGPU_DEBUGGER
/*
 * Handle the debugger-only (ctxsw register) netlist region IDs that were
 * added for next-gen chips.
 *
 * Parses the region payload @src of @size bytes into the matching aiv list
 * inside @netlist_vars. Compute and LTS regions are accepted unconditionally;
 * graphics regions are accepted only when MIG is disabled (and only compiled
 * in under CONFIG_NVGPU_GRAPHICS).
 *
 * The allocation status of the parsed list is stored in *err_code (0 when
 * no region matched, since err is only written by the case arms).
 *
 * Returns true if @region_id was recognized and consumed, false otherwise.
 */
bool nvgpu_next_netlist_handle_debugger_region_id(struct gk20a *g,
		u32 region_id, u8 *src, u32 size,
		struct nvgpu_netlist_vars *netlist_vars, int *err_code)
{
	int err = 0;
	bool handled = true;

	/* Compute/LTS regions: valid in all modes, including MIG. */
	switch (region_id) {
	case NETLIST_REGIONID_CTXREG_SYS_COMPUTE:
		nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_SYS_COMPUTE");
		err = nvgpu_netlist_alloc_load_aiv_list(g, src, size,
			&netlist_vars->ctxsw_regs.nvgpu_next.sys_compute);
		break;
	case NETLIST_REGIONID_CTXREG_GPC_COMPUTE:
		nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_GPC_COMPUTE");
		err = nvgpu_netlist_alloc_load_aiv_list(g, src, size,
			&netlist_vars->ctxsw_regs.nvgpu_next.gpc_compute);
		break;
	case NETLIST_REGIONID_CTXREG_TPC_COMPUTE:
		nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_TPC_COMPUTE");
		err = nvgpu_netlist_alloc_load_aiv_list(g, src, size,
			&netlist_vars->ctxsw_regs.nvgpu_next.tpc_compute);
		break;
	case NETLIST_REGIONID_CTXREG_PPC_COMPUTE:
		nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PPC_COMPUTE");
		err = nvgpu_netlist_alloc_load_aiv_list(g, src, size,
			&netlist_vars->ctxsw_regs.nvgpu_next.ppc_compute);
		break;
	case NETLIST_REGIONID_CTXREG_ETPC_COMPUTE:
		nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ETPC_COMPUTE");
		err = nvgpu_netlist_alloc_load_aiv_list(g, src, size,
			&netlist_vars->ctxsw_regs.nvgpu_next.etpc_compute);
		break;
	case NETLIST_REGIONID_CTXREG_LTS_BC:
		nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_LTS_BC");
		err = nvgpu_netlist_alloc_load_aiv_list(g, src, size,
			&netlist_vars->ctxsw_regs.nvgpu_next.lts_bc);
		break;
	case NETLIST_REGIONID_CTXREG_LTS_UC:
		nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_LTS_UC");
		err = nvgpu_netlist_alloc_load_aiv_list(g, src, size,
			&netlist_vars->ctxsw_regs.nvgpu_next.lts_uc);
		break;
	default:
		handled = false;
		break;
	}

	/* Graphics regions: only recognized when MIG is disabled. */
	if ((handled == false) && (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG))) {
		handled = true;
		switch (region_id) {
#ifdef CONFIG_NVGPU_GRAPHICS
		case NETLIST_REGIONID_CTXREG_SYS_GFX:
			nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_SYS_GFX");
			err = nvgpu_netlist_alloc_load_aiv_list(g, src, size,
				&netlist_vars->ctxsw_regs.nvgpu_next.sys_gfx);
			break;
		case NETLIST_REGIONID_CTXREG_GPC_GFX:
			nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_GPC_GFX");
			err = nvgpu_netlist_alloc_load_aiv_list(g, src, size,
				&netlist_vars->ctxsw_regs.nvgpu_next.gpc_gfx);
			break;
		case NETLIST_REGIONID_CTXREG_TPC_GFX:
			nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_TPC_GFX");
			err = nvgpu_netlist_alloc_load_aiv_list(g, src, size,
				&netlist_vars->ctxsw_regs.nvgpu_next.tpc_gfx);
			break;
		case NETLIST_REGIONID_CTXREG_PPC_GFX:
			nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_PPC_GFX");
			err = nvgpu_netlist_alloc_load_aiv_list(g, src, size,
				&netlist_vars->ctxsw_regs.nvgpu_next.ppc_gfx);
			break;
		case NETLIST_REGIONID_CTXREG_ETPC_GFX:
			nvgpu_log_info(g, "NETLIST_REGIONID_CTXREG_ETPC_GFX");
			err = nvgpu_netlist_alloc_load_aiv_list(g, src, size,
				&netlist_vars->ctxsw_regs.nvgpu_next.etpc_gfx);
			break;
#endif
		default:
			handled = false;
			break;
		}
	}

	*err_code = err;
	return handled;
}
/*
 * Free the next-gen ctxsw register lists allocated by
 * nvgpu_next_netlist_handle_debugger_region_id().
 *
 * nvgpu_kfree(NULL) is a no-op, so lists that were never loaded are safe
 * to pass here. The gfx lists are only allocated under
 * CONFIG_NVGPU_GRAPHICS, so guard their release the same way — this also
 * makes the function consistent with nvgpu_next_netlist_deinit_ctx_vars(),
 * which already guards its gfx frees.
 */
void nvgpu_next_netlist_deinit_ctxsw_regs(struct gk20a *g)
{
	struct nvgpu_netlist_vars *netlist_vars = g->netlist_vars;

	nvgpu_kfree(g, netlist_vars->ctxsw_regs.nvgpu_next.sys_compute.l);
	nvgpu_kfree(g, netlist_vars->ctxsw_regs.nvgpu_next.gpc_compute.l);
	nvgpu_kfree(g, netlist_vars->ctxsw_regs.nvgpu_next.tpc_compute.l);
	nvgpu_kfree(g, netlist_vars->ctxsw_regs.nvgpu_next.ppc_compute.l);
	nvgpu_kfree(g, netlist_vars->ctxsw_regs.nvgpu_next.etpc_compute.l);
	nvgpu_kfree(g, netlist_vars->ctxsw_regs.nvgpu_next.lts_bc.l);
	nvgpu_kfree(g, netlist_vars->ctxsw_regs.nvgpu_next.lts_uc.l);
#ifdef CONFIG_NVGPU_GRAPHICS
	nvgpu_kfree(g, netlist_vars->ctxsw_regs.nvgpu_next.sys_gfx.l);
	nvgpu_kfree(g, netlist_vars->ctxsw_regs.nvgpu_next.gpc_gfx.l);
	nvgpu_kfree(g, netlist_vars->ctxsw_regs.nvgpu_next.tpc_gfx.l);
	nvgpu_kfree(g, netlist_vars->ctxsw_regs.nvgpu_next.ppc_gfx.l);
	nvgpu_kfree(g, netlist_vars->ctxsw_regs.nvgpu_next.etpc_gfx.l);
#endif
}
#endif /* CONFIG_NVGPU_DEBUGGER */
/*
 * Handle the next-gen sw bundle netlist region IDs.
 *
 * Loads the region payload @src of @size bytes into the matching av list in
 * @netlist_vars. Compute bundles are accepted unconditionally; graphics
 * bundles only when MIG is disabled (and when built with
 * CONFIG_NVGPU_GRAPHICS). The load status is stored in *err_code.
 *
 * Returns true if @region_id was recognized and consumed, false otherwise.
 */
bool nvgpu_next_netlist_handle_sw_bundles_region_id(struct gk20a *g,
		u32 region_id, u8 *src, u32 size,
		struct nvgpu_netlist_vars *netlist_vars, int *err_code)
{
	bool matched = true;
	int status = 0;

	/* Compute bundles first: these are valid in every mode. */
	switch (region_id) {
	case NETLIST_REGIONID_SW_NON_CTX_LOCAL_COMPUTE_LOAD:
		nvgpu_log_info(g, "NETLIST_REGIONID_SW_NON_CTX_LOCAL_COMPUTE_LOAD");
		status = nvgpu_netlist_alloc_load_av_list(g, src, size,
			&netlist_vars->nvgpu_next.sw_non_ctx_local_compute_load);
		break;
	case NETLIST_REGIONID_SW_NON_CTX_GLOBAL_COMPUTE_LOAD:
		nvgpu_log_info(g, "NETLIST_REGIONID_SW_NON_CTX_GLOBAL_COMPUTE_LOAD");
		status = nvgpu_netlist_alloc_load_av_list(g, src, size,
			&netlist_vars->nvgpu_next.sw_non_ctx_global_compute_load);
		break;
	default:
		matched = false;
		break;
	}

	/* Graphics bundles are only recognized outside of MIG mode. */
	if (!matched && !nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
		matched = true;
		switch (region_id) {
#ifdef CONFIG_NVGPU_GRAPHICS
		case NETLIST_REGIONID_SW_NON_CTX_LOCAL_GFX_LOAD:
			nvgpu_log_info(g, "NETLIST_REGIONID_SW_NON_CTX_LOCAL_GFX_LOAD");
			status = nvgpu_netlist_alloc_load_av_list(g, src, size,
				&netlist_vars->nvgpu_next.sw_non_ctx_local_gfx_load);
			break;
		case NETLIST_REGIONID_SW_NON_CTX_GLOBAL_GFX_LOAD:
			nvgpu_log_info(g, "NETLIST_REGIONID_SW_NON_CTX_GLOBAL_GFX_LOAD");
			status = nvgpu_netlist_alloc_load_av_list(g, src, size,
				&netlist_vars->nvgpu_next.sw_non_ctx_global_gfx_load);
			break;
#endif
		default:
			matched = false;
			break;
		}
	}

	*err_code = status;
	return matched;
}
/*
 * Release the next-gen sw bundle lists loaded by
 * nvgpu_next_netlist_handle_sw_bundles_region_id().
 * nvgpu_kfree(NULL) is a no-op, so never-loaded lists are safe here.
 */
void nvgpu_next_netlist_deinit_ctx_vars(struct gk20a *g)
{
	struct nvgpu_netlist_vars *vars = g->netlist_vars;

	nvgpu_kfree(g, vars->nvgpu_next.sw_non_ctx_local_compute_load.l);
	nvgpu_kfree(g, vars->nvgpu_next.sw_non_ctx_global_compute_load.l);
#ifdef CONFIG_NVGPU_GRAPHICS
	nvgpu_kfree(g, vars->nvgpu_next.sw_non_ctx_local_gfx_load.l);
	nvgpu_kfree(g, vars->nvgpu_next.sw_non_ctx_global_gfx_load.l);
#endif
}
#ifdef CONFIG_NVGPU_DEBUGGER
struct netlist_aiv_list *nvgpu_next_netlist_get_sys_compute_ctxsw_regs(
struct gk20a *g)
{
return &g->netlist_vars->ctxsw_regs.nvgpu_next.sys_compute;
}
struct netlist_aiv_list *nvgpu_next_netlist_get_gpc_compute_ctxsw_regs(
struct gk20a *g)
{
return &g->netlist_vars->ctxsw_regs.nvgpu_next.gpc_compute;
}
struct netlist_aiv_list *nvgpu_next_netlist_get_tpc_compute_ctxsw_regs(
struct gk20a *g)
{
return &g->netlist_vars->ctxsw_regs.nvgpu_next.tpc_compute;
}
struct netlist_aiv_list *nvgpu_next_netlist_get_ppc_compute_ctxsw_regs(
struct gk20a *g)
{
return &g->netlist_vars->ctxsw_regs.nvgpu_next.ppc_compute;
}
struct netlist_aiv_list *nvgpu_next_netlist_get_etpc_compute_ctxsw_regs(
struct gk20a *g)
{
return &g->netlist_vars->ctxsw_regs.nvgpu_next.etpc_compute;
}
struct netlist_aiv_list *nvgpu_next_netlist_get_lts_ctxsw_regs(
struct gk20a *g)
{
return &g->netlist_vars->ctxsw_regs.nvgpu_next.lts_bc;
}
struct netlist_aiv_list *nvgpu_next_netlist_get_sys_gfx_ctxsw_regs(
struct gk20a *g)
{
return &g->netlist_vars->ctxsw_regs.nvgpu_next.sys_gfx;
}
struct netlist_aiv_list *nvgpu_next_netlist_get_gpc_gfx_ctxsw_regs(
struct gk20a *g)
{
return &g->netlist_vars->ctxsw_regs.nvgpu_next.gpc_gfx;
}
struct netlist_aiv_list *nvgpu_next_netlist_get_tpc_gfx_ctxsw_regs(
struct gk20a *g)
{
return &g->netlist_vars->ctxsw_regs.nvgpu_next.tpc_gfx;
}
struct netlist_aiv_list *nvgpu_next_netlist_get_ppc_gfx_ctxsw_regs(
struct gk20a *g)
{
return &g->netlist_vars->ctxsw_regs.nvgpu_next.ppc_gfx;
}
struct netlist_aiv_list *nvgpu_next_netlist_get_etpc_gfx_ctxsw_regs(
struct gk20a *g)
{
return &g->netlist_vars->ctxsw_regs.nvgpu_next.etpc_gfx;
}
/*
 * Combined (compute + gfx) register counts per unit. nvgpu_safe_add_u32()
 * guards the u32 addition against overflow.
 */
u32 nvgpu_next_netlist_get_sys_ctxsw_regs_count(struct gk20a *g)
{
	return nvgpu_safe_add_u32(
		nvgpu_next_netlist_get_sys_compute_ctxsw_regs(g)->count,
		nvgpu_next_netlist_get_sys_gfx_ctxsw_regs(g)->count);
}

u32 nvgpu_next_netlist_get_ppc_ctxsw_regs_count(struct gk20a *g)
{
	return nvgpu_safe_add_u32(
		nvgpu_next_netlist_get_ppc_compute_ctxsw_regs(g)->count,
		nvgpu_next_netlist_get_ppc_gfx_ctxsw_regs(g)->count);
}

u32 nvgpu_next_netlist_get_gpc_ctxsw_regs_count(struct gk20a *g)
{
	return nvgpu_safe_add_u32(
		nvgpu_next_netlist_get_gpc_compute_ctxsw_regs(g)->count,
		nvgpu_next_netlist_get_gpc_gfx_ctxsw_regs(g)->count);
}

u32 nvgpu_next_netlist_get_tpc_ctxsw_regs_count(struct gk20a *g)
{
	return nvgpu_safe_add_u32(
		nvgpu_next_netlist_get_tpc_compute_ctxsw_regs(g)->count,
		nvgpu_next_netlist_get_tpc_gfx_ctxsw_regs(g)->count);
}

u32 nvgpu_next_netlist_get_etpc_ctxsw_regs_count(struct gk20a *g)
{
	return nvgpu_safe_add_u32(
		nvgpu_next_netlist_get_etpc_compute_ctxsw_regs(g)->count,
		nvgpu_next_netlist_get_etpc_gfx_ctxsw_regs(g)->count);
}
/*
 * Log the per-unit next-gen ctxsw register list sizes.
 *
 * Fixes: spelling of "GRAPHICS" in the SYS and PPC lines (was "GRAPICS" /
 * "GRAHPICS"), and use %u for the u32 ->count values instead of %d (signed
 * specifier with an unsigned argument is a printk format mismatch).
 */
void nvgpu_next_netlist_print_ctxsw_reg_info(struct gk20a *g)
{
	nvgpu_log_info(g, "GRCTX_REG_LIST_SYS_(COMPUTE/GRAPHICS)_COUNT :%u %u",
		nvgpu_next_netlist_get_sys_compute_ctxsw_regs(g)->count,
		nvgpu_next_netlist_get_sys_gfx_ctxsw_regs(g)->count);
	nvgpu_log_info(g, "GRCTX_REG_LIST_GPC_(COMPUTE/GRAPHICS)_COUNT :%u %u",
		nvgpu_next_netlist_get_gpc_compute_ctxsw_regs(g)->count,
		nvgpu_next_netlist_get_gpc_gfx_ctxsw_regs(g)->count);
	nvgpu_log_info(g, "GRCTX_REG_LIST_TPC_(COMPUTE/GRAPHICS)_COUNT :%u %u",
		nvgpu_next_netlist_get_tpc_compute_ctxsw_regs(g)->count,
		nvgpu_next_netlist_get_tpc_gfx_ctxsw_regs(g)->count);
	nvgpu_log_info(g, "GRCTX_REG_LIST_PPC_(COMPUTE/GRAPHICS)_COUNT :%u %u",
		nvgpu_next_netlist_get_ppc_compute_ctxsw_regs(g)->count,
		nvgpu_next_netlist_get_ppc_gfx_ctxsw_regs(g)->count);
	nvgpu_log_info(g, "GRCTX_REG_LIST_ETPC_(COMPUTE/GRAPHICS)_COUNT :%u %u",
		nvgpu_next_netlist_get_etpc_compute_ctxsw_regs(g)->count,
		nvgpu_next_netlist_get_etpc_gfx_ctxsw_regs(g)->count);
	nvgpu_log_info(g, "GRCTX_REG_LIST_LTS_BC_COUNT :%u",
		nvgpu_next_netlist_get_lts_ctxsw_regs(g)->count);
}
#endif /* CONFIG_NVGPU_DEBUGGER */
struct netlist_av_list *nvgpu_next_netlist_get_sw_non_ctx_local_compute_load_av_list(
struct gk20a *g)
{
return &g->netlist_vars->nvgpu_next.sw_non_ctx_local_compute_load;
}
struct netlist_av_list *nvgpu_next_netlist_get_sw_non_ctx_global_compute_load_av_list(
struct gk20a *g)
{
return &g->netlist_vars->nvgpu_next.sw_non_ctx_global_compute_load;
}
#ifdef CONFIG_NVGPU_GRAPHICS
struct netlist_av_list *nvgpu_next_netlist_get_sw_non_ctx_local_gfx_load_av_list(
struct gk20a *g)
{
return &g->netlist_vars->nvgpu_next.sw_non_ctx_local_gfx_load;
}
struct netlist_av_list *nvgpu_next_netlist_get_sw_non_ctx_global_gfx_load_av_list(
struct gk20a *g)
{
return &g->netlist_vars->nvgpu_next.sw_non_ctx_global_gfx_load;
}
#endif /* CONFIG_NVGPU_GRAPHICS */
#endif