gpu: nvgpu: add gr manager ops-2 and mig infra-2

This CL covers the code changes related to following support,
 - Enabled gr manager ops.
 - Added gr manager init/remove support.
 - Refactor in gpu instance config infra.
 - Refactor in gr syspipe gpcs config infra.

JIRA NVGPU-5645
JIRA NVGPU-5646

Change-Id: Ib2fab2796d76fe105fc5a08f2c5f9bfa36317f7c
Signed-off-by: Lakshmanan M <lm@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2393550
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Lakshmanan M
2020-08-03 14:13:54 +05:30
committed by Alex Waterman
parent 3245d48736
commit 2a6fcec078
16 changed files with 145 additions and 44 deletions

View File

@@ -148,7 +148,7 @@ static void gr_config_set_gpc_mask(struct gk20a *g,
{ {
#ifdef CONFIG_NVGPU_DGPU #ifdef CONFIG_NVGPU_DGPU
if (g->ops.gr.config.get_gpc_mask != NULL) { if (g->ops.gr.config.get_gpc_mask != NULL) {
config->gpc_mask = g->ops.gr.config.get_gpc_mask(g, config); config->gpc_mask = g->ops.gr.config.get_gpc_mask(g);
} else } else
#endif #endif
{ {

View File

@@ -27,16 +27,15 @@
#include <nvgpu/gk20a.h> #include <nvgpu/gk20a.h>
#include <nvgpu/grmgr.h> #include <nvgpu/grmgr.h>
#include <nvgpu/engines.h> #include <nvgpu/engines.h>
#include <nvgpu/gr/config.h>
#include <nvgpu/gr/gr_utils.h>
int nvgpu_init_gr_manager(struct gk20a *g) int nvgpu_init_gr_manager(struct gk20a *g)
{ {
u32 gpc_id;
struct nvgpu_gpu_instance *gpu_instance = &g->mig.gpu_instance[0]; struct nvgpu_gpu_instance *gpu_instance = &g->mig.gpu_instance[0];
struct nvgpu_gr_syspipe *gr_syspipe = &gpu_instance->gr_syspipe; struct nvgpu_gr_syspipe *gr_syspipe = &gpu_instance->gr_syspipe;
struct nvgpu_gr_config *gr_config = nvgpu_gr_get_config_ptr(g);
/* Number of gpu instance is 1 for legacy mode */ /* Number of gpu instance is 1 for legacy mode */
g->mig.gpc_count = g->ops.priv_ring.get_gpc_count(g);
g->mig.num_gpu_instances = 1U; g->mig.num_gpu_instances = 1U;
g->mig.current_gpu_instance_config_id = 0U; g->mig.current_gpu_instance_config_id = 0U;
g->mig.is_nongr_engine_sharable = false; g->mig.is_nongr_engine_sharable = false;
@@ -47,12 +46,21 @@ int nvgpu_init_gr_manager(struct gk20a *g)
gr_syspipe->gr_instance_id = 0U; gr_syspipe->gr_instance_id = 0U;
gr_syspipe->gr_syspipe_id = 0U; gr_syspipe->gr_syspipe_id = 0U;
gr_syspipe->engine_id = 0U; gr_syspipe->engine_id = 0U;
gr_syspipe->num_gpc = nvgpu_gr_config_get_gpc_count(gr_config); gr_syspipe->num_gpc = g->mig.gpc_count;
g->mig.gpcgrp_gpc_count[0] = gr_syspipe->num_gpc; g->mig.gpcgrp_gpc_count[0] = gr_syspipe->num_gpc;
gr_syspipe->logical_gpc_mask = nvgpu_gr_config_get_gpc_mask(gr_config); if (g->ops.gr.config.get_gpc_mask != NULL) {
gr_syspipe->gpc_mask = g->ops.gr.config.get_gpc_mask(g);
} else {
gr_syspipe->gpc_mask = nvgpu_safe_sub_u32(
BIT32(gr_syspipe->num_gpc),
1U);
}
/* In Legacy mode, Local GPC Id = physical GPC Id = Logical GPC Id */ /* In Legacy mode, Local GPC Id = physical GPC Id = Logical GPC Id */
gr_syspipe->gpc_mask = gr_syspipe->logical_gpc_mask; for (gpc_id = 0U; gpc_id < gr_syspipe->num_gpc; gpc_id++) {
gr_syspipe->physical_gpc_mask = gr_syspipe->gpc_mask; gr_syspipe->gpcs[gpc_id].logical_id =
gr_syspipe->gpcs[gpc_id].physical_id = gpc_id;
gr_syspipe->gpcs[gpc_id].gpcgrp_id = 0U;
}
gr_syspipe->max_veid_count_per_tsg = g->fifo.max_subctx_count; gr_syspipe->max_veid_count_per_tsg = g->fifo.max_subctx_count;
gr_syspipe->veid_start_offset = 0U; gr_syspipe->veid_start_offset = 0U;
@@ -61,8 +69,14 @@ int nvgpu_init_gr_manager(struct gk20a *g)
NVGPU_MIG_MAX_ENGINES, NVGPU_ENGINE_ASYNC_CE); NVGPU_MIG_MAX_ENGINES, NVGPU_ENGINE_ASYNC_CE);
if (gpu_instance->num_lce == 0U) { if (gpu_instance->num_lce == 0U) {
nvgpu_err(g, "nvgpu_init_gr_manager[failed]-no LCEs"); /* Fall back to GRCE */
return -ENOMEM; gpu_instance->num_lce = nvgpu_engine_get_ids(g,
gpu_instance->lce_engine_ids,
NVGPU_MIG_MAX_ENGINES, NVGPU_ENGINE_GRCE);
if (gpu_instance->num_lce == 0U) {
nvgpu_warn(g,
"No GRCE engine available on this device!");
}
} }
g->mig.max_gr_sys_pipes_supported = 1U; g->mig.max_gr_sys_pipes_supported = 1U;
@@ -73,24 +87,16 @@ int nvgpu_init_gr_manager(struct gk20a *g)
nvgpu_log(g, gpu_dbg_mig, nvgpu_log(g, gpu_dbg_mig,
"[non MIG boot] gpu_instance_id[%u] gr_instance_id[%u] " "[non MIG boot] gpu_instance_id[%u] gr_instance_id[%u] "
"gr_syspipe_id[%u] num_gpc[%u] physical_gpc_mask[%x] " "gr_syspipe_id[%u] num_gpc[%u] gr_engine_id[%u] "
"logical_gpc_mask[%x] gr_engine_id[%u] "
"max_veid_count_per_tsg[%u] veid_start_offset[%u] " "max_veid_count_per_tsg[%u] veid_start_offset[%u] "
"veid_end_offset[%u] gpcgrp_id[%u] "
"is_memory_partition_support[%d] num_lce[%u] ", "is_memory_partition_support[%d] num_lce[%u] ",
gpu_instance->gpu_instance_id, gpu_instance->gpu_instance_id,
gr_syspipe->gr_instance_id, gr_syspipe->gr_instance_id,
gr_syspipe->gr_syspipe_id, gr_syspipe->gr_syspipe_id,
gr_syspipe->num_gpc, gr_syspipe->num_gpc,
gr_syspipe->physical_gpc_mask,
gr_syspipe->logical_gpc_mask,
gr_syspipe->engine_id, gr_syspipe->engine_id,
gr_syspipe->max_veid_count_per_tsg, gr_syspipe->max_veid_count_per_tsg,
gr_syspipe->veid_start_offset, gr_syspipe->veid_start_offset,
nvgpu_safe_sub_u32(
nvgpu_safe_add_u32(gr_syspipe->veid_start_offset,
gr_syspipe->max_veid_count_per_tsg), 1U),
gr_syspipe->gpcgrp_id,
gpu_instance->is_memory_partition_supported, gpu_instance->is_memory_partition_supported,
gpu_instance->num_lce); gpu_instance->num_lce);
@@ -120,6 +126,14 @@ int nvgpu_grmgr_config_gr_remap_window(struct gk20a *g,
gr_syspipe_id = 0U; gr_syspipe_id = 0U;
} }
nvgpu_log(g, gpu_dbg_mig,
"nvgpu_grmgr_config_gr_remap_window "
"current_gr_syspipe_id[%u] requested_gr_syspipe_id[%u] "
"enable[%d] ",
g->mig.current_gr_syspipe_id,
gr_syspipe_id,
enable);
if (((g->mig.current_gr_syspipe_id != gr_syspipe_id) && if (((g->mig.current_gr_syspipe_id != gr_syspipe_id) &&
(gr_syspipe_id < (gr_syspipe_id <
g->ops.grmgr.get_max_sys_pipes(g))) || g->ops.grmgr.get_max_sys_pipes(g))) ||

View File

@@ -305,6 +305,13 @@ int nvgpu_prepare_poweroff(struct gk20a *g)
if (tmp_ret != 0) { if (tmp_ret != 0) {
ret = tmp_ret; ret = tmp_ret;
} }
if (g->ops.grmgr.remove_gr_manager != NULL) {
tmp_ret = g->ops.grmgr.remove_gr_manager(g);
if (tmp_ret != 0) {
nvgpu_err(g, "g->ops.grmgr.remove_gr_manager-failed");
ret = tmp_ret;
}
}
tmp_ret = g->ops.mm.mm_suspend(g); tmp_ret = g->ops.mm.mm_suspend(g);
if (tmp_ret != 0) { if (tmp_ret != 0) {
ret = tmp_ret; ret = tmp_ret;
@@ -640,6 +647,7 @@ int nvgpu_finalize_poweron(struct gk20a *g)
NVGPU_INIT_TABLE_ENTRY(&nvgpu_init_acquire_tpc_pg_lock, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(&nvgpu_init_acquire_tpc_pg_lock, NO_FLAG),
NVGPU_INIT_TABLE_ENTRY(&nvgpu_init_power_gate_gr, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(&nvgpu_init_power_gate_gr, NO_FLAG),
#endif #endif
NVGPU_INIT_TABLE_ENTRY(g->ops.grmgr.init_gr_manager, NO_FLAG),
/* prepare portion of sw required for enable hw */ /* prepare portion of sw required for enable hw */
NVGPU_INIT_TABLE_ENTRY(g->ops.gr.gr_prepare_sw, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(g->ops.gr.gr_prepare_sw, NO_FLAG),
NVGPU_INIT_TABLE_ENTRY(g->ops.gr.gr_enable_hw, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(g->ops.gr.gr_enable_hw, NO_FLAG),

View File

@@ -1525,6 +1525,20 @@ int vgpu_gr_set_preemption_mode(struct nvgpu_channel *ch,
return err; return err;
} }
u32 vgpu_gr_get_gpc_count(struct gk20a *g)
{
struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
return priv->constants.gpc_count;
}
u32 vgpu_gr_get_gpc_mask(struct gk20a *g)
{
struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
return priv->constants.gpc_mask;
}
#ifdef CONFIG_NVGPU_DEBUGGER #ifdef CONFIG_NVGPU_DEBUGGER
u64 vgpu_gr_gk20a_tpc_enabled_exceptions(struct gk20a *g) u64 vgpu_gr_gk20a_tpc_enabled_exceptions(struct gk20a *g)

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -92,6 +92,8 @@ int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info);
void vgpu_gr_handle_sm_esr_event(struct gk20a *g, void vgpu_gr_handle_sm_esr_event(struct gk20a *g,
struct tegra_vgpu_sm_esr_info *info); struct tegra_vgpu_sm_esr_info *info);
int vgpu_init_gr_support(struct gk20a *g); int vgpu_init_gr_support(struct gk20a *g);
u32 vgpu_gr_get_gpc_count(struct gk20a *g);
u32 vgpu_gr_get_gpc_mask(struct gk20a *g);
#ifdef CONFIG_NVGPU_DEBUGGER #ifdef CONFIG_NVGPU_DEBUGGER
u64 vgpu_gr_gk20a_tpc_enabled_exceptions(struct gk20a *g); u64 vgpu_gr_gk20a_tpc_enabled_exceptions(struct gk20a *g);
int vgpu_gr_set_mmu_debug_mode(struct gk20a *g, int vgpu_gr_set_mmu_debug_mode(struct gk20a *g,

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -74,6 +74,12 @@ void vgpu_remove_support_common(struct gk20a *g)
nvgpu_gr_remove_support(g); nvgpu_gr_remove_support(g);
if (g->ops.grmgr.remove_gr_manager != NULL) {
if (g->ops.grmgr.remove_gr_manager(g) != 0) {
nvgpu_err(g, "g->ops.grmgr.remove_gr_manager-failed");
}
}
if (g->fifo.remove_support) { if (g->fifo.remove_support) {
g->fifo.remove_support(&g->fifo); g->fifo.remove_support(&g->fifo);
} }
@@ -203,6 +209,12 @@ int vgpu_finalize_poweron_common(struct gk20a *g)
return err; return err;
} }
err = g->ops.grmgr.init_gr_manager(g);
if (err != 0) {
nvgpu_err(g, "failed to init gk20a grmgr");
return err;
}
err = vgpu_init_gr_support(g); err = vgpu_init_gr_support(g);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "failed to init gk20a gr"); nvgpu_err(g, "failed to init gk20a gr");

View File

@@ -35,8 +35,7 @@ u32 gm20b_gr_config_get_tpc_count_in_gpc(struct gk20a *g,
u32 gm20b_gr_config_get_pes_tpc_mask(struct gk20a *g, u32 gm20b_gr_config_get_pes_tpc_mask(struct gk20a *g,
struct nvgpu_gr_config *config, u32 gpc_index, u32 pes_index); struct nvgpu_gr_config *config, u32 gpc_index, u32 pes_index);
u32 gm20b_gr_config_get_pd_dist_skip_table_size(void); u32 gm20b_gr_config_get_pd_dist_skip_table_size(void);
u32 gm20b_gr_config_get_gpc_mask(struct gk20a *g, u32 gm20b_gr_config_get_gpc_mask(struct gk20a *g);
struct nvgpu_gr_config *config);
#if defined(CONFIG_NVGPU_HAL_NON_FUSA) #if defined(CONFIG_NVGPU_HAL_NON_FUSA)
int gm20b_gr_config_init_sm_id_table(struct gk20a *g, int gm20b_gr_config_init_sm_id_table(struct gk20a *g,
struct nvgpu_gr_config *gr_config); struct nvgpu_gr_config *gr_config);

View File

@@ -73,11 +73,10 @@ u32 gm20b_gr_config_get_pd_dist_skip_table_size(void)
return gr_pd_dist_skip_table__size_1_v(); return gr_pd_dist_skip_table__size_1_v();
} }
u32 gm20b_gr_config_get_gpc_mask(struct gk20a *g, u32 gm20b_gr_config_get_gpc_mask(struct gk20a *g)
struct nvgpu_gr_config *config)
{ {
u32 val; u32 val;
u32 tpc_cnt = nvgpu_gr_config_get_max_gpc_count(config); u32 tpc_cnt = g->ops.top.get_max_gpc_count(g);
/* /*
* For register NV_FUSE_STATUS_OPT_GPC a set bit with index i indicates * For register NV_FUSE_STATUS_OPT_GPC a set bit with index i indicates

View File

@@ -1217,6 +1217,8 @@ int gm20b_init_hal(struct gk20a *g)
gops->top = gm20b_ops.top; gops->top = gm20b_ops.top;
gops->grmgr = gm20b_ops.grmgr;
/* Lone functions */ /* Lone functions */
gops->chip_init_gpu_characteristics = gops->chip_init_gpu_characteristics =
gm20b_ops.chip_init_gpu_characteristics; gm20b_ops.chip_init_gpu_characteristics;

View File

@@ -1313,6 +1313,7 @@ int gp10b_init_hal(struct gk20a *g)
gops->fuse = gp10b_ops.fuse; gops->fuse = gp10b_ops.fuse;
gops->tpc = gp10b_ops.tpc; gops->tpc = gp10b_ops.tpc;
gops->top = gp10b_ops.top; gops->top = gp10b_ops.top;
gops->grmgr = gp10b_ops.grmgr;
/* Lone Functions */ /* Lone Functions */
gops->chip_init_gpu_characteristics = gops->chip_init_gpu_characteristics =

View File

@@ -1570,6 +1570,7 @@ int gv11b_init_hal(struct gk20a *g)
gops->clk_arb = gv11b_ops.clk_arb; gops->clk_arb = gv11b_ops.clk_arb;
#endif #endif
gops->top = gv11b_ops.top; gops->top = gv11b_ops.top;
gops->grmgr = gv11b_ops.grmgr;
/* Lone functions */ /* Lone functions */
gops->chip_init_gpu_characteristics = gops->chip_init_gpu_characteristics =

View File

@@ -1707,6 +1707,7 @@ int tu104_init_hal(struct gk20a *g)
#endif #endif
gops->gsp = tu104_ops.gsp; gops->gsp = tu104_ops.gsp;
gops->top = tu104_ops.top; gops->top = tu104_ops.top;
gops->grmgr = tu104_ops.grmgr;
/* clocks */ /* clocks */
gops->clk.init_clk_support = tu104_ops.clk.init_clk_support; gops->clk.init_clk_support = tu104_ops.clk.init_clk_support;

View File

@@ -305,6 +305,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
#endif #endif
}, },
.config = { .config = {
.get_gpc_mask = vgpu_gr_get_gpc_mask,
.get_gpc_tpc_mask = vgpu_gr_get_gpc_tpc_mask, .get_gpc_tpc_mask = vgpu_gr_get_gpc_tpc_mask,
.init_sm_id_table = vgpu_gr_init_sm_id_table, .init_sm_id_table = vgpu_gr_init_sm_id_table,
}, },
@@ -836,6 +837,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.isr = NULL, .isr = NULL,
.set_ppriv_timeout_settings = NULL, .set_ppriv_timeout_settings = NULL,
.enum_ltc = NULL, .enum_ltc = NULL,
.get_gpc_count = vgpu_gr_get_gpc_count,
}, },
.fuse = { .fuse = {
.check_priv_security = NULL, .check_priv_security = NULL,
@@ -928,6 +930,7 @@ int vgpu_gp10b_init_hal(struct gk20a *g)
gops->fuse = vgpu_gp10b_ops.fuse; gops->fuse = vgpu_gp10b_ops.fuse;
gops->top = vgpu_gp10b_ops.top; gops->top = vgpu_gp10b_ops.top;
gops->grmgr = vgpu_gp10b_ops.grmgr;
#ifdef CONFIG_NVGPU_FECS_TRACE #ifdef CONFIG_NVGPU_FECS_TRACE
nvgpu_set_enabled(g, NVGPU_SUPPORT_FECS_CTXSW_TRACE, true); nvgpu_set_enabled(g, NVGPU_SUPPORT_FECS_CTXSW_TRACE, true);

View File

@@ -380,6 +380,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
#endif #endif
}, },
.config = { .config = {
.get_gpc_mask = vgpu_gr_get_gpc_mask,
.get_gpc_tpc_mask = vgpu_gr_get_gpc_tpc_mask, .get_gpc_tpc_mask = vgpu_gr_get_gpc_tpc_mask,
.init_sm_id_table = vgpu_gr_init_sm_id_table, .init_sm_id_table = vgpu_gr_init_sm_id_table,
}, },
@@ -962,6 +963,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.isr = NULL, .isr = NULL,
.set_ppriv_timeout_settings = NULL, .set_ppriv_timeout_settings = NULL,
.enum_ltc = NULL, .enum_ltc = NULL,
.get_gpc_count = vgpu_gr_get_gpc_count,
}, },
.fuse = { .fuse = {
.is_opt_ecc_enable = NULL, .is_opt_ecc_enable = NULL,
@@ -1048,6 +1050,7 @@ int vgpu_gv11b_init_hal(struct gk20a *g)
gops->priv_ring = vgpu_gv11b_ops.priv_ring; gops->priv_ring = vgpu_gv11b_ops.priv_ring;
gops->fuse = vgpu_gv11b_ops.fuse; gops->fuse = vgpu_gv11b_ops.fuse;
gops->top = vgpu_gv11b_ops.top; gops->top = vgpu_gv11b_ops.top;
gops->grmgr = vgpu_gv11b_ops.grmgr;
#ifdef CONFIG_NVGPU_FECS_TRACE #ifdef CONFIG_NVGPU_FECS_TRACE
nvgpu_set_enabled(g, NVGPU_SUPPORT_FECS_CTXSW_TRACE, true); nvgpu_set_enabled(g, NVGPU_SUPPORT_FECS_CTXSW_TRACE, true);

View File

@@ -780,8 +780,7 @@ struct gops_gr_config {
u32 (*get_gpc_tpc_mask)(struct gk20a *g, u32 (*get_gpc_tpc_mask)(struct gk20a *g,
struct nvgpu_gr_config *config, struct nvgpu_gr_config *config,
u32 gpc_index); u32 gpc_index);
u32 (*get_gpc_mask)(struct gk20a *g, u32 (*get_gpc_mask)(struct gk20a *g);
struct nvgpu_gr_config *config);
u32 (*get_tpc_count_in_gpc)(struct gk20a *g, u32 (*get_tpc_count_in_gpc)(struct gk20a *g,
struct nvgpu_gr_config *config, struct nvgpu_gr_config *config,
u32 gpc_index); u32 gpc_index);

View File

@@ -44,6 +44,22 @@
/** Maximum config name size. */ /** Maximum config name size. */
#define NVGPU_MIG_MAX_CONFIG_NAME_SIZE 256U #define NVGPU_MIG_MAX_CONFIG_NAME_SIZE 256U
/** Maximum number of GPC count. */
#define NVGPU_MIG_MAX_GPCS 32U
/**
* @brief GPC Id information.
* This struct describes the logical, physical and gpcgrp id of each GPC.
*/
struct nvgpu_gpc {
/** Logical GPC Id which is used to access GPC registers. */
u32 logical_id;
/** Physical GPC Id. */
u32 physical_id;
/** GPC group Id. */
u32 gpcgrp_id;
};
/** /**
* @brief GR syspipe information. * @brief GR syspipe information.
* This struct describes the number of gpc, physical_gpc_mask, veid, etc * This struct describes the number of gpc, physical_gpc_mask, veid, etc
@@ -60,16 +76,10 @@ struct nvgpu_gr_syspipe {
u32 engine_id; u32 engine_id;
/** Number of GPC assigned to this gr syspipe. */ /** Number of GPC assigned to this gr syspipe. */
u32 num_gpc; u32 num_gpc;
/**
* Mask of Physical GPCs. A set bit indicates GPC is available, /** GPC Id information (logical, physical and gpcgrp Ids). */
* otherwise it is not available. struct nvgpu_gpc gpcs[NVGPU_MIG_MAX_GPCS];
*/
u32 physical_gpc_mask;
/**
* Mask of Logical GPCs. A set bit indicates GPC is available,
* otherwise it is not available.
*/
u32 logical_gpc_mask;
/** /**
* Mask of local GPCs belongs to this syspipe. A set bit indicates * Mask of local GPCs belongs to this syspipe. A set bit indicates
* GPC is available, otherwise it is not available. * GPC is available, otherwise it is not available.
@@ -79,8 +89,6 @@ struct nvgpu_gr_syspipe {
u32 max_veid_count_per_tsg; u32 max_veid_count_per_tsg;
/** VEID start offset. */ /** VEID start offset. */
u32 veid_start_offset; u32 veid_start_offset;
/** GPC group Id. */
u32 gpcgrp_id;
}; };
/** /**
@@ -101,6 +109,24 @@ struct nvgpu_gpu_instance {
bool is_memory_partition_supported; bool is_memory_partition_supported;
}; };
/**
* @brief GPU instance static configuration information.
* This struct describes the gpu_instance_id, number of gpc, gr_syspipe_id,
 * veid, etc. associated with a particular static config.
*/
struct nvgpu_gpu_instance_static_config {
/** GPU instance Id */
u32 gpu_instance_id;
/** GR syspipe id which is used to set gr remap window */
u32 gr_syspipe_id;
/** Number of GPC assigned to this config. */
u32 num_gpc;
/** Maximum veid allocated to this gr syspipe. */
u32 max_veid_count_per_tsg;
/** VEID start offset. */
u32 veid_start_offset;
};
/** /**
* @brief GPU instance configuration information. * @brief GPU instance configuration information.
* This struct describes the number of gpu instances, gr_syspipe, LCEs, etc * This struct describes the number of gpu instances, gr_syspipe, LCEs, etc
@@ -108,12 +134,15 @@ struct nvgpu_gpu_instance {
*/ */
struct nvgpu_gpu_instance_config { struct nvgpu_gpu_instance_config {
/** Name of the gpu instance config. */ /** Name of the gpu instance config. */
const char config_name[NVGPU_MIG_MAX_CONFIG_NAME_SIZE]; char config_name[NVGPU_MIG_MAX_CONFIG_NAME_SIZE];
/** Number of gpu instance associated to this config. */ /** Number of gpu instance associated to this config. */
u32 num_gpu_instances; u32 num_gpu_instances;
/** Array of gpu instance information associated to this config. */ /**
struct nvgpu_gpu_instance * Array of gpu instance static config information associated
gpu_instance[NVGPU_MIG_MAX_GPU_INSTANCES]; * to this config (gpu_instance_id, gr_syspipe_id, num_gpc, etc).
*/
struct nvgpu_gpu_instance_static_config
gpu_instance_static_config[NVGPU_MIG_MAX_GPU_INSTANCES];
}; };
/** /**
@@ -122,8 +151,14 @@ struct nvgpu_gpu_instance_config {
 * supported by a particular GPU. * supported by a particular GPU.
*/ */
struct nvgpu_mig_gpu_instance_config { struct nvgpu_mig_gpu_instance_config {
/** Total number of GR syspipes supported by HW after floor sweeping. */
u32 usable_gr_syspipe_count;
/** Usable GR sys pipe mask. */
u32 usable_gr_syspipe_mask;
/** Number of gpu instance configurations. */ /** Number of gpu instance configurations. */
u32 num_config_supported; u32 num_config_supported;
/** Total Number of GPCs (priv_ring enumerated (floor swept) value). */
u32 gpc_count;
/** GPC count associated to each GPC group. */ /** GPC count associated to each GPC group. */
u32 gpcgrp_gpc_count[NVGPU_MIG_MAX_GPCGRP]; u32 gpcgrp_gpc_count[NVGPU_MIG_MAX_GPCGRP];
/** Array of gpu instance configuration information. */ /** Array of gpu instance configuration information. */
@@ -137,6 +172,14 @@ struct nvgpu_mig_gpu_instance_config {
 * by a particular GPU. * by a particular GPU.
*/ */
struct nvgpu_mig { struct nvgpu_mig {
/** Total number of GR syspipes supported by HW after floor sweeping. */
u32 usable_gr_syspipe_count;
/** Usable GR sys pipe mask. */
u32 usable_gr_syspipe_mask;
/** Array of usable GR sys pipe instance id. */
u32 usable_gr_syspipe_instance_id[NVGPU_MIG_MAX_ENGINES];
/** Total Number of GPCs (priv_ring enumerated (floor swept) value). */
u32 gpc_count;
/** GPC count associated to each GPC group. */ /** GPC count associated to each GPC group. */
u32 gpcgrp_gpc_count[NVGPU_MIG_MAX_GPCGRP]; u32 gpcgrp_gpc_count[NVGPU_MIG_MAX_GPCGRP];
/** Enabled gpu instances count. */ /** Enabled gpu instances count. */