gpu: nvgpu: Expose logical mask for MIG
1) Expose the logical mask instead of the physical mask when MIG is
   enabled. For legacy mode, NvGpu exposes the physical mask.
2) Added fb related info in struct nvgpu_gpu_instance.
3) Added a utility API to get the logical id for a given local id:
   nvgpu_grmgr_get_gr_gpc_logical_id().
4) Added a grmgr API to get max_gpc_count:
   nvgpu_grmgr_get_max_gpc_count().
5) Added grmgr FBP APIs to get num_fbps and its enable masks:
   nvgpu_grmgr_get_num_fbps()
   nvgpu_grmgr_get_fbp_en_mask()
   nvgpu_grmgr_get_fbp_rop_l2_en_mask()
6) Used the grmgr FBP APIs in ioctl_ctrl.c.
7) Moved fbp_init_support() into nvgpu_early_init().
8) Added nvgpu_assert handling in grmgr.c.
9) Added a vgpu HAL for get_max_gpc_count().

JIRA NVGPU-5656

Change-Id: I90ac2ad99be608001e7d5d754f6242ad26c70cdb
Signed-off-by: Lakshmanan M <lm@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2538508
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Dinesh T <dt@nvidia.com>
Reviewed-by: Rajesh Devaraj <rdevaraj@nvidia.com>
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
commit 7d473f4dcc
parent e2d8bdc38d
committed by: mobile promotions
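For illustration, a minimal caller sketch of the behavior this change enables: logical GPC ids under MIG, physical GPC ids in legacy mode. The example_get_gpc_id() wrapper is hypothetical and not part of the commit; the two grmgr accessors are the ones in the diff below, and treating num_gpu_instances > 1U as "MIG enabled" is a simplifying assumption of the sketch.

#include <nvgpu/gk20a.h>
#include <nvgpu/grmgr.h>

/* Hypothetical wrapper, not part of this commit: choose which GPC id
 * space to report. Using num_gpu_instances > 1U to stand in for "MIG
 * enabled" is an assumption of this sketch (legacy mode sets it to 1U
 * in nvgpu_init_gr_manager() below).
 */
static u32 example_get_gpc_id(struct gk20a *g, u32 gr_instance_id,
                u32 gpc_local_id)
{
        if (g->mig.num_gpu_instances > 1U) {
                /* MIG: expose the logical id (new API in this change). */
                return nvgpu_grmgr_get_gr_gpc_logical_id(g,
                        gr_instance_id, gpc_local_id);
        }
        /* Legacy: keep exposing the physical id. */
        return nvgpu_grmgr_get_gr_gpc_phys_id(g,
                gr_instance_id, gpc_local_id);
}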
@@ -29,6 +29,7 @@
 #include <nvgpu/grmgr.h>
 #include <nvgpu/engines.h>
 #include <nvgpu/device.h>
+#include <nvgpu/fbp.h>
 
 int nvgpu_init_gr_manager(struct gk20a *g)
 {
@@ -45,10 +46,13 @@ int nvgpu_init_gr_manager(struct gk20a *g)
 #endif
 
         /* Number of gpu instance is 1 for legacy mode */
+        g->mig.max_gpc_count = g->ops.top.get_max_gpc_count(g);
+        nvgpu_assert(g->mig.max_gpc_count > 0U);
         g->mig.gpc_count = g->ops.priv_ring.get_gpc_count(g);
         nvgpu_assert(g->mig.gpc_count > 0U);
         g->mig.num_gpu_instances = 1U;
         g->mig.is_nongr_engine_sharable = false;
+        g->mig.max_fbps_count = nvgpu_fbp_get_max_fbps_count(g->fbp);
 
         gpu_instance->gpu_instance_id = 0U;
         gpu_instance->is_memory_partition_supported = false;
@@ -120,22 +124,34 @@ int nvgpu_init_gr_manager(struct gk20a *g)
         g->mig.recursive_ref_count = 0U;
         g->mig.cur_tid = -1;
+
+        gpu_instance->fbp_en_mask = nvgpu_fbp_get_fbp_en_mask(g->fbp);
+#ifdef CONFIG_NVGPU_NON_FUSA
+        gpu_instance->num_fbp = nvgpu_fbp_get_num_fbps(g->fbp);
+        gpu_instance->fbp_rop_l2_en_mask = nvgpu_fbp_get_rop_l2_en_mask(g->fbp);
+#endif
 
         g->mig.current_gr_syspipe_id = NVGPU_MIG_INVALID_GR_SYSPIPE_ID;
 
         nvgpu_log(g, gpu_dbg_mig,
                 "[Physical device] gpu_instance_id[%u] gr_instance_id[%u] "
-                "gr_syspipe_id[%u] num_gpc[%u] gr_engine_id[%u] "
-                "max_veid_count_per_tsg[%u] veid_start_offset[%u] "
-                "is_memory_partition_support[%d] num_lce[%u] ",
+                "gr_syspipe_id[%u] max_gpc_count[%u] num_gpc[%u] "
+                "gr_engine_id[%u] max_veid_count_per_tsg[%u] "
+                "veid_start_offset[%u] is_memory_partition_support[%d] "
+                "num_lce[%u] max_fbps_count[%u] num_fbp[%u] "
+                "fbp_en_mask [0x%x] ",
                 gpu_instance->gpu_instance_id,
                 gr_syspipe->gr_instance_id,
                 gr_syspipe->gr_syspipe_id,
+                g->mig.max_gpc_count,
                 gr_syspipe->num_gpc,
                 gr_syspipe->gr_dev->engine_id,
                 gr_syspipe->max_veid_count_per_tsg,
                 gr_syspipe->veid_start_offset,
                 gpu_instance->is_memory_partition_supported,
-                gpu_instance->num_lce);
+                gpu_instance->num_lce,
+                g->mig.max_fbps_count,
+                gpu_instance->num_fbp,
+                gpu_instance->fbp_en_mask);
 
         return 0;
 }
@@ -377,9 +393,37 @@ u32 nvgpu_grmgr_get_gr_gpc_phys_id(struct gk20a *g, u32 gr_instance_id,
         gpu_instance = &g->mig.gpu_instance[gpu_instance_id];
         gr_syspipe = &gpu_instance->gr_syspipe;
 
+        nvgpu_assert(gpc_local_id < gr_syspipe->num_gpc);
+
+        nvgpu_log(g, gpu_dbg_mig,
+                "gpu_instance_id[%u] gpc_local_id[%u] physical_id[%u]",
+                gpu_instance_id, gpc_local_id,
+                gr_syspipe->gpcs[gpc_local_id].physical_id);
+
         return gr_syspipe->gpcs[gpc_local_id].physical_id;
 }
+
+u32 nvgpu_grmgr_get_gr_gpc_logical_id(struct gk20a *g, u32 gr_instance_id,
+        u32 gpc_local_id)
+{
+        struct nvgpu_gpu_instance *gpu_instance;
+        struct nvgpu_gr_syspipe *gr_syspipe;
+        u32 gpu_instance_id = nvgpu_grmgr_get_gpu_instance_id(
+                g, gr_instance_id);
+
+        gpu_instance = &g->mig.gpu_instance[gpu_instance_id];
+        gr_syspipe = &gpu_instance->gr_syspipe;
+
+        nvgpu_assert(gpc_local_id < gr_syspipe->num_gpc);
+
+        nvgpu_log(g, gpu_dbg_mig,
+                "gpu_instance_id[%u] gpc_local_id[%u] logical_id[%u]",
+                gpu_instance_id, gpc_local_id,
+                gr_syspipe->gpcs[gpc_local_id].logical_id);
+
+        return gr_syspipe->gpcs[gpc_local_id].logical_id;
+}
 
 u32 nvgpu_grmgr_get_gr_instance_id(struct gk20a *g, u32 gpu_instance_id)
 {
         u32 gr_instance_id = 0U;
@@ -579,3 +623,68 @@ u32 nvgpu_grmgr_get_gr_physical_gpc_mask(struct gk20a *g, u32 gr_instance_id)
 
         return physical_gpc_mask;
 }
+
+u32 nvgpu_grmgr_get_num_fbps(struct gk20a *g, u32 gpu_instance_id)
+{
+        struct nvgpu_gpu_instance *gpu_instance;
+
+        if (gpu_instance_id < g->mig.num_gpu_instances) {
+                gpu_instance = &g->mig.gpu_instance[gpu_instance_id];
+
+                nvgpu_log(g, gpu_dbg_mig,
+                        "gpu_instance_id[%u] num_fbp[%u]",
+                        gpu_instance_id, gpu_instance->num_fbp);
+
+                return gpu_instance->num_fbp;
+        }
+
+        nvgpu_err(g,
+                "gpu_instance_id[%u] >= num_gpu_instances[%u]",
+                gpu_instance_id, g->mig.num_gpu_instances);
+
+        nvgpu_assert(gpu_instance_id < g->mig.num_gpu_instances);
+
+        return U32_MAX;
+}
+
+u32 nvgpu_grmgr_get_fbp_en_mask(struct gk20a *g, u32 gpu_instance_id)
+{
+        struct nvgpu_gpu_instance *gpu_instance;
+
+        if (gpu_instance_id < g->mig.num_gpu_instances) {
+                gpu_instance = &g->mig.gpu_instance[gpu_instance_id];
+
+                nvgpu_log(g, gpu_dbg_mig,
+                        "gpu_instance_id[%u] fbp_en_mask[0x%x]",
+                        gpu_instance_id, gpu_instance->fbp_en_mask);
+
+                return gpu_instance->fbp_en_mask;
+        }
+
+        nvgpu_err(g,
+                "gpu_instance_id[%u] >= num_gpu_instances[%u]",
+                gpu_instance_id, g->mig.num_gpu_instances);
+
+        nvgpu_assert(gpu_instance_id < g->mig.num_gpu_instances);
+
+        return U32_MAX;
+}
+
+u32 *nvgpu_grmgr_get_fbp_rop_l2_en_mask(struct gk20a *g, u32 gpu_instance_id)
+{
+        struct nvgpu_gpu_instance *gpu_instance;
+
+        if (gpu_instance_id < g->mig.num_gpu_instances) {
+                gpu_instance = &g->mig.gpu_instance[gpu_instance_id];
+
+                return gpu_instance->fbp_rop_l2_en_mask;
+        }
+
+        nvgpu_err(g,
+                "gpu_instance_id[%u] >= num_gpu_instances[%u]",
+                gpu_instance_id, g->mig.num_gpu_instances);
+
+        nvgpu_assert(gpu_instance_id < g->mig.num_gpu_instances);
+
+        return NULL;
+}
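For completeness, a minimal sketch of how a caller such as ioctl_ctrl.c might consume the three new FBP accessors in the last hunk. The example_report_fbp_info() helper is hypothetical and not part of this commit; the nvgpu_grmgr_* signatures and the U32_MAX/NULL error returns match the functions above.

#include <nvgpu/gk20a.h>
#include <nvgpu/grmgr.h>

/* Hypothetical helper, not part of this commit: query FBP topology for
 * the legacy (single) GPU instance, id 0U per this change. Note that
 * num_fbp is populated only when CONFIG_NVGPU_NON_FUSA is set (see the
 * init hunk above).
 */
static void example_report_fbp_info(struct gk20a *g)
{
        u32 gpu_instance_id = 0U;
        u32 num_fbps = nvgpu_grmgr_get_num_fbps(g, gpu_instance_id);
        u32 fbp_en_mask = nvgpu_grmgr_get_fbp_en_mask(g, gpu_instance_id);
        u32 *rop_l2_en_mask =
                nvgpu_grmgr_get_fbp_rop_l2_en_mask(g, gpu_instance_id);

        /* The accessors return U32_MAX/NULL for an out-of-range id. */
        if (num_fbps == 0U || num_fbps == U32_MAX ||
                        rop_l2_en_mask == NULL) {
                return;
        }

        nvgpu_log_info(g, "num_fbps=%u fbp_en_mask=0x%x rop_l2[0]=0x%x",
                num_fbps, fbp_en_mask, rop_l2_en_mask[0]);
}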