gpu: nvgpu: add FBP index conversion infra for MIG

Add a mapping between local ids and logical ids for FBPs.
This is added to support conversion of FBP local ids to
logical ids when memory partitioning is enabled for SMC.

Bug 200712091

Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Change-Id: Iba33327a98bf427b21f37cbf7f2d5ee5619e7ae5
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2651964
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Antony Clince Alex <aalex@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Debarshi Dutta
2022-01-10 12:23:02 +05:30
committed by mobile promotions
parent 01dccf163d
commit 10c3c0ddbb
5 changed files with 99 additions and 3 deletions

View File

@@ -758,6 +758,65 @@ u32 nvgpu_grmgr_get_fbp_en_mask(struct gk20a *g, u32 gpu_instance_id)
return U32_MAX; return U32_MAX;
} }
/**
 * Convert an FBP local id to its logical id for the given GR instance.
 *
 * @param g             GPU driver struct.
 * @param gr_instance_id  GR instance whose GPU instance owns the mapping.
 * @param fbp_local_id  FBP index local to the GPU instance.
 *
 * @return The logical FBP id from the instance's fbp_mappings table on
 *         success, U32_MAX if either the resolved GPU instance id or the
 *         FBP local id is out of range (an assert also fires in that case).
 */
u32 nvgpu_grmgr_get_fbp_logical_id(struct gk20a *g, u32 gr_instance_id,
		u32 fbp_local_id)
{
	struct nvgpu_gpu_instance *gpu_instance;
	u32 gpu_instance_id = nvgpu_grmgr_get_gpu_instance_id(
			g, gr_instance_id);

	if (gpu_instance_id >= g->mig.num_gpu_instances) {
		/*
		 * Fix: log the offending gpu_instance_id, not fbp_local_id —
		 * the format string describes the gpu instance id.
		 */
		nvgpu_err(g,
			"gpu_instance_id[%u] >= g->mig.num_gpu_instances[%u]",
			gpu_instance_id, g->mig.num_gpu_instances);
		nvgpu_assert(gpu_instance_id >= g->mig.num_gpu_instances);
		return U32_MAX;
	}

	gpu_instance = &g->mig.gpu_instance[gpu_instance_id];

	if (fbp_local_id < gpu_instance->num_fbp) {
		nvgpu_log(g, gpu_dbg_mig,
			"gpu_instance_id[%u], fbp_local_id[%u], fbp_physical_id[%u]",
			gpu_instance->gpu_instance_id, fbp_local_id,
			gpu_instance->fbp_mappings[fbp_local_id]);
		return gpu_instance->fbp_mappings[fbp_local_id];
	} else {
		nvgpu_err(g,
			"fbp_local_id[%u] >= gpu_instance->num_fbp[%u]",
			fbp_local_id, gpu_instance->num_fbp);
		nvgpu_assert(fbp_local_id >= gpu_instance->num_fbp);
		return U32_MAX;
	}
}
/**
 * Report whether SMC memory partitioning is supported for the GPU
 * instance that owns the given GR instance.
 *
 * @param g              GPU driver struct.
 * @param gr_instance_id GR instance to resolve to a GPU instance.
 *
 * @return The instance's is_memory_partition_supported flag, or false
 *         (with an error log and assert) when the resolved GPU instance
 *         id is out of range.
 */
bool nvgpu_grmgr_get_memory_partition_support_status(struct gk20a *g,
		u32 gr_instance_id)
{
	u32 instance_id = nvgpu_grmgr_get_gpu_instance_id(
			g, gr_instance_id);

	/* Guard: reject an out-of-range gpu instance id up front. */
	if (instance_id >= g->mig.num_gpu_instances) {
		nvgpu_err(g,
			"gpu_instance_id[%u] >= g->mig.num_gpu_instances[%u]",
			instance_id, g->mig.num_gpu_instances);
		nvgpu_assert(instance_id >= g->mig.num_gpu_instances);
		return false;
	}

	return g->mig.gpu_instance[instance_id].is_memory_partition_supported;
}
u32 *nvgpu_grmgr_get_fbp_l2_en_mask(struct gk20a *g, u32 gpu_instance_id) u32 *nvgpu_grmgr_get_fbp_l2_en_mask(struct gk20a *g, u32 gpu_instance_id)
{ {
struct nvgpu_gpu_instance *gpu_instance; struct nvgpu_gpu_instance *gpu_instance;

View File

@@ -223,8 +223,9 @@ static int calculate_new_offsets_for_perf_fbp_chiplets(struct gk20a *g,
struct nvgpu_dbg_reg_op *op, u32 reg_chiplet_base, u32 chiplet_offset) struct nvgpu_dbg_reg_op *op, u32 reg_chiplet_base, u32 chiplet_offset)
{ {
int ret = 0; int ret = 0;
u32 fbp_local_index; u32 fbp_local_index, fbp_logical_index;
u32 gr_instance_id; u32 gr_instance_id;
u32 new_offset;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
@@ -242,13 +243,30 @@ static int calculate_new_offsets_for_perf_fbp_chiplets(struct gk20a *g,
* Obtain new offset. * Obtain new offset.
*/ */
/* At present, FBP indexes doesn't need conversion */
if (fbp_local_index >= nvgpu_grmgr_get_gr_num_fbps(g, gr_instance_id)) { if (fbp_local_index >= nvgpu_grmgr_get_gr_num_fbps(g, gr_instance_id)) {
ret = -EINVAL; ret = -EINVAL;
nvgpu_err(g, "Invalid FBP Index"); nvgpu_err(g, "Invalid FBP Index");
return ret; return ret;
} }
/* Convert the FBP local index to a logical index when memory partitioning is enabled. */
if (nvgpu_grmgr_get_memory_partition_support_status(g, gr_instance_id)) {
fbp_logical_index = nvgpu_grmgr_get_fbp_logical_id(g,
gr_instance_id, fbp_local_index);
new_offset = nvgpu_safe_sub_u32(op->offset,
nvgpu_safe_mult_u32(fbp_local_index, chiplet_offset));
new_offset = nvgpu_safe_add_u32(new_offset,
nvgpu_safe_mult_u32(fbp_logical_index, chiplet_offset));
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
"old offset: 0x%08x, new offset = 0x%08x, Local index = %u, logical index = %u",
op->offset, new_offset, fbp_local_index, fbp_logical_index);
op->offset = new_offset;
}
return 0; return 0;
} }

View File

@@ -574,11 +574,18 @@ static int ga10b_grmgr_get_gpu_instance(struct gk20a *g,
} }
if (gpu_instance[index].is_memory_partition_supported == false) { if (gpu_instance[index].is_memory_partition_supported == false) {
u32 tmp_fbp_index = 0;
gpu_instance[index].num_fbp = g->mig.gpu_instance[0].num_fbp; gpu_instance[index].num_fbp = g->mig.gpu_instance[0].num_fbp;
gpu_instance[index].fbp_en_mask = g->mig.gpu_instance[0].fbp_en_mask; gpu_instance[index].fbp_en_mask = g->mig.gpu_instance[0].fbp_en_mask;
nvgpu_memcpy((u8 *)gpu_instance[index].fbp_l2_en_mask, nvgpu_memcpy((u8 *)gpu_instance[index].fbp_l2_en_mask,
(u8 *)g->mig.gpu_instance[0].fbp_l2_en_mask, (u8 *)g->mig.gpu_instance[0].fbp_l2_en_mask,
nvgpu_safe_mult_u64(max_fbps_count, sizeof(u32))); nvgpu_safe_mult_u64(max_fbps_count, sizeof(u32)));
while (tmp_fbp_index < gpu_instance[index].num_fbp) {
gpu_instance[index].fbp_mappings[tmp_fbp_index] = tmp_fbp_index;
tmp_fbp_index = nvgpu_safe_add_u32(tmp_fbp_index, 1U);
}
} else { } else {
/* SMC Memory partition is not yet supported */ /* SMC Memory partition is not yet supported */
nvgpu_assert( nvgpu_assert(

View File

@@ -67,7 +67,11 @@ u32 nvgpu_grmgr_get_gr_logical_gpc_mask(struct gk20a *g, u32 gr_instance_id);
u32 nvgpu_grmgr_get_gr_physical_gpc_mask(struct gk20a *g, u32 gr_instance_id); u32 nvgpu_grmgr_get_gr_physical_gpc_mask(struct gk20a *g, u32 gr_instance_id);
u32 nvgpu_grmgr_get_num_fbps(struct gk20a *g, u32 gpu_instance_id); u32 nvgpu_grmgr_get_num_fbps(struct gk20a *g, u32 gpu_instance_id);
u32 nvgpu_grmgr_get_fbp_en_mask(struct gk20a *g, u32 gpu_instance_id); u32 nvgpu_grmgr_get_fbp_en_mask(struct gk20a *g, u32 gpu_instance_id);
u32 nvgpu_grmgr_get_fbp_logical_id(struct gk20a *g, u32 gr_instance_id,
u32 fbp_local_id);
u32 *nvgpu_grmgr_get_fbp_l2_en_mask(struct gk20a *g, u32 gpu_instance_id); u32 *nvgpu_grmgr_get_fbp_l2_en_mask(struct gk20a *g, u32 gpu_instance_id);
bool nvgpu_grmgr_get_memory_partition_support_status(struct gk20a *g,
u32 gr_instance_id);
static inline bool nvgpu_grmgr_is_mig_type_gpu_instance( static inline bool nvgpu_grmgr_is_mig_type_gpu_instance(
struct nvgpu_gpu_instance *gpu_instance) struct nvgpu_gpu_instance *gpu_instance)

View File

@@ -47,6 +47,9 @@
/** Maximum number of GPC count. */ /** Maximum number of GPC count. */
#define NVGPU_MIG_MAX_GPCS 32U #define NVGPU_MIG_MAX_GPCS 32U
/** Maximum number of FBP count. */
#define NVGPU_MIG_MAX_FBPS 12U
/** Enumerated type used to identify various gpu instance types */ /** Enumerated type used to identify various gpu instance types */
enum nvgpu_mig_gpu_instance_type { enum nvgpu_mig_gpu_instance_type {
NVGPU_MIG_TYPE_PHYSICAL = 0, NVGPU_MIG_TYPE_PHYSICAL = 0,
@@ -117,7 +120,7 @@ struct nvgpu_gpu_instance {
* it is not available. * it is not available.
* For Legacy and MIG, it currently represents physical FBP mask. * For Legacy and MIG, it currently represents physical FBP mask.
* [TODO]: When SMC memory partition will be enabled, a mapping should * [TODO]: When SMC memory partition will be enabled, a mapping should
* be created for local to {logical, physical}. * be created for local to physical.
*/ */
u32 fbp_en_mask; u32 fbp_en_mask;
/** /**
@@ -127,6 +130,11 @@ struct nvgpu_gpu_instance {
* be created for local to {logical, physical}. * be created for local to {logical, physical}.
*/ */
u32 *fbp_l2_en_mask; u32 *fbp_l2_en_mask;
/* Array holding the logical IDs of the FBPs corresponding to the local IDs. */
u32 fbp_mappings[NVGPU_MIG_MAX_FBPS];
/** Memory area to store h/w CE engine ids. */ /** Memory area to store h/w CE engine ids. */
const struct nvgpu_device *lce_devs[NVGPU_MIG_MAX_ENGINES]; const struct nvgpu_device *lce_devs[NVGPU_MIG_MAX_ENGINES];
/* Flag to indicate whether memory partition is supported or not. */ /* Flag to indicate whether memory partition is supported or not. */