gpu: nvgpu: add macros to get current GR instance

Add macros to get current GR instance id and the pointer
nvgpu_gr_get_cur_instance_ptr()
nvgpu_gr_get_cur_instance_id()

This approach makes sure that the caller gets the GR instance pointer
while holding the mutex g->mig.gr_syspipe_lock in MIG mode. Trying to
access the current GR instance outside of this lock in MIG mode dumps a
warning.

Return 0th instance in case MIG mode is disabled.

Use these macros in nvgpu instead of direct reference to
g->mig.cur_gr_instance.

Store instance id in struct nvgpu_gr. This is to retrieve GR instance
id in functions where struct nvgpu_gr pointer is already available.

Jira NVGPU-5648

Change-Id: Ibfef6a22371bfdccfdc2a7d636b0a3e8d0eff6d9
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2413140
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Deepak Nibade
2020-09-11 16:50:08 +05:30
committed by Alex Waterman
parent db20451d0d
commit ebb66b5d50
5 changed files with 49 additions and 11 deletions

View File

@@ -531,7 +531,8 @@ static int gr_init_prepare_hw_impl(struct gk20a *g)
u32 i;
int err = 0;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, "Prepare GR%u HW", g->mig.cur_gr_instance);
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, "Prepare GR%u HW",
nvgpu_gr_get_cur_instance_id(g));
/** Enable interrupts */
g->ops.gr.intr.enable_interrupts(g, true);
@@ -586,12 +587,13 @@ static int gr_init_prepare_hw(struct gk20a *g)
static int gr_reset_engine(struct gk20a *g)
{
u32 cur_gr_instance_id = nvgpu_gr_get_cur_instance_id(g);
int err;
const struct nvgpu_device *dev =
nvgpu_device_get(g, NVGPU_DEVTYPE_GRAPHICS,
nvgpu_gr_get_syspipe_id(g, g->mig.cur_gr_instance));
nvgpu_log(g, gpu_dbg_gr, "Reset GR%u", g->mig.cur_gr_instance);
nvgpu_log(g, gpu_dbg_gr, "Reset GR%u", cur_gr_instance_id);
/* Reset GR engine: Disable then enable GR engine */
err = g->ops.mc.enable_dev(g, dev, false);
@@ -768,10 +770,10 @@ static int gr_init_ctxsw_falcon_support(struct gk20a *g, struct nvgpu_gr *gr)
static int gr_init_support_impl(struct gk20a *g)
{
struct nvgpu_gr *gr = &g->gr[g->mig.cur_gr_instance];
struct nvgpu_gr *gr = nvgpu_gr_get_cur_instance_ptr(g);
int err = 0;
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, "Init support for GR%u", g->mig.cur_gr_instance);
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, "Init support for GR%u", gr->instance_id);
gr->initialized = false;
@@ -825,10 +827,10 @@ static int gr_init_support_impl(struct gk20a *g)
static void gr_init_support_finalize(struct gk20a *g)
{
struct nvgpu_gr *gr = &g->gr[g->mig.cur_gr_instance];
struct nvgpu_gr *gr = nvgpu_gr_get_cur_instance_ptr(g);
nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gr, "Finalize support for GR%u",
g->mig.cur_gr_instance);
gr->instance_id);
gr->initialized = true;
nvgpu_cond_signal(&gr->init_wq);
@@ -892,6 +894,7 @@ int nvgpu_gr_alloc(struct gk20a *g)
for (i = 0U; i < g->num_gr_instances; i++) {
gr = &g->gr[i];
gr->instance_id = i;
gr->syspipe_id = nvgpu_grmgr_get_gr_syspipe_id(g, i);
if (gr->syspipe_id == U32_MAX) {

View File

@@ -24,6 +24,7 @@
#include <nvgpu/io.h>
#include <nvgpu/static_analysis.h>
#include <nvgpu/gr/config.h>
#include <nvgpu/gr/gr_instances.h>
#include <nvgpu/grmgr.h>
#include "gr_config_priv.h"
@@ -287,7 +288,7 @@ alloc_err:
static int gr_config_init_mig_gpcs(struct nvgpu_gr_config *config)
{
struct gk20a *g = config->g;
u32 cur_gr_instance = g->mig.cur_gr_instance;
u32 cur_gr_instance = nvgpu_gr_get_cur_instance_id(g);
u32 gpc_phys_id;
u32 gpc_id;

View File

@@ -53,6 +53,11 @@ struct nvgpu_gr {
*/
struct gk20a *g;
/**
* Instance ID of GR engine.
*/
u32 instance_id;
/**
* Condition variable for GR initialization.
* Waiters shall wait on this condition to ensure GR engine

View File

@@ -23,6 +23,7 @@
#include <nvgpu/gk20a.h>
#include <nvgpu/types.h>
#include <nvgpu/gr/gr_utils.h>
#include <nvgpu/gr/gr_instances.h>
#include <nvgpu/gr/config.h>
@@ -35,23 +36,27 @@ u32 nvgpu_gr_checksum_u32(u32 a, u32 b)
/**
 * Return the GR falcon pointer of the current GR instance.
 *
 * Resolves the current instance via nvgpu_gr_get_cur_instance_ptr(),
 * which enforces the MIG locking contract (falls back to instance 0
 * when MIG is disabled).
 */
struct nvgpu_gr_falcon *nvgpu_gr_get_falcon_ptr(struct gk20a *g)
{
	/* Removed the stale pre-change line that made this block contain
	 * two return statements (the second unreachable). */
	struct nvgpu_gr *gr = nvgpu_gr_get_cur_instance_ptr(g);

	return gr->falcon;
}
/**
 * Return the GR config pointer of the current GR instance.
 *
 * Resolves the current instance via nvgpu_gr_get_cur_instance_ptr(),
 * which enforces the MIG locking contract (falls back to instance 0
 * when MIG is disabled).
 */
struct nvgpu_gr_config *nvgpu_gr_get_config_ptr(struct gk20a *g)
{
	/* Removed the stale pre-change line that left an unreachable
	 * duplicate return in this block. */
	struct nvgpu_gr *gr = nvgpu_gr_get_cur_instance_ptr(g);

	return gr->config;
}
/**
 * Return the GR interrupt-state pointer of the current GR instance.
 *
 * Resolves the current instance via nvgpu_gr_get_cur_instance_ptr(),
 * which enforces the MIG locking contract (falls back to instance 0
 * when MIG is disabled).
 */
struct nvgpu_gr_intr *nvgpu_gr_get_intr_ptr(struct gk20a *g)
{
	/* Removed the stale pre-change line that left an unreachable
	 * duplicate return in this block. */
	struct nvgpu_gr *gr = nvgpu_gr_get_cur_instance_ptr(g);

	return gr->intr;
}
#ifdef CONFIG_NVGPU_NON_FUSA
/**
 * Return the FECS ECC feature override value of the current GR instance.
 *
 * Resolves the current instance via nvgpu_gr_get_cur_instance_ptr(),
 * which enforces the MIG locking contract (falls back to instance 0
 * when MIG is disabled).
 */
u32 nvgpu_gr_get_override_ecc_val(struct gk20a *g)
{
	/* Removed the stale pre-change line that left an unreachable
	 * duplicate return in this block. */
	struct nvgpu_gr *gr = nvgpu_gr_get_cur_instance_ptr(g);

	return gr->fecs_feature_override_ecc_val;
}
void nvgpu_gr_override_ecc_val(struct nvgpu_gr *gr, u32 ecc_val)

View File

@@ -26,6 +26,30 @@
#include <nvgpu/types.h>
#include <nvgpu/grmgr.h>
#include <nvgpu/gr/gr.h>
#include <nvgpu/lock.h>
#ifdef CONFIG_NVGPU_MIG
/*
 * Return the current GR instance id.
 *
 * In MIG mode g->mig.cur_gr_instance is only stable while
 * g->mig.gr_syspipe_lock is held. A failing tryacquire (return 0) is
 * taken to mean the lock is already held, so reading cur_gr_instance is
 * allowed; a succeeding tryacquire means the caller did NOT hold the
 * lock, in which case the lock is released again and 0 is returned.
 *
 * NOTE(review): the commit message says accessing the instance without
 * the lock "dumps a warning", but no warning call is visible in the
 * else branch — confirm against the upstream change.
 *
 * Macro arguments are parenthesized per standard macro hygiene.
 */
#define nvgpu_gr_get_cur_instance_id(g) \
	({ \
		u32 current_gr_instance_id = 0U; \
		if (nvgpu_is_enabled((g), NVGPU_SUPPORT_MIG)) { \
			if (nvgpu_mutex_tryacquire(&(g)->mig.gr_syspipe_lock) == 0) { \
				current_gr_instance_id = (g)->mig.cur_gr_instance; \
			} else { \
				nvgpu_mutex_release(&(g)->mig.gr_syspipe_lock); \
			} \
		} \
		current_gr_instance_id; \
	})
#else
/* Non-MIG builds have exactly one GR instance: always id 0. */
#define nvgpu_gr_get_cur_instance_id(g) (0U)
#endif
/*
 * Return a pointer to the current GR instance, i.e. &g->gr[id] where id
 * comes from nvgpu_gr_get_cur_instance_id() (instance 0 when MIG is
 * disabled). The macro argument is parenthesized per standard macro
 * hygiene so expressions may be passed as g.
 */
#define nvgpu_gr_get_cur_instance_ptr(g) \
	({ \
		u32 current_gr_instance_id = nvgpu_gr_get_cur_instance_id(g); \
		&(g)->gr[current_gr_instance_id]; \
	})
#ifdef CONFIG_NVGPU_MIG
#define nvgpu_gr_exec_for_each_instance(g, func) \