gpu: nvgpu: remove golden_image_initialized flag from gr_gk20a struct

struct gr_gk20a defines a boolean flag golden_image_initialized to
indicate whether the golden image has been initialized.
The common.gr.obj_ctx unit also added a flag of its own to track
whether the golden image is ready.

Add a new API nvgpu_gr_obj_ctx_is_golden_image_ready() in the
common.gr.obj_ctx unit to query the status of the golden image.
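
The accessor reads the ready flag under the golden image's own
ctx_mutex; a minimal sketch, mirroring the common.gr.obj_ctx hunk
below:

  bool nvgpu_gr_obj_ctx_is_golden_image_ready(
          struct nvgpu_gr_obj_ctx_golden_image *golden_image)
  {
          bool ready;

          /* serialize against golden image creation/teardown */
          nvgpu_mutex_acquire(&golden_image->ctx_mutex);
          ready = golden_image->ready;
          nvgpu_mutex_release(&golden_image->ctx_mutex);

          return ready;
  }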

Use this new API everywhere the code checks whether the golden image
is ready, and remove g->gr.ctx_vars.golden_image_initialized.
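
For example, the gr init path below gains a small wrapper that
short-circuits the FECS context-state query once the golden image is
ready (excerpted from the first hunk):

  static int nvgpu_gr_init_ctx_state(struct gk20a *g)
  {
          if (g->gr->golden_image != NULL &&
              nvgpu_gr_obj_ctx_is_golden_image_ready(g->gr->golden_image)) {
                  return 0;
          }

          /* FECS has not reported the ctxsw image sizes yet */
          return nvgpu_gr_falcon_init_ctx_state(g);
  }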

Also remove the now-unused ctx_mutex from struct gr_gk20a; callers
serialize on the golden image's own ctx_mutex through the new API
instead.

Add a new flag golden_image_initialized to struct nvgpu_pmu_pg and set
it when the golden image is initialized. This lets PMU code check the
state locally and avoids a circular dependency between the GR and PMU
units.
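
Concretely, GR reports the state through a small setter, and the ELPG
enable path checks the PMU-local flag instead of reaching into GR
(both excerpted from the pmu_pg hunks below):

  void nvgpu_pmu_set_golden_image_initialized(struct gk20a *g, bool initialized)
  {
          struct nvgpu_pmu *pmu = &g->pmu;

          pmu->pmu_pg.golden_image_initialized = initialized;
  }

  /* in nvgpu_pmu_enable_elpg() */
  if (unlikely(!pmu->pmu_pg.golden_image_initialized)) {
          goto exit_unlock;
  }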

Jira NVGPU-3112

Change-Id: Id391294cede6424e15a9a9de29c40d013b509534
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2099400
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Deepak Nibade
Date:      2019-04-23 19:18:31 +05:30
Committed: mobile promotions
Parent:    8e96d56cee
Commit:    45c56fd633

12 changed files with 76 additions and 74 deletions

@@ -280,7 +280,6 @@ static void gr_remove_support(struct gk20a *g)
 	nvgpu_gr_zbc_deinit(g, gr->zbc);
 	nvgpu_gr_zcull_deinit(g, gr->zcull);
 	nvgpu_gr_obj_ctx_deinit(g, gr->golden_image);
-	gr->ctx_vars.golden_image_initialized = false;
 }

 static int gr_init_access_map(struct gk20a *g, struct nvgpu_gr *gr)
@@ -368,6 +367,16 @@ clean_up:
 	return -ENOMEM;
 }

+static int nvgpu_gr_init_ctx_state(struct gk20a *g)
+{
+	if (g->gr->golden_image != NULL &&
+	    nvgpu_gr_obj_ctx_is_golden_image_ready(g->gr->golden_image)) {
+		return 0;
+	}
+
+	return nvgpu_gr_falcon_init_ctx_state(g);
+}
+
 static int gr_init_setup_sw(struct gk20a *g)
 {
 	struct nvgpu_gr *gr = g->gr;
@@ -451,12 +460,6 @@ static int gr_init_setup_sw(struct gk20a *g)
 		goto clean_up;
 	}

-	err = nvgpu_mutex_init(&gr->ctx_mutex);
-	if (err != 0) {
-		nvgpu_err(g, "Error in gr.ctx_mutex initialization");
-		goto clean_up;
-	}
 	nvgpu_spinlock_init(&gr->ch_tlb_lock);

 	gr->remove_support = gr_remove_support;
@@ -603,7 +606,7 @@ int nvgpu_gr_reset(struct gk20a *g)
 	/* this appears query for sw states but fecs actually init
 	 ramchain, etc so this is hw init */
-	err = nvgpu_gr_falcon_init_ctx_state(g);
+	err = nvgpu_gr_init_ctx_state(g);
 	if (err != 0) {
 		return err;
 	}
@@ -640,7 +643,7 @@ int nvgpu_gr_init_support(struct gk20a *g)
 	/* this appears query for sw states but fecs actually init
 	 ramchain, etc so this is hw init */
-	err = nvgpu_gr_falcon_init_ctx_state(g);
+	err = nvgpu_gr_init_ctx_state(g);
 	if (err != 0) {
 		return err;
 	}

@@ -158,12 +158,10 @@ int nvgpu_gr_falcon_init_ctx_state(struct gk20a *g)
 	nvgpu_log_fn(g, " ");

-	if (!g->gr->ctx_vars.golden_image_initialized) {
-		/* fecs init ramchain */
-		err = g->ops.gr.falcon.init_ctx_state(g);
-		if (err != 0) {
-			goto out;
-		}
-	}
+	/* fecs init ramchain */
+	err = g->ops.gr.falcon.init_ctx_state(g);
+	if (err != 0) {
+		goto out;
+	}

 out:

@@ -44,7 +44,6 @@ struct gr_channel_map_tlb_entry {
 struct nvgpu_gr {
 	struct gk20a *g;

 	struct {
-		bool golden_image_initialized;
 		u32 golden_image_size;
 		u32 pm_ctxsw_image_size;
@@ -54,8 +53,6 @@ struct nvgpu_gr {
 		u32 zcull_image_size;
 	} ctx_vars;

-	struct nvgpu_mutex ctx_mutex; /* protect golden ctx init */
-
 	struct nvgpu_cond init_wq;
 	bool initialized;

@@ -24,6 +24,7 @@
 #include <nvgpu/log.h>
 #include <nvgpu/io.h>
 #include <nvgpu/mm.h>
+#include <nvgpu/pmu/pmu_pg.h>
 #include <nvgpu/gr/ctx.h>
 #include <nvgpu/gr/subctx.h>
 #include <nvgpu/gr/global_ctx.h>
@@ -524,8 +525,8 @@ restore_fe_go_idle:
 	}

 	golden_image->ready = true;
-	g->gr->ctx_vars.golden_image_initialized = true;
+	nvgpu_pmu_set_golden_image_initialized(g, true);

 	g->ops.gr.falcon.set_current_ctx_invalid(g);

 clean_up:
@@ -678,6 +679,18 @@ u32 *nvgpu_gr_obj_ctx_get_local_golden_image_ptr(
 		golden_image->local_golden_image);
 }

+bool nvgpu_gr_obj_ctx_is_golden_image_ready(
+	struct nvgpu_gr_obj_ctx_golden_image *golden_image)
+{
+	bool ready;
+
+	nvgpu_mutex_acquire(&golden_image->ctx_mutex);
+	ready = golden_image->ready;
+	nvgpu_mutex_release(&golden_image->ctx_mutex);
+
+	return ready;
+}
+
 int nvgpu_gr_obj_ctx_init(struct gk20a *g,
 	struct nvgpu_gr_obj_ctx_golden_image **gr_golden_image, u32 size)
 {
@@ -711,6 +724,7 @@ void nvgpu_gr_obj_ctx_deinit(struct gk20a *g,
 		golden_image->local_golden_image = NULL;
 	}

+	nvgpu_pmu_set_golden_image_initialized(g, false);
 	golden_image->ready = false;

 	nvgpu_kfree(g, golden_image);
 }

@@ -33,8 +33,6 @@
 #include <nvgpu/dma.h>
 #include <nvgpu/pmu/fw.h>

-#include "common/gr/gr_priv.h"
-
 /* state transition :
  * OFF => [OFF_ON_PENDING optional] => ON_PENDING => ON => OFF
  * ON => OFF is always synchronized
@@ -251,7 +249,6 @@ static int pmu_enable_elpg_locked(struct gk20a *g, u8 pg_engine_id)
 int nvgpu_pmu_enable_elpg(struct gk20a *g)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
-	struct nvgpu_gr *gr = g->gr;
 	u8 pg_engine_id;
 	u32 pg_engine_id_list = 0;
@@ -281,7 +278,7 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g)
 	/* do NOT enable elpg until golden ctx is created,
 	 * which is related with the ctx that ELPG save and restore.
 	 */
-	if (unlikely(!gr->ctx_vars.golden_image_initialized)) {
+	if (unlikely(!pmu->pmu_pg.golden_image_initialized)) {
 		goto exit_unlock;
 	}
@@ -901,3 +898,9 @@ void nvgpu_pmu_pg_free_seq_buf(struct nvgpu_pmu *pmu, struct vm_gk20a *vm)
 	nvgpu_dma_unmap_free(vm, &pmu->pmu_pg.seq_buf);
 }
+
+void nvgpu_pmu_set_golden_image_initialized(struct gk20a *g, bool initialized)
+{
+	struct nvgpu_pmu *pmu = &g->pmu;
+
+	pmu->pmu_pg.golden_image_initialized = initialized;
+}

@@ -31,6 +31,7 @@
 #include <nvgpu/io.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/regops.h>
+#include <nvgpu/gr/obj_ctx.h>

 static int regop_bsearch_range_cmp(const void *pkey, const void *pelem)
 {
@@ -69,13 +70,7 @@ static inline bool linear_search(u32 offset, const u32 *list, u64 size)
  */
 static bool gr_context_info_available(struct nvgpu_gr *gr)
 {
-	bool initialized;
-
-	nvgpu_mutex_acquire(&gr->ctx_mutex);
-	initialized = gr->ctx_vars.golden_image_initialized;
-	nvgpu_mutex_release(&gr->ctx_mutex);
-
-	return initialized;
+	return nvgpu_gr_obj_ctx_is_golden_image_ready(gr->golden_image);
 }

 static bool validate_reg_ops(struct gk20a *g,

@@ -752,7 +752,6 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 		goto clean_up;
 	}

-	nvgpu_mutex_init(&gr->ctx_mutex);
 	nvgpu_spinlock_init(&gr->ch_tlb_lock);

 	gr->remove_support = vgpu_remove_gr_support;

@@ -490,7 +490,8 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
 		return -EINVAL;
 	}

-	if (!g->gr->ctx_vars.golden_image_initialized) {
+	if (!nvgpu_gr_obj_ctx_is_golden_image_ready(gr->golden_image)) {
 		nvgpu_log_fn(g, "no context switch header info to work with");
 		return -ENODEV;
 	}
@@ -518,12 +519,6 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
 		num_registers = 1;
 	}

-	if (!g->gr->ctx_vars.golden_image_initialized) {
-		nvgpu_log_fn(g, "no context switch header info to work with");
-		err = -EINVAL;
-		goto cleanup;
-	}
-
 	for (i = 0; i < num_registers; i++) {
 		err = gr_gk20a_find_priv_offset_in_buffer(g,
 			priv_registers[i],
@@ -576,7 +571,8 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
 		return -EINVAL;
 	}

-	if (!g->gr->ctx_vars.golden_image_initialized) {
+	if (!nvgpu_gr_obj_ctx_is_golden_image_ready(gr->golden_image)) {
 		nvgpu_log_fn(g, "no context switch header info to work with");
 		return -ENODEV;
 	}
@@ -601,12 +597,6 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
 		num_registers = 1;
 	}

-	if (!g->gr->ctx_vars.golden_image_initialized) {
-		nvgpu_log_fn(g, "no context switch header info to work with");
-		err = -EINVAL;
-		goto cleanup;
-	}
-
 	for (i = 0; i < num_registers; i++) {
 		err = nvgpu_gr_hwmp_map_find_priv_offset(g, g->gr->hwpm_map,
 			priv_registers[i],

@@ -666,32 +666,30 @@ int gm20b_gr_falcon_init_ctx_state(struct gk20a *g)
 	int ret;

 	nvgpu_log_fn(g, " ");

 	/* query ctxsw image sizes, if golden context is not created */
-	if (!g->gr->ctx_vars.golden_image_initialized) {
-		ret = gm20b_gr_falcon_ctrl_ctxsw(g,
-			NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_IMAGE_SIZE,
-			0, &g->gr->ctx_vars.golden_image_size);
-		if (ret != 0) {
-			nvgpu_err(g,
-				"query golden image size failed");
-			return ret;
-		}
-		ret = gm20b_gr_falcon_ctrl_ctxsw(g,
-			NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_PM_IMAGE_SIZE,
-			0, &g->gr->ctx_vars.pm_ctxsw_image_size);
-		if (ret != 0) {
-			nvgpu_err(g,
-				"query pm ctx image size failed");
-			return ret;
-		}
-		ret = gm20b_gr_falcon_ctrl_ctxsw(g,
-			NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_ZCULL_IMAGE_SIZE,
-			0, &g->gr->ctx_vars.zcull_image_size);
-		if (ret != 0) {
-			nvgpu_err(g,
-				"query zcull ctx image size failed");
-			return ret;
-		}
-	}
+	ret = gm20b_gr_falcon_ctrl_ctxsw(g,
+		NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_IMAGE_SIZE,
+		0, &g->gr->ctx_vars.golden_image_size);
+	if (ret != 0) {
+		nvgpu_err(g,
+			"query golden image size failed");
+		return ret;
+	}
+	ret = gm20b_gr_falcon_ctrl_ctxsw(g,
+		NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_PM_IMAGE_SIZE,
+		0, &g->gr->ctx_vars.pm_ctxsw_image_size);
+	if (ret != 0) {
+		nvgpu_err(g,
+			"query pm ctx image size failed");
+		return ret;
+	}
+	ret = gm20b_gr_falcon_ctrl_ctxsw(g,
+		NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_ZCULL_IMAGE_SIZE,
+		0, &g->gr->ctx_vars.zcull_image_size);
+	if (ret != 0) {
+		nvgpu_err(g,
+			"query zcull ctx image size failed");
+		return ret;
+	}

 	nvgpu_log_fn(g, "done");

@@ -86,6 +86,9 @@ size_t nvgpu_gr_obj_ctx_get_golden_image_size(
 u32 *nvgpu_gr_obj_ctx_get_local_golden_image_ptr(
 	struct nvgpu_gr_obj_ctx_golden_image *golden_image);

+bool nvgpu_gr_obj_ctx_is_golden_image_ready(
+	struct nvgpu_gr_obj_ctx_golden_image *golden_image);
+
 int nvgpu_gr_obj_ctx_init(struct gk20a *g,
 	struct nvgpu_gr_obj_ctx_golden_image **gr_golden_image, u32 size);

 void nvgpu_gr_obj_ctx_deinit(struct gk20a *g,

@@ -58,6 +58,7 @@ struct nvgpu_pmu_pg {
 	bool initialized;
 	u32 stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE];
 	struct nvgpu_mem seq_buf;
+	bool golden_image_initialized;
 };

 /*PG defines used by nvpgu-pmu*/
@@ -94,4 +95,6 @@ int nvgpu_pmu_ap_send_command(struct gk20a *g,
 int nvgpu_pmu_pg_init_seq_buf(struct nvgpu_pmu *pmu, struct vm_gk20a *vm);
 void nvgpu_pmu_pg_free_seq_buf(struct nvgpu_pmu *pmu, struct vm_gk20a *vm);
+
+void nvgpu_pmu_set_golden_image_initialized(struct gk20a *g, bool initialized);

 #endif /* NVGPU_PMU_PG_H */

@@ -890,10 +890,9 @@ static ssize_t tpc_fs_mask_store(struct device *dev,
 	g->ops.gr.set_gpc_tpc_mask(g, 0);

-	nvgpu_gr_obj_ctx_deinit(g, g->gr->golden_image);
-	g->gr->ctx_vars.golden_image_initialized = false;
+	nvgpu_gr_obj_ctx_set_golden_image_size(g->gr->golden_image, 0);
+	nvgpu_gr_obj_ctx_deinit(g, g->gr->golden_image);
 	g->gr->golden_image = NULL;

 	nvgpu_gr_config_deinit(g, g->gr->config);

 	/* Cause next poweron to reinit just gr */