mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 02:22:34 +03:00
gpu: nvgpu: Move non-fp pmu members from gpu_ops
Move non-function pointer members out of the pmu and pmu_ver
substructs of gpu_ops. Ideally gpu_ops will have only function
pointers, better matching its intended purpose and improving
readability.
- g.ops.pmu_ver.cmd_id_zbc_table_update has been changed to
g.pmu_ver_cmd_id_zbc_table_update
- g.ops.pmu.lspmuwprinitdone has been changed to
g.pmu_lsf_pmu_wpr_init_done
- g.ops.pmu.lsfloadedfalconid has been changed to
g.pmu_lsf_loaded_falcon_id
Boolean flags have been implemented using the enabled.h API
- g.ops.pmu_ver.is_pmu_zbc_save_supported moved to
common flag NVGPU_PMU_ZBC_SAVE
- g.ops.pmu.fecsbootstrapdone moved to
common flag NVGPU_PMU_FECS_BOOTSTRAP_DONE
Jira NVGPU-74
Change-Id: I08fb20f8f382277f2c579f06d561914c000ea6e0
Signed-off-by: Sunny He <suhe@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1530981
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
192f1039e1
commit
b50b379c19
@@ -15,6 +15,7 @@
|
||||
#include <nvgpu/dma.h>
|
||||
#include <nvgpu/log.h>
|
||||
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
|
||||
#include <nvgpu/enabled.h>
|
||||
|
||||
#include "gk20a/gk20a.h"
|
||||
|
||||
@@ -356,7 +357,7 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g)
|
||||
pmu->initialized = true;
|
||||
nvgpu_pmu_state_change(g, PMU_STATE_STARTED, true);
|
||||
|
||||
if (g->ops.pmu_ver.is_pmu_zbc_save_supported) {
|
||||
if (nvgpu_is_enabled(g, NVGPU_PMU_ZBC_SAVE)) {
|
||||
/* Save zbc table after PMU is initialized. */
|
||||
pmu->zbc_ready = true;
|
||||
gk20a_pmu_save_zbc(g, 0xf);
|
||||
@@ -507,8 +508,8 @@ int nvgpu_pmu_destroy(struct gk20a *g)
|
||||
pmu->pmu_ready = false;
|
||||
pmu->perfmon_ready = false;
|
||||
pmu->zbc_ready = false;
|
||||
g->ops.pmu.lspmuwprinitdone = false;
|
||||
g->ops.pmu.fecsbootstrapdone = false;
|
||||
g->pmu_lsf_pmu_wpr_init_done = false;
|
||||
__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
|
||||
|
||||
nvgpu_log_fn(g, "done");
|
||||
return 0;
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
#include <nvgpu/log.h>
|
||||
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
|
||||
#include <nvgpu/firmware.h>
|
||||
#include <nvgpu/enabled.h>
|
||||
|
||||
#include "gk20a/gk20a.h"
|
||||
|
||||
@@ -1463,8 +1464,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
|
||||
g->ops.pmu_ver.set_perfmon_cntr_group_id =
|
||||
set_perfmon_cntr_group_id_v2;
|
||||
g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2;
|
||||
g->ops.pmu_ver.cmd_id_zbc_table_update = 16;
|
||||
g->ops.pmu_ver.is_pmu_zbc_save_supported = true;
|
||||
g->pmu_ver_cmd_id_zbc_table_update = 16;
|
||||
__nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
|
||||
g->ops.pmu_ver.get_pmu_cmdline_args_size =
|
||||
pmu_cmdline_size_v4;
|
||||
g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq =
|
||||
@@ -1565,8 +1566,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
|
||||
g->ops.pmu_ver.set_perfmon_cntr_group_id =
|
||||
set_perfmon_cntr_group_id_v2;
|
||||
g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2;
|
||||
g->ops.pmu_ver.cmd_id_zbc_table_update = 16;
|
||||
g->ops.pmu_ver.is_pmu_zbc_save_supported = false;
|
||||
g->pmu_ver_cmd_id_zbc_table_update = 16;
|
||||
__nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, false);
|
||||
g->ops.pmu_ver.get_pmu_cmdline_args_size =
|
||||
pmu_cmdline_size_v6;
|
||||
g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq =
|
||||
@@ -1673,8 +1674,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
|
||||
g->ops.pmu_ver.set_perfmon_cntr_group_id =
|
||||
set_perfmon_cntr_group_id_v2;
|
||||
g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2;
|
||||
g->ops.pmu_ver.cmd_id_zbc_table_update = 16;
|
||||
g->ops.pmu_ver.is_pmu_zbc_save_supported = true;
|
||||
g->pmu_ver_cmd_id_zbc_table_update = 16;
|
||||
__nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
|
||||
g->ops.pmu_ver.get_pmu_cmdline_args_size =
|
||||
pmu_cmdline_size_v5;
|
||||
g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq =
|
||||
@@ -1792,8 +1793,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
|
||||
g->ops.pmu_ver.set_perfmon_cntr_group_id =
|
||||
set_perfmon_cntr_group_id_v2;
|
||||
g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2;
|
||||
g->ops.pmu_ver.cmd_id_zbc_table_update = 16;
|
||||
g->ops.pmu_ver.is_pmu_zbc_save_supported = true;
|
||||
g->pmu_ver_cmd_id_zbc_table_update = 16;
|
||||
__nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
|
||||
g->ops.pmu_ver.get_pmu_cmdline_args_size =
|
||||
pmu_cmdline_size_v3;
|
||||
g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq =
|
||||
@@ -1895,8 +1896,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
|
||||
g->ops.pmu_ver.set_perfmon_cntr_group_id =
|
||||
set_perfmon_cntr_group_id_v2;
|
||||
g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2;
|
||||
g->ops.pmu_ver.cmd_id_zbc_table_update = 16;
|
||||
g->ops.pmu_ver.is_pmu_zbc_save_supported = true;
|
||||
g->pmu_ver_cmd_id_zbc_table_update = 16;
|
||||
__nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
|
||||
g->ops.pmu_ver.get_pmu_cmdline_args_size =
|
||||
pmu_cmdline_size_v2;
|
||||
g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq =
|
||||
@@ -1991,8 +1992,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
|
||||
pg_cmd_eng_buf_load_set_dma_offset_v0;
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx =
|
||||
pg_cmd_eng_buf_load_set_dma_idx_v0;
|
||||
g->ops.pmu_ver.cmd_id_zbc_table_update = 16;
|
||||
g->ops.pmu_ver.is_pmu_zbc_save_supported = true;
|
||||
g->pmu_ver_cmd_id_zbc_table_update = 16;
|
||||
__nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
|
||||
g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v0;
|
||||
g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v0;
|
||||
g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v0;
|
||||
@@ -2093,8 +2094,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
|
||||
pg_cmd_eng_buf_load_set_dma_offset_v0;
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx =
|
||||
pg_cmd_eng_buf_load_set_dma_idx_v0;
|
||||
g->ops.pmu_ver.cmd_id_zbc_table_update = 14;
|
||||
g->ops.pmu_ver.is_pmu_zbc_save_supported = true;
|
||||
g->pmu_ver_cmd_id_zbc_table_update = 14;
|
||||
__nvgpu_set_enabled(g, NVGPU_PMU_ZBC_SAVE, true);
|
||||
g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v0;
|
||||
g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v0;
|
||||
g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v0;
|
||||
|
||||
@@ -656,9 +656,6 @@ struct gpu_ops {
|
||||
u8 value);
|
||||
void (*pg_cmd_eng_buf_load_set_dma_idx)(struct pmu_pg_cmd *pg,
|
||||
u8 value);
|
||||
/*used for change of enum zbc update cmd id from ver 0 to ver1*/
|
||||
u32 cmd_id_zbc_table_update;
|
||||
bool is_pmu_zbc_save_supported;
|
||||
} pmu_ver;
|
||||
struct {
|
||||
int (*get_netlist_name)(struct gk20a *g, int index, char *name);
|
||||
@@ -822,9 +819,6 @@ struct gpu_ops {
|
||||
void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid);
|
||||
void (*handle_ext_irq)(struct gk20a *g, u32 intr);
|
||||
void (*set_irqmask)(struct gk20a *g);
|
||||
u32 lspmuwprinitdone;
|
||||
u32 lsfloadedfalconid;
|
||||
bool fecsbootstrapdone;
|
||||
} pmu;
|
||||
struct {
|
||||
int (*init_debugfs)(struct gk20a *g);
|
||||
@@ -1197,6 +1191,10 @@ struct gk20a {
|
||||
|
||||
struct gpu_ops ops;
|
||||
u32 mc_intr_mask_restore[4];
|
||||
/*used for change of enum zbc update cmd id from ver 0 to ver1*/
|
||||
u32 pmu_ver_cmd_id_zbc_table_update;
|
||||
u32 pmu_lsf_pmu_wpr_init_done;
|
||||
u32 pmu_lsf_loaded_falcon_id;
|
||||
|
||||
int irqs_enabled;
|
||||
int irq_stall; /* can be same as irq_nonstall in case of PCI */
|
||||
|
||||
@@ -598,7 +598,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
|
||||
memset(&cmd, 0, sizeof(struct pmu_cmd));
|
||||
cmd.hdr.unit_id = PMU_UNIT_PG;
|
||||
cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_zbc_cmd);
|
||||
cmd.cmd.zbc.cmd_type = g->ops.pmu_ver.cmd_id_zbc_table_update;
|
||||
cmd.cmd.zbc.cmd_type = g->pmu_ver_cmd_id_zbc_table_update;
|
||||
cmd.cmd.zbc.entry_mask = ZBC_MASK(entries);
|
||||
|
||||
pmu->zbc_save_done = 0;
|
||||
|
||||
@@ -755,8 +755,8 @@ static int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
|
||||
}
|
||||
|
||||
flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
|
||||
g->ops.pmu.lsfloadedfalconid = 0;
|
||||
if (g->ops.pmu.fecsbootstrapdone) {
|
||||
g->pmu_lsf_loaded_falcon_id = 0;
|
||||
if (nvgpu_is_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE)) {
|
||||
/* this must be recovery so bootstrap fecs and gpccs */
|
||||
if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
|
||||
gr_gm20b_load_gpccs_with_bootloader(g);
|
||||
@@ -776,7 +776,7 @@ static int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
|
||||
|
||||
} else {
|
||||
/* cold boot or rg exit */
|
||||
g->ops.pmu.fecsbootstrapdone = true;
|
||||
__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, true);
|
||||
if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
|
||||
gr_gm20b_load_gpccs_with_bootloader(g);
|
||||
} else {
|
||||
|
||||
@@ -132,7 +132,7 @@ static void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg,
|
||||
gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_INIT_WPR_REGION");
|
||||
|
||||
if (msg->msg.acr.acrmsg.errorcode == PMU_ACR_SUCCESS)
|
||||
g->ops.pmu.lspmuwprinitdone = 1;
|
||||
g->pmu_lsf_pmu_wpr_init_done = 1;
|
||||
gk20a_dbg_fn("done");
|
||||
}
|
||||
|
||||
@@ -171,7 +171,7 @@ void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
|
||||
gm20b_dbg_pmu("reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON");
|
||||
|
||||
gm20b_dbg_pmu("response code = %x\n", msg->msg.acr.acrmsg.falconid);
|
||||
g->ops.pmu.lsfloadedfalconid = msg->msg.acr.acrmsg.falconid;
|
||||
g->pmu_lsf_loaded_falcon_id = msg->msg.acr.acrmsg.falconid;
|
||||
gk20a_dbg_fn("done");
|
||||
}
|
||||
|
||||
@@ -205,8 +205,8 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
|
||||
|
||||
gk20a_dbg_fn("");
|
||||
|
||||
gm20b_dbg_pmu("wprinit status = %x\n", g->ops.pmu.lspmuwprinitdone);
|
||||
if (g->ops.pmu.lspmuwprinitdone) {
|
||||
gm20b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
|
||||
if (g->pmu_lsf_pmu_wpr_init_done) {
|
||||
/* send message to load FECS falcon */
|
||||
memset(&cmd, 0, sizeof(struct pmu_cmd));
|
||||
cmd.hdr.unit_id = PMU_UNIT_ACR;
|
||||
@@ -236,12 +236,12 @@ static int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
|
||||
if (!(falconidmask == (1 << LSF_FALCON_ID_FECS)))
|
||||
return -EINVAL;
|
||||
/* check whether pmu is ready to bootstrap lsf if not wait for it */
|
||||
if (!g->ops.pmu.lspmuwprinitdone) {
|
||||
if (!g->pmu_lsf_pmu_wpr_init_done) {
|
||||
pmu_wait_message_cond(&g->pmu,
|
||||
gk20a_get_gr_idle_timeout(g),
|
||||
&g->ops.pmu.lspmuwprinitdone, 1);
|
||||
&g->pmu_lsf_pmu_wpr_init_done, 1);
|
||||
/* check again if it still not ready indicate an error */
|
||||
if (!g->ops.pmu.lspmuwprinitdone) {
|
||||
if (!g->pmu_lsf_pmu_wpr_init_done) {
|
||||
nvgpu_err(g, "PMU not ready to load LSF");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
@@ -299,8 +299,8 @@ void gm20b_init_pmu_ops(struct gk20a *g)
|
||||
gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
|
||||
gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
|
||||
gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
|
||||
gops->pmu.lspmuwprinitdone = 0;
|
||||
gops->pmu.fecsbootstrapdone = false;
|
||||
g->pmu_lsf_pmu_wpr_init_done = 0;
|
||||
__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
|
||||
gops->pmu.write_dmatrfbase = gm20b_write_dmatrfbase;
|
||||
gops->pmu.pmu_elpg_statistics = gk20a_pmu_elpg_statistics;
|
||||
gops->pmu.pmu_pg_init_param = NULL;
|
||||
|
||||
@@ -233,8 +233,8 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
|
||||
|
||||
gk20a_dbg_fn("");
|
||||
|
||||
gp106_dbg_pmu("wprinit status = %x\n", g->ops.pmu.lspmuwprinitdone);
|
||||
if (g->ops.pmu.lspmuwprinitdone) {
|
||||
gp106_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
|
||||
if (g->pmu_lsf_pmu_wpr_init_done) {
|
||||
/* send message to load FECS falcon */
|
||||
memset(&cmd, 0, sizeof(struct pmu_cmd));
|
||||
cmd.hdr.unit_id = PMU_UNIT_ACR;
|
||||
@@ -268,14 +268,14 @@ static int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
|
||||
if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) |
|
||||
(1 << LSF_FALCON_ID_GPCCS)))
|
||||
return -EINVAL;
|
||||
g->ops.pmu.lsfloadedfalconid = 0;
|
||||
g->pmu_lsf_loaded_falcon_id = 0;
|
||||
/* check whether pmu is ready to bootstrap lsf if not wait for it */
|
||||
if (!g->ops.pmu.lspmuwprinitdone) {
|
||||
if (!g->pmu_lsf_pmu_wpr_init_done) {
|
||||
pmu_wait_message_cond(&g->pmu,
|
||||
gk20a_get_gr_idle_timeout(g),
|
||||
&g->ops.pmu.lspmuwprinitdone, 1);
|
||||
&g->pmu_lsf_pmu_wpr_init_done, 1);
|
||||
/* check again if it still not ready indicate an error */
|
||||
if (!g->ops.pmu.lspmuwprinitdone) {
|
||||
if (!g->pmu_lsf_pmu_wpr_init_done) {
|
||||
nvgpu_err(g, "PMU not ready to load LSF");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
@@ -284,8 +284,8 @@ static int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
|
||||
gp106_pmu_load_multiple_falcons(g, falconidmask, flags);
|
||||
pmu_wait_message_cond(&g->pmu,
|
||||
gk20a_get_gr_idle_timeout(g),
|
||||
&g->ops.pmu.lsfloadedfalconid, falconidmask);
|
||||
if (g->ops.pmu.lsfloadedfalconid != falconidmask)
|
||||
&g->pmu_lsf_loaded_falcon_id, falconidmask);
|
||||
if (g->pmu_lsf_loaded_falcon_id != falconidmask)
|
||||
return -ETIMEDOUT;
|
||||
return 0;
|
||||
}
|
||||
@@ -318,8 +318,8 @@ void gp106_init_pmu_ops(struct gk20a *g)
|
||||
gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
|
||||
gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
|
||||
gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
|
||||
gops->pmu.lspmuwprinitdone = 0;
|
||||
gops->pmu.fecsbootstrapdone = false;
|
||||
g->pmu_lsf_pmu_wpr_init_done = 0;
|
||||
__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
|
||||
gops->pmu.write_dmatrfbase = gp10b_write_dmatrfbase;
|
||||
gops->pmu.pmu_elpg_statistics = gp106_pmu_elpg_statistics;
|
||||
gops->pmu.pmu_pg_init_param = gp106_pg_param_init;
|
||||
|
||||
@@ -148,8 +148,8 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
|
||||
|
||||
gk20a_dbg_fn("");
|
||||
|
||||
gp10b_dbg_pmu("wprinit status = %x\n", g->ops.pmu.lspmuwprinitdone);
|
||||
if (g->ops.pmu.lspmuwprinitdone) {
|
||||
gp10b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
|
||||
if (g->pmu_lsf_pmu_wpr_init_done) {
|
||||
/* send message to load FECS falcon */
|
||||
memset(&cmd, 0, sizeof(struct pmu_cmd));
|
||||
cmd.hdr.unit_id = PMU_UNIT_ACR;
|
||||
@@ -185,14 +185,14 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
|
||||
if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) |
|
||||
(1 << LSF_FALCON_ID_GPCCS)))
|
||||
return -EINVAL;
|
||||
g->ops.pmu.lsfloadedfalconid = 0;
|
||||
g->pmu_lsf_loaded_falcon_id = 0;
|
||||
/* check whether pmu is ready to bootstrap lsf if not wait for it */
|
||||
if (!g->ops.pmu.lspmuwprinitdone) {
|
||||
if (!g->pmu_lsf_pmu_wpr_init_done) {
|
||||
pmu_wait_message_cond(&g->pmu,
|
||||
gk20a_get_gr_idle_timeout(g),
|
||||
&g->ops.pmu.lspmuwprinitdone, 1);
|
||||
&g->pmu_lsf_pmu_wpr_init_done, 1);
|
||||
/* check again if it still not ready indicate an error */
|
||||
if (!g->ops.pmu.lspmuwprinitdone) {
|
||||
if (!g->pmu_lsf_pmu_wpr_init_done) {
|
||||
nvgpu_err(g, "PMU not ready to load LSF");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
@@ -201,8 +201,8 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
|
||||
gp10b_pmu_load_multiple_falcons(g, falconidmask, flags);
|
||||
pmu_wait_message_cond(&g->pmu,
|
||||
gk20a_get_gr_idle_timeout(g),
|
||||
&g->ops.pmu.lsfloadedfalconid, falconidmask);
|
||||
if (g->ops.pmu.lsfloadedfalconid != falconidmask)
|
||||
&g->pmu_lsf_loaded_falcon_id, falconidmask);
|
||||
if (g->pmu_lsf_loaded_falcon_id != falconidmask)
|
||||
return -ETIMEDOUT;
|
||||
return 0;
|
||||
}
|
||||
@@ -418,8 +418,8 @@ void gp10b_init_pmu_ops(struct gk20a *g)
|
||||
gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
|
||||
gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
|
||||
gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
|
||||
gops->pmu.lspmuwprinitdone = false;
|
||||
gops->pmu.fecsbootstrapdone = false;
|
||||
g->pmu_lsf_pmu_wpr_init_done = false;
|
||||
__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
|
||||
gops->pmu.write_dmatrfbase = gp10b_write_dmatrfbase;
|
||||
gops->pmu.pmu_elpg_statistics = gp10b_pmu_elpg_statistics;
|
||||
gops->pmu.pmu_pg_init_param = gp10b_pg_gr_init;
|
||||
|
||||
@@ -48,8 +48,10 @@ struct gk20a;
|
||||
* PMU flags.
|
||||
*/
|
||||
/* perfmon enabled or disabled for PMU */
|
||||
#define NVGPU_PMU_PERFMON 48
|
||||
#define NVGPU_PMU_PSTATE 49
|
||||
#define NVGPU_PMU_PERFMON 48
|
||||
#define NVGPU_PMU_PSTATE 49
|
||||
#define NVGPU_PMU_ZBC_SAVE 50
|
||||
#define NVGPU_PMU_FECS_BOOTSTRAP_DONE 51
|
||||
|
||||
/*
|
||||
* Must be greater than the largest bit offset in the above list.
|
||||
|
||||
Reference in New Issue
Block a user