gpu: nvgpu: Reorg pmu HAL initialization

Reorganize HAL initialization to remove inheritance and construct
the gpu_ops struct at compile time. This patch only covers the
pmu sub-module of the gpu_ops struct.

Perform HAL function assignments in hal_gxxxx.c through the
population of a chip-specific copy of gpu_ops.
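
In outline, the move is from runtime assignment to a compile-time
struct copy. A minimal sketch, abridged from the gm20b hunks below
(only one op shown; the real structs carry the whole pmu sub-module):

    /* Before: inherit gk20a defaults at runtime, then patch them */
    void gm20b_init_pmu_ops(struct gk20a *g)
    {
            struct gpu_ops *gops = &g->ops;

            gk20a_init_pmu_ops(gops);
            gops->pmu.write_dmatrfbase = gm20b_write_dmatrfbase;
    }

    /* After: the chip's ops are a const struct built at compile time */
    static const struct gpu_ops gm20b_ops = {
            .pmu = {
                    .write_dmatrfbase = gm20b_write_dmatrfbase,
                    /* ... remaining pmu ops ... */
            },
    };

    int gm20b_init_hal(struct gk20a *g)
    {
            g->ops.pmu = gm20b_ops.pmu;  /* plain struct copy */
            /* only priv-security-dependent ops remain runtime-assigned */
            return 0;
    }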

Jira NVGPU-74

Change-Id: I8839ac99e87153637005e23b3013237f57275c54
Signed-off-by: Sunny He <suhe@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1530982
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Author:    Sunny He <suhe@nvidia.com>
Date:      2017-08-01 17:10:42 -07:00
Committer: mobile promotions
Parent:    b50b379c19
Commit:    5f010177de

15 changed files with 258 additions and 266 deletions

File: gk20a/pmu_gk20a.c

@@ -459,7 +459,7 @@ void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set)
pwr_pmu_msgq_tail_val_f(*tail));
}
-static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
+int gk20a_init_pmu_setup_hw1(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
int err = 0;
@@ -493,7 +493,7 @@ static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
}
-static void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr)
+void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr)
{
gk20a_writel(g, pwr_falcon_dmatrfbase_r(), addr);
}
@@ -521,7 +521,7 @@ int gk20a_pmu_engine_reset(struct gk20a *g, bool do_reset)
return 0;
}
-static bool gk20a_is_pmu_supported(struct gk20a *g)
+bool gk20a_is_pmu_supported(struct gk20a *g)
{
return true;
}
@@ -539,45 +539,6 @@ u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
return 0;
}
-void gk20a_init_pmu_ops(struct gpu_ops *gops)
-{
-gops->pmu.is_pmu_supported = gk20a_is_pmu_supported;
-gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob;
-gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1;
-gops->pmu.pmu_nsbootstrap = pmu_bootstrap;
-gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r;
-gops->pmu.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v;
-gops->pmu.pmu_get_queue_tail = pwr_pmu_queue_tail_r;
-gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
-gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
-gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
-gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
-gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
-gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
-gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
-gops->pmu.pmu_setup_elpg = NULL;
-gops->pmu.init_wpr_region = NULL;
-gops->pmu.load_lsfalcon_ucode = NULL;
-gops->pmu.write_dmatrfbase = gk20a_write_dmatrfbase;
-gops->pmu.pmu_elpg_statistics = gk20a_pmu_elpg_statistics;
-gops->pmu.pmu_pg_init_param = NULL;
-gops->pmu.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list;
-gops->pmu.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list;
-gops->pmu.pmu_is_lpwr_feature_supported = NULL;
-gops->pmu.pmu_lpwr_enable_pg = NULL;
-gops->pmu.pmu_lpwr_disable_pg = NULL;
-gops->pmu.pmu_pg_param_post_init = NULL;
-gops->pmu.dump_secure_fuses = NULL;
-gops->pmu.is_lazy_bootstrap = NULL;
-gops->pmu.is_priv_load = NULL;
-gops->pmu.get_wpr = NULL;
-gops->pmu.alloc_blob_space = NULL;
-gops->pmu.pmu_populate_loader_cfg = NULL;
-gops->pmu.flcn_populate_bl_dmem_desc = NULL;
-gops->pmu.reset_engine = gk20a_pmu_engine_reset;
-gops->pmu.is_engine_in_reset = gk20a_pmu_is_engine_in_reset;
-}
static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 handle, u32 status)
{

File: gk20a/pmu_gk20a.h

@@ -54,7 +54,9 @@ void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set);
u32 gk20a_pmu_read_idle_counter(struct gk20a *g, u32 counter_id);
void gk20a_pmu_reset_idle_counter(struct gk20a *g, u32 counter_id);
-void gk20a_init_pmu_ops(struct gpu_ops *gops);
+int gk20a_init_pmu_setup_hw1(struct gk20a *g);
+void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr);
+bool gk20a_is_pmu_supported(struct gk20a *g);
void pmu_copy_to_dmem(struct nvgpu_pmu *pmu,
u32 dst, u8 *src, u32 size, u8 port);

File: gm20b/acr_gm20b.c

@@ -51,11 +51,6 @@ typedef int (*get_ucode_details)(struct gk20a *g, struct flcn_ucode_img *udata);
static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img);
static int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img);
static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img);
-static int gm20b_bootstrap_hs_flcn(struct gk20a *g);
-static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout);
-static int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout);
-static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
-void *desc, u32 bl_sz);
static int lsfm_discover_ucode_images(struct gk20a *g,
struct ls_flcn_mgr *plsfm);
static int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr *plsfm,
@@ -68,15 +63,6 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr *plsfm);
static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
struct nvgpu_mem *nonwpr);
static void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr *plsfm);
-static int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
-void *lsfm, u32 *p_bl_gen_desc_size);
-static int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
-void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid);
-static int gm20b_alloc_blob_space(struct gk20a *g,
-size_t size, struct nvgpu_mem *mem);
-static bool gm20b_is_priv_load(u32 falcon_id);
-static bool gm20b_is_lazy_bootstrap(u32 falcon_id);
-static void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf);
/*Globals*/
static get_ucode_details pmu_acr_supp_ucode_list[] = {
@@ -97,7 +83,7 @@ static void start_gm20b_pmu(struct gk20a *g)
pwr_falcon_cpuctl_startcpu_f(1));
}
-static void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
+void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
{
struct mc_carveout_info mem_inf;
@@ -108,29 +94,11 @@ static void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
inf->size = mem_inf.size;
}
-static bool gm20b_is_pmu_supported(struct gk20a *g)
+bool gm20b_is_pmu_supported(struct gk20a *g)
{
return true;
}
-void gm20b_init_secure_pmu(struct gpu_ops *gops)
-{
-gops->pmu.is_pmu_supported = gm20b_is_pmu_supported;
-gops->pmu.prepare_ucode = prepare_ucode_blob;
-gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn;
-gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap;
-gops->pmu.is_priv_load = gm20b_is_priv_load;
-gops->pmu.get_wpr = gm20b_wpr_info;
-gops->pmu.alloc_blob_space = gm20b_alloc_blob_space;
-gops->pmu.pmu_populate_loader_cfg = gm20b_pmu_populate_loader_cfg;
-gops->pmu.flcn_populate_bl_dmem_desc = gm20b_flcn_populate_bl_dmem_desc;
-gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt;
-gops->pmu.falcon_clear_halt_interrupt_status =
-clear_halt_interrupt_status;
-gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1;
-}
/* TODO - check if any free blob res needed*/
static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
{
struct nvgpu_firmware *pmu_fw, *pmu_desc, *pmu_sig;
@@ -334,7 +302,7 @@ rel_sig:
return err;
}
-static bool gm20b_is_lazy_bootstrap(u32 falcon_id)
+bool gm20b_is_lazy_bootstrap(u32 falcon_id)
{
bool enable_status = false;
@@ -352,7 +320,7 @@ static bool gm20b_is_lazy_bootstrap(u32 falcon_id)
return enable_status;
}
-static bool gm20b_is_priv_load(u32 falcon_id)
+bool gm20b_is_priv_load(u32 falcon_id)
{
bool enable_status = false;
@@ -370,7 +338,7 @@ static bool gm20b_is_priv_load(u32 falcon_id)
return enable_status;
}
-static int gm20b_alloc_blob_space(struct gk20a *g,
+int gm20b_alloc_blob_space(struct gk20a *g,
size_t size, struct nvgpu_mem *mem)
{
int err;
@@ -554,7 +522,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
}
-static int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
+int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
void *lsfm, u32 *p_bl_gen_desc_size)
{
struct wpr_carveout_info wpr_inf;
@@ -626,7 +594,7 @@ static int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
return 0;
}
-static int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
+int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid)
{
struct wpr_carveout_info wpr_inf;
@@ -1066,7 +1034,7 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr *plsfm)
/*Loads ACR bin to FB mem and bootstraps PMU with bootloader code
* start and end are addresses of ucode blob in non-WPR region*/
-static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
+int gm20b_bootstrap_hs_flcn(struct gk20a *g)
{
struct mm_gk20a *mm = &g->mm;
struct vm_gk20a *vm = mm->pmu.vm;
@@ -1291,7 +1259,7 @@ int gm20b_init_nspmu_setup_hw1(struct gk20a *g)
return err;
}
-static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
+int gm20b_init_pmu_setup_hw1(struct gk20a *g,
void *desc, u32 bl_sz)
{
@@ -1461,7 +1429,7 @@ err_done:
* @param[in] timeout_ms Timeout in msec for PMU to halt
* @return '0' if PMU halts
*/
-static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
+int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
{
struct nvgpu_pmu *pmu = &g->pmu;
u32 data = 0;
@@ -1490,7 +1458,7 @@ static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
* @param[in] timeout_ms Timeout in msec for halt to clear
* @return '0' if PMU halt irq status is clear
*/
-static int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout_ms)
+int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout_ms)
{
struct nvgpu_pmu *pmu = &g->pmu;
int status = 0;

File: gm20b/acr_gm20b.h

@@ -24,8 +24,21 @@
#define GM20B_FECS_UCODE_SIG "fecs_sig.bin"
#define T18x_GPCCS_UCODE_SIG "gpccs_sig.bin"
-void gm20b_init_secure_pmu(struct gpu_ops *gops);
+bool gm20b_is_pmu_supported(struct gk20a *g);
int prepare_ucode_blob(struct gk20a *g);
+int gm20b_bootstrap_hs_flcn(struct gk20a *g);
+bool gm20b_is_lazy_bootstrap(u32 falcon_id);
+bool gm20b_is_priv_load(u32 falcon_id);
+void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf);
+int gm20b_alloc_blob_space(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
+int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
+void *lsfm, u32 *p_bl_gen_desc_size);
+int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
+void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid);
+int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms);
+int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout);
+int gm20b_init_pmu_setup_hw1(struct gk20a *g, void *desc, u32 bl_sz);
int gm20b_pmu_setup_sw(struct gk20a *g);
int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt);
int gm20b_init_nspmu_setup_hw1(struct gk20a *g);

File: gm20b/hal_gm20b.c

@@ -26,6 +26,7 @@
#include "gk20a/flcn_gk20a.h"
#include "gk20a/priv_ring_gk20a.h"
#include "gk20a/regops_gk20a.h"
#include "gk20a/pmu_gk20a.h"
#include "ltc_gm20b.h"
#include "gr_gm20b.h"
@@ -42,6 +43,7 @@
#include "therm_gm20b.h"
#include "bus_gm20b.h"
#include "hal_gm20b.h"
#include "acr_gm20b.h"
#include <nvgpu/debug.h>
#include <nvgpu/bug.h>
@@ -53,6 +55,8 @@
#include <nvgpu/hw/gm20b/hw_fifo_gm20b.h>
#include <nvgpu/hw/gm20b/hw_ram_gm20b.h>
#include <nvgpu/hw/gm20b/hw_top_gm20b.h>
+#include <nvgpu/hw/gm20b/hw_gr_gm20b.h>
+#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
#define PRIV_SECURITY_DISABLE 0x01
@@ -313,6 +317,31 @@ static const struct gpu_ops gm20b_ops = {
.init_therm_setup_hw = gm20b_init_therm_setup_hw,
.elcg_init_idle_filters = gk20a_elcg_init_idle_filters,
},
+.pmu = {
+.pmu_setup_elpg = gm20b_pmu_setup_elpg,
+.pmu_get_queue_head = pwr_pmu_queue_head_r,
+.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
+.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
+.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
+.pmu_queue_head = gk20a_pmu_queue_head,
+.pmu_queue_tail = gk20a_pmu_queue_tail,
+.pmu_msgq_tail = gk20a_pmu_msgq_tail,
+.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
+.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
+.pmu_mutex_release = gk20a_pmu_mutex_release,
+.write_dmatrfbase = gm20b_write_dmatrfbase,
+.pmu_elpg_statistics = gk20a_pmu_elpg_statistics,
+.pmu_pg_init_param = NULL,
+.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
+.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
+.pmu_is_lpwr_feature_supported = NULL,
+.pmu_lpwr_enable_pg = NULL,
+.pmu_lpwr_disable_pg = NULL,
+.pmu_pg_param_post_init = NULL,
+.dump_secure_fuses = pmu_dump_security_fuses_gm20b,
+.reset_engine = gk20a_pmu_engine_reset,
+.is_engine_in_reset = gk20a_pmu_is_engine_in_reset,
+},
.clk = {
.init_clk_support = gm20b_init_clk_support,
.suspend_clk_support = gm20b_suspend_clk_support,
@@ -422,6 +451,7 @@ int gm20b_init_hal(struct gk20a *g)
gops->gr_ctx = gm20b_ops.gr_ctx;
gops->mm = gm20b_ops.mm;
gops->therm = gm20b_ops.therm;
+gops->pmu = gm20b_ops.pmu;
/*
* clk must be assigned member by member
* since some clk ops are assigned during probe prior to HAL init
@@ -483,9 +513,44 @@ int gm20b_init_hal(struct gk20a *g)
}
}
#endif
+/* priv security dependent ops */
+if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
+/* Add in ops from gm20b acr */
+gops->pmu.is_pmu_supported = gm20b_is_pmu_supported;
+gops->pmu.prepare_ucode = prepare_ucode_blob;
+gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn;
+gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap;
+gops->pmu.is_priv_load = gm20b_is_priv_load;
+gops->pmu.get_wpr = gm20b_wpr_info;
+gops->pmu.alloc_blob_space = gm20b_alloc_blob_space;
+gops->pmu.pmu_populate_loader_cfg =
+gm20b_pmu_populate_loader_cfg;
+gops->pmu.flcn_populate_bl_dmem_desc =
+gm20b_flcn_populate_bl_dmem_desc;
+gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt;
+gops->pmu.falcon_clear_halt_interrupt_status =
+clear_halt_interrupt_status;
+gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1;
+gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
+gops->pmu.load_lsfalcon_ucode = gm20b_load_falcon_ucode;
+} else {
+/* Inherit from gk20a */
+gops->pmu.is_pmu_supported = gk20a_is_pmu_supported;
+gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob;
+gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1;
+gops->pmu.pmu_nsbootstrap = pmu_bootstrap;
+gops->pmu.load_lsfalcon_ucode = NULL;
+gops->pmu.init_wpr_region = NULL;
+}
+__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+g->pmu_lsf_pmu_wpr_init_done = 0;
g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
gm20b_init_gr(g);
-gm20b_init_pmu_ops(g);
gm20b_init_uncompressed_kind_map();
gm20b_init_kind_attr();

File: gm20b/pmu_gm20b.c

@@ -102,7 +102,7 @@ static struct pg_init_sequence_list _pginitseq_gm20b[] = {
{ 0x0010e040, 0x00000000},
};
-static int gm20b_pmu_setup_elpg(struct gk20a *g)
+int gm20b_pmu_setup_elpg(struct gk20a *g)
{
int ret = 0;
u32 reg_writes;
@@ -226,7 +226,7 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
return;
}
-static int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
+int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
{
u32 err = 0;
u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
@@ -261,7 +261,7 @@ void gm20b_write_dmatrfbase(struct gk20a *g, u32 addr)
}
/*Dump Security related fuses*/
-static void pmu_dump_security_fuses_gm20b(struct gk20a *g)
+void pmu_dump_security_fuses_gm20b(struct gk20a *g)
{
u32 val;
@@ -272,45 +272,3 @@ static void pmu_dump_security_fuses_gm20b(struct gk20a *g)
nvgpu_tegra_fuse_read_gcplex_config_fuse(g, &val);
nvgpu_err(g, "FUSE_GCPLEX_CONFIG_FUSE_0: 0x%x", val);
}
-void gm20b_init_pmu_ops(struct gk20a *g)
-{
-struct gpu_ops *gops = &g->ops;
-if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
-gm20b_init_secure_pmu(gops);
-gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
-gops->pmu.load_lsfalcon_ucode = gm20b_load_falcon_ucode;
-} else {
-gk20a_init_pmu_ops(gops);
-gops->pmu.pmu_setup_hw_and_bootstrap =
-gm20b_init_nspmu_setup_hw1;
-gops->pmu.load_lsfalcon_ucode = NULL;
-gops->pmu.init_wpr_region = NULL;
-}
-gops->pmu.pmu_setup_elpg = gm20b_pmu_setup_elpg;
-gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r;
-gops->pmu.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v;
-gops->pmu.pmu_get_queue_tail = pwr_pmu_queue_tail_r;
-gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
-gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
-gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
-gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
-gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
-gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
-gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
-g->pmu_lsf_pmu_wpr_init_done = 0;
-__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
-gops->pmu.write_dmatrfbase = gm20b_write_dmatrfbase;
-gops->pmu.pmu_elpg_statistics = gk20a_pmu_elpg_statistics;
-gops->pmu.pmu_pg_init_param = NULL;
-gops->pmu.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list;
-gops->pmu.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list;
-gops->pmu.pmu_is_lpwr_feature_supported = NULL;
-gops->pmu.pmu_lpwr_enable_pg = NULL;
-gops->pmu.pmu_lpwr_disable_pg = NULL;
-gops->pmu.pmu_pg_param_post_init = NULL;
-gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gm20b;
-gops->pmu.reset_engine = gk20a_pmu_engine_reset;
-gops->pmu.is_engine_in_reset = gk20a_pmu_is_engine_in_reset;
-}

File: gm20b/pmu_gm20b.h

@@ -18,7 +18,9 @@
struct gk20a;
-void gm20b_init_pmu_ops(struct gk20a *g);
+int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
+int gm20b_pmu_setup_elpg(struct gk20a *g);
+void pmu_dump_security_fuses_gm20b(struct gk20a *g);
void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags);
int gm20b_pmu_init_acr(struct gk20a *g);
void gm20b_write_dmatrfbase(struct gk20a *g, u32 addr);

File: gp106/acr_gp106.c

@@ -56,9 +56,6 @@ typedef int (*get_ucode_details)(struct gk20a *g,
/*Externs*/
/*Forwards*/
-static int gp106_bootstrap_hs_flcn(struct gk20a *g);
-static int gp106_prepare_ucode_blob(struct gk20a *g);
/*Globals*/
static get_ucode_details pmu_acr_supp_ucode_list[] = {
@@ -67,7 +64,7 @@ static get_ucode_details pmu_acr_supp_ucode_list[] = {
gpccs_ucode_details,
};
-static void gp106_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
+void gp106_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
{
inf->nonwpr_base = g->mm.vidmem.bootstrap_base;
inf->wpr_base = inf->nonwpr_base + GP106_DGPU_WPR_OFFSET;
@@ -80,7 +77,7 @@ static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
dma_addr->hi |= u64_hi32(value);
}
-static int gp106_alloc_blob_space(struct gk20a *g,
+int gp106_alloc_blob_space(struct gk20a *g,
size_t size, struct nvgpu_mem *mem)
{
struct wpr_carveout_info wpr_inf;
@@ -105,20 +102,6 @@ static int gp106_alloc_blob_space(struct gk20a *g,
NVGPU_DMA_NO_KERNEL_MAPPING, wpr_inf.size, mem,
wpr_inf.nonwpr_base);
}
-void gp106_init_secure_pmu(struct gpu_ops *gops)
-{
-gops->pmu.prepare_ucode = gp106_prepare_ucode_blob;
-gops->pmu.pmu_setup_hw_and_bootstrap = gp106_bootstrap_hs_flcn;
-gops->pmu.get_wpr = gp106_wpr_info;
-gops->pmu.alloc_blob_space = gp106_alloc_blob_space;
-gops->pmu.pmu_populate_loader_cfg = gp106_pmu_populate_loader_cfg;
-gops->pmu.flcn_populate_bl_dmem_desc = gp106_flcn_populate_bl_dmem_desc;
-gops->pmu.falcon_wait_for_halt = sec2_wait_for_halt;
-gops->pmu.falcon_clear_halt_interrupt_status =
-sec2_clear_halt_interrupt_status;
-gops->pmu.init_falcon_setup_hw = init_sec2_setup_hw1;
-}
/* TODO - check if any free blob res needed*/
int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
@@ -373,7 +356,7 @@ rel_sig:
return err;
}
-static int gp106_prepare_ucode_blob(struct gk20a *g)
+int gp106_prepare_ucode_blob(struct gk20a *g)
{
int err;
@@ -1040,7 +1023,7 @@ int lsf_gen_wpr_requirements(struct gk20a *g,
/*Loads ACR bin to FB mem and bootstraps PMU with bootloader code
* start and end are addresses of ucode blob in non-WPR region*/
-static int gp106_bootstrap_hs_flcn(struct gk20a *g)
+int gp106_bootstrap_hs_flcn(struct gk20a *g)
{
struct mm_gk20a *mm = &g->mm;
struct vm_gk20a *vm = mm->pmu.vm;

File: gp106/acr_gp106.h

@@ -19,7 +19,14 @@
#define GP104_FECS_UCODE_SIG "gp104/fecs_sig.bin"
#define GP104_GPCCS_UCODE_SIG "gp104/gpccs_sig.bin"
-void gp106_init_secure_pmu(struct gpu_ops *gops);
+int gp106_bootstrap_hs_flcn(struct gk20a *g);
+int gp106_prepare_ucode_blob(struct gk20a *g);
+int gp106_alloc_blob_space(struct gk20a *g,
+size_t size, struct nvgpu_mem *mem);
+void gp106_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf);
void lsfm_free_ucode_img_res(struct gk20a *g,
struct flcn_ucode_img_v1 *p_img);
void lsfm_free_nonpmu_ucode_img_res(struct gk20a *g,

File: gp106/hal_gp106.c

@@ -26,6 +26,7 @@
#include "gk20a/regops_gk20a.h"
#include "gk20a/mc_gk20a.h"
#include "gk20a/fb_gk20a.h"
#include "gk20a/pmu_gk20a.h"
#include "gp10b/ltc_gp10b.h"
#include "gp10b/gr_gp10b.h"
@@ -38,6 +39,7 @@
#include "gp10b/priv_ring_gp10b.h"
#include "gp10b/fifo_gp10b.h"
#include "gp10b/fb_gp10b.h"
#include "gp10b/pmu_gp10b.h"
#include "gp106/fifo_gp106.h"
#include "gp106/regops_gp106.h"
@@ -48,7 +50,10 @@
#include "gm20b/mm_gm20b.h"
#include "gm20b/pmu_gm20b.h"
#include "gm20b/fb_gm20b.h"
#include "gm20b/acr_gm20b.h"
#include "gp106/acr_gp106.h"
#include "gp106/sec2_gp106.h"
#include "gp106/clk_gp106.h"
#include "gp106/clk_arb_gp106.h"
#include "gp106/mclk_gp106.h"
@@ -77,6 +82,7 @@
#include <nvgpu/hw/gp106/hw_ram_gp106.h>
#include <nvgpu/hw/gp106/hw_top_gp106.h>
#include <nvgpu/hw/gp106/hw_pram_gp106.h>
+#include <nvgpu/hw/gp106/hw_pwr_gp106.h>
static int gp106_get_litter_value(struct gk20a *g, int value)
@@ -398,6 +404,45 @@ static const struct gpu_ops gp106_ops = {
.get_internal_sensor_limits = gp106_get_internal_sensor_limits,
.configure_therm_alert = gp106_configure_therm_alert,
},
+.pmu = {
+.init_wpr_region = gm20b_pmu_init_acr,
+.load_lsfalcon_ucode = gp106_load_falcon_ucode,
+.is_lazy_bootstrap = gp106_is_lazy_bootstrap,
+.is_priv_load = gp106_is_priv_load,
+.prepare_ucode = gp106_prepare_ucode_blob,
+.pmu_setup_hw_and_bootstrap = gp106_bootstrap_hs_flcn,
+.get_wpr = gp106_wpr_info,
+.alloc_blob_space = gp106_alloc_blob_space,
+.pmu_populate_loader_cfg = gp106_pmu_populate_loader_cfg,
+.flcn_populate_bl_dmem_desc = gp106_flcn_populate_bl_dmem_desc,
+.falcon_wait_for_halt = sec2_wait_for_halt,
+.falcon_clear_halt_interrupt_status =
+sec2_clear_halt_interrupt_status,
+.init_falcon_setup_hw = init_sec2_setup_hw1,
+.pmu_queue_tail = gk20a_pmu_queue_tail,
+.pmu_get_queue_head = pwr_pmu_queue_head_r,
+.pmu_mutex_release = gk20a_pmu_mutex_release,
+.is_pmu_supported = gp106_is_pmu_supported,
+.pmu_pg_supported_engines_list = gp106_pmu_pg_engines_list,
+.pmu_elpg_statistics = gp106_pmu_elpg_statistics,
+.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
+.pmu_is_lpwr_feature_supported =
+gp106_pmu_is_lpwr_feature_supported,
+.pmu_msgq_tail = gk20a_pmu_msgq_tail,
+.pmu_pg_engines_feature_list = gp106_pmu_pg_feature_list,
+.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
+.pmu_queue_head = gk20a_pmu_queue_head,
+.pmu_pg_param_post_init = nvgpu_lpwr_post_init,
+.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
+.pmu_pg_init_param = gp106_pg_param_init,
+.reset_engine = gp106_pmu_engine_reset,
+.pmu_lpwr_disable_pg = nvgpu_lpwr_disable_pg,
+.write_dmatrfbase = gp10b_write_dmatrfbase,
+.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
+.is_engine_in_reset = gp106_pmu_is_engine_in_reset,
+.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
+.pmu_lpwr_enable_pg = nvgpu_lpwr_enable_pg,
+},
.clk = {
.init_clk_support = gp106_init_clk_support,
.get_crystal_clk_hz = gp106_crystal_clk_hz,
@@ -532,6 +577,7 @@ int gp106_init_hal(struct gk20a *g)
gops->mm = gp106_ops.mm;
gops->pramin = gp106_ops.pramin;
gops->therm = gp106_ops.therm;
+gops->pmu = gp106_ops.pmu;
/*
* clk must be assigned member by member
* since some clk ops are assigned during probe prior to HAL init
@@ -568,10 +614,11 @@ int gp106_init_hal(struct gk20a *g)
__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, true);
+__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+g->pmu_lsf_pmu_wpr_init_done = 0;
g->bootstrap_owner = LSF_FALCON_ID_SEC2;
gp106_init_gr(g);
-gp106_init_pmu_ops(g);
gp10b_init_uncompressed_kind_map();
gp10b_init_kind_attr();

File: gp106/pmu_gp106.c

@@ -32,7 +32,7 @@
#include <nvgpu/hw/gp106/hw_mc_gp106.h>
#include <nvgpu/hw/gp106/hw_pwr_gp106.h>
-static bool gp106_is_pmu_supported(struct gk20a *g)
+bool gp106_is_pmu_supported(struct gk20a *g)
{
return true;
}
@@ -69,7 +69,7 @@ int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset)
return 0;
}
-static u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
+u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
{
if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
return PMU_PG_FEATURE_GR_RPPG_ENABLED;
@@ -80,7 +80,7 @@ static u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
return 0;
}
-static u32 gp106_pmu_pg_engines_list(struct gk20a *g)
+u32 gp106_pmu_pg_engines_list(struct gk20a *g)
{
return BIT(PMU_PG_ELPG_ENGINE_ID_GRAPHICS) |
BIT(PMU_PG_ELPG_ENGINE_ID_MS);
@@ -100,7 +100,7 @@ static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
msg->msg.pg.msg_type);
}
-static int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
+int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct pmu_cmd cmd;
@@ -168,7 +168,7 @@ void gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
pg_stat_data->avg_exit_latency_us = stats.exit_latency_avg_us;
}
-static bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
+bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
{
bool is_feature_supported = false;
@@ -188,7 +188,7 @@ static bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
return is_feature_supported;
}
-static bool gp106_is_lazy_bootstrap(u32 falcon_id)
+bool gp106_is_lazy_bootstrap(u32 falcon_id)
{
bool enable_status = false;
@@ -206,7 +206,7 @@ static bool gp106_is_lazy_bootstrap(u32 falcon_id)
return enable_status;
}
-static bool gp106_is_priv_load(u32 falcon_id)
+bool gp106_is_priv_load(u32 falcon_id)
{
bool enable_status = false;
@@ -258,7 +258,7 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
gk20a_dbg_fn("done");
}
-static int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
+int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
{
u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
@@ -289,51 +289,3 @@ static int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
return -ETIMEDOUT;
return 0;
}
-void gp106_init_pmu_ops(struct gk20a *g)
-{
-struct gpu_ops *gops = &g->ops;
-gk20a_dbg_fn("");
-if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
-gp106_init_secure_pmu(gops);
-gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
-gops->pmu.load_lsfalcon_ucode = gp106_load_falcon_ucode;
-gops->pmu.is_lazy_bootstrap = gp106_is_lazy_bootstrap;
-gops->pmu.is_priv_load = gp106_is_priv_load;
-} else {
-gk20a_init_pmu_ops(gops);
-gops->pmu.pmu_setup_hw_and_bootstrap =
-gm20b_init_nspmu_setup_hw1;
-gops->pmu.load_lsfalcon_ucode = NULL;
-gops->pmu.init_wpr_region = NULL;
-}
-gops->pmu.pmu_setup_elpg = NULL;
-gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r;
-gops->pmu.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v;
-gops->pmu.pmu_get_queue_tail = pwr_pmu_queue_tail_r;
-gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
-gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
-gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
-gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
-gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
-gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
-gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
-g->pmu_lsf_pmu_wpr_init_done = 0;
-__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
-gops->pmu.write_dmatrfbase = gp10b_write_dmatrfbase;
-gops->pmu.pmu_elpg_statistics = gp106_pmu_elpg_statistics;
-gops->pmu.pmu_pg_init_param = gp106_pg_param_init;
-gops->pmu.pmu_pg_supported_engines_list = gp106_pmu_pg_engines_list;
-gops->pmu.pmu_pg_engines_feature_list = gp106_pmu_pg_feature_list;
-gops->pmu.pmu_is_lpwr_feature_supported =
-gp106_pmu_is_lpwr_feature_supported;
-gops->pmu.pmu_lpwr_enable_pg = nvgpu_lpwr_enable_pg;
-gops->pmu.pmu_lpwr_disable_pg = nvgpu_lpwr_disable_pg;
-gops->pmu.pmu_pg_param_post_init = nvgpu_lpwr_post_init;
-gops->pmu.dump_secure_fuses = NULL;
-gops->pmu.is_pmu_supported = gp106_is_pmu_supported;
-gops->pmu.reset_engine = gp106_pmu_engine_reset;
-gops->pmu.is_engine_in_reset = gp106_pmu_is_engine_in_reset;
-gk20a_dbg_fn("done");
-}

File: gp106/pmu_gp106.h

@@ -19,7 +19,15 @@
struct gk20a;
-void gp106_init_pmu_ops(struct gk20a *g);
+bool gp106_is_pmu_supported(struct gk20a *g);
+u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id);
+u32 gp106_pmu_pg_engines_list(struct gk20a *g);
+int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id);
+bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id);
+bool gp106_is_lazy_bootstrap(u32 falcon_id);
+bool gp106_is_priv_load(u32 falcon_id);
+int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
void gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
struct pmu_pg_stats_data *pg_stat_data);
bool gp106_pmu_is_engine_in_reset(struct gk20a *g);

File: gp10b/hal_gp10b.c

@@ -26,6 +26,7 @@
#include "gk20a/regops_gk20a.h"
#include "gk20a/mc_gk20a.h"
#include "gk20a/fb_gk20a.h"
#include "gk20a/pmu_gk20a.h"
#include "gp10b/gr_gp10b.h"
#include "gp10b/fecs_trace_gp10b.h"
@@ -46,6 +47,7 @@
#include "gm20b/ltc_gm20b.h"
#include "gm20b/gr_gm20b.h"
#include "gm20b/fifo_gm20b.h"
#include "gm20b/acr_gm20b.h"
#include "gm20b/pmu_gm20b.h"
#include "gm20b/clk_gm20b.h"
#include "gm20b/fb_gm20b.h"
@@ -65,6 +67,7 @@
#include <nvgpu/hw/gp10b/hw_ram_gp10b.h>
#include <nvgpu/hw/gp10b/hw_top_gp10b.h>
#include <nvgpu/hw/gp10b/hw_pram_gp10b.h>
+#include <nvgpu/hw/gp10b/hw_pwr_gp10b.h>
static int gp10b_get_litter_value(struct gk20a *g, int value)
{
@@ -353,6 +356,27 @@ static const struct gpu_ops gp10b_ops = {
.init_therm_setup_hw = gp10b_init_therm_setup_hw,
.elcg_init_idle_filters = gp10b_elcg_init_idle_filters,
},
+.pmu = {
+.pmu_setup_elpg = gp10b_pmu_setup_elpg,
+.pmu_get_queue_head = pwr_pmu_queue_head_r,
+.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
+.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
+.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
+.pmu_queue_head = gk20a_pmu_queue_head,
+.pmu_queue_tail = gk20a_pmu_queue_tail,
+.pmu_msgq_tail = gk20a_pmu_msgq_tail,
+.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
+.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
+.pmu_mutex_release = gk20a_pmu_mutex_release,
+.write_dmatrfbase = gp10b_write_dmatrfbase,
+.pmu_elpg_statistics = gp10b_pmu_elpg_statistics,
+.pmu_pg_init_param = gp10b_pg_gr_init,
+.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
+.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
+.dump_secure_fuses = pmu_dump_security_fuses_gp10b,
+.reset_engine = gk20a_pmu_engine_reset,
+.is_engine_in_reset = gk20a_pmu_is_engine_in_reset,
+},
.regops = {
.get_global_whitelist_ranges =
gp10b_get_global_whitelist_ranges,
@@ -455,6 +479,7 @@ int gp10b_init_hal(struct gk20a *g)
gops->mm = gp10b_ops.mm;
gops->pramin = gp10b_ops.pramin;
gops->therm = gp10b_ops.therm;
+gops->pmu = gp10b_ops.pmu;
gops->regops = gp10b_ops.regops;
gops->mc = gp10b_ops.mc;
gops->debug = gp10b_ops.debug;
@@ -513,9 +538,45 @@ int gp10b_init_hal(struct gk20a *g)
}
#endif
+/* priv security dependent ops */
+if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
+/* Add in ops from gm20b acr */
+gops->pmu.is_pmu_supported = gm20b_is_pmu_supported,
+gops->pmu.prepare_ucode = prepare_ucode_blob,
+gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn,
+gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap,
+gops->pmu.is_priv_load = gm20b_is_priv_load,
+gops->pmu.get_wpr = gm20b_wpr_info,
+gops->pmu.alloc_blob_space = gm20b_alloc_blob_space,
+gops->pmu.pmu_populate_loader_cfg =
+gm20b_pmu_populate_loader_cfg,
+gops->pmu.flcn_populate_bl_dmem_desc =
+gm20b_flcn_populate_bl_dmem_desc,
+gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt,
+gops->pmu.falcon_clear_halt_interrupt_status =
+clear_halt_interrupt_status,
+gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1,
+gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
+gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
+gops->pmu.is_lazy_bootstrap = gp10b_is_lazy_bootstrap;
+gops->pmu.is_priv_load = gp10b_is_priv_load;
+} else {
+/* Inherit from gk20a */
+gops->pmu.is_pmu_supported = gk20a_is_pmu_supported,
+gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob,
+gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1,
+gops->pmu.pmu_nsbootstrap = pmu_bootstrap,
+gops->pmu.load_lsfalcon_ucode = NULL;
+gops->pmu.init_wpr_region = NULL;
+gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;
+}
+__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
+g->pmu_lsf_pmu_wpr_init_done = 0;
g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
gp10b_init_gr(g);
-gp10b_init_pmu_ops(g);
gp10b_init_uncompressed_kind_map();
gp10b_init_kind_attr();

File: gp10b/pmu_gp10b.c

@@ -252,7 +252,7 @@ int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
return 0;
}
-static void gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
+void gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
struct pmu_pg_stats_data *pg_stat_data)
{
struct nvgpu_pmu *pmu = &g->pmu;
@@ -269,7 +269,7 @@ static void gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
pg_stat_data->avg_exit_latency_us = stats.exitlatency_avgus;
}
-static int gp10b_pmu_setup_elpg(struct gk20a *g)
+int gp10b_pmu_setup_elpg(struct gk20a *g)
{
int ret = 0;
u32 reg_writes;
@@ -299,7 +299,7 @@ void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr)
0x0);
}
-static int gp10b_init_pmu_setup_hw1(struct gk20a *g)
+int gp10b_init_pmu_setup_hw1(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
int err;
@@ -337,7 +337,7 @@ static int gp10b_init_pmu_setup_hw1(struct gk20a *g)
}
static bool gp10b_is_lazy_bootstrap(u32 falcon_id)
bool gp10b_is_lazy_bootstrap(u32 falcon_id)
{
bool enable_status = false;
@@ -355,7 +355,7 @@ static bool gp10b_is_lazy_bootstrap(u32 falcon_id)
return enable_status;
}
static bool gp10b_is_priv_load(u32 falcon_id)
bool gp10b_is_priv_load(u32 falcon_id)
{
bool enable_status = false;
@@ -374,7 +374,7 @@ static bool gp10b_is_priv_load(u32 falcon_id)
}
/*Dump Security related fuses*/
static void pmu_dump_security_fuses_gp10b(struct gk20a *g)
void pmu_dump_security_fuses_gp10b(struct gk20a *g)
{
u32 val;
@@ -386,50 +386,7 @@ static void pmu_dump_security_fuses_gp10b(struct gk20a *g)
nvgpu_err(g, "FUSE_GCPLEX_CONFIG_FUSE_0: 0x%x", val);
}
-static bool gp10b_is_pmu_supported(struct gk20a *g)
+bool gp10b_is_pmu_supported(struct gk20a *g)
{
return true;
}
-void gp10b_init_pmu_ops(struct gk20a *g)
-{
-struct gpu_ops *gops = &g->ops;
-gops->pmu.is_pmu_supported = gp10b_is_pmu_supported;
-if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
-gm20b_init_secure_pmu(gops);
-gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
-gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
-gops->pmu.is_lazy_bootstrap = gp10b_is_lazy_bootstrap;
-gops->pmu.is_priv_load = gp10b_is_priv_load;
-} else {
-gk20a_init_pmu_ops(gops);
-gops->pmu.load_lsfalcon_ucode = NULL;
-gops->pmu.init_wpr_region = NULL;
-gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;
-}
-gops->pmu.pmu_setup_elpg = gp10b_pmu_setup_elpg;
-gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r;
-gops->pmu.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v;
-gops->pmu.pmu_get_queue_tail = pwr_pmu_queue_tail_r;
-gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
-gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
-gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
-gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
-gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
-gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
-gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
-g->pmu_lsf_pmu_wpr_init_done = false;
-__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
-gops->pmu.write_dmatrfbase = gp10b_write_dmatrfbase;
-gops->pmu.pmu_elpg_statistics = gp10b_pmu_elpg_statistics;
-gops->pmu.pmu_pg_init_param = gp10b_pg_gr_init;
-gops->pmu.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list;
-gops->pmu.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list;
-gops->pmu.pmu_is_lpwr_feature_supported = NULL;
-gops->pmu.pmu_lpwr_enable_pg = NULL;
-gops->pmu.pmu_lpwr_disable_pg = NULL;
-gops->pmu.pmu_pg_param_post_init = NULL;
-gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gp10b;
-gops->pmu.reset_engine = gk20a_pmu_engine_reset;
-gops->pmu.is_engine_in_reset = gk20a_pmu_is_engine_in_reset;
-}

File: gp10b/pmu_gp10b.h

@@ -18,7 +18,15 @@
struct gk20a;
-void gp10b_init_pmu_ops(struct gk20a *g);
+bool gp10b_is_lazy_bootstrap(u32 falcon_id);
+bool gp10b_is_priv_load(u32 falcon_id);
+bool gp10b_is_pmu_supported(struct gk20a *g);
+int gp10b_init_pmu_setup_hw1(struct gk20a *g);
+void gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
+struct pmu_pg_stats_data *pg_stat_data);
+int gp10b_pmu_setup_elpg(struct gk20a *g);
+void pmu_dump_security_fuses_gp10b(struct gk20a *g);
+int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id);
void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr);