gpu: nvgpu: Move PMU ucode read to PMU s/w early init

Currently, the PMU f/w ucode read is part of ACR prepare ucode blob,
which makes PMU depend on ACR to init the PMU f/w version related
ops & forces PMU related members into the ACR data struct so that
the space allocated for the PMU ucodes can be freed.

Moved the PMU f/w ucode read to the PMU early init function; the
version ops are now initialized once the PMU ucode descriptor is
available.
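
As a rough illustration only (not the nvgpu sources), the sketch below
models the resulting ordering with stand-in types and helpers: PMU s/w
early init loads the image/desc/sig blobs and keys the version ops off
the descriptor's app_version, while ACR blob preparation merely consumes
the handles the PMU already owns. Names such as fw_blob, pmu_ctx and
request_fw() are hypothetical; only the firmware file names match the
defines added in this change.

/*
 * Minimal standalone sketch of the new ownership model. All types and
 * helpers are stand-ins for illustration, not nvgpu APIs.
 */
#include <stdio.h>
#include <stdlib.h>

struct fw_blob { size_t size; unsigned char *data; };

struct pmu_desc_hdr { unsigned app_version; unsigned image_size; };

struct pmu_ctx {
	struct fw_blob *fw_image;   /* ucode image, owned by PMU      */
	struct fw_blob *fw_desc;    /* ucode descriptor, owned by PMU */
	struct fw_blob *fw_sig;     /* ucode signature, owned by PMU  */
	unsigned app_version;       /* cached from the descriptor     */
};

/* Stand-in for nvgpu_request_firmware(): fabricate a zeroed blob. */
static struct fw_blob *request_fw(const char *name, size_t size)
{
	struct fw_blob *fw = calloc(1, sizeof(*fw));

	if (fw == NULL)
		return NULL;
	fw->data = calloc(1, size);
	if (fw->data == NULL) {
		free(fw);
		return NULL;
	}
	fw->size = size;
	printf("loaded %s (%zu bytes)\n", name, size);
	return fw;
}

static void release_fw(struct fw_blob *fw)
{
	if (fw != NULL) {
		free(fw->data);
		free(fw);
	}
}

/* Early init: read the ucode once and derive version ops from the desc. */
static int pmu_early_init_ucode(struct pmu_ctx *pmu)
{
	struct pmu_desc_hdr *desc;

	if (pmu->fw_image != NULL)	/* already loaded */
		return 0;

	pmu->fw_image = request_fw("gpmu_ucode_image.bin", 4096);
	pmu->fw_desc  = request_fw("gpmu_ucode_desc.bin",
				   sizeof(struct pmu_desc_hdr));
	pmu->fw_sig   = request_fw("pmu_sig.bin", 128);
	if (pmu->fw_image == NULL || pmu->fw_desc == NULL ||
	    pmu->fw_sig == NULL) {
		release_fw(pmu->fw_sig);
		release_fw(pmu->fw_desc);
		release_fw(pmu->fw_image);
		pmu->fw_image = pmu->fw_desc = pmu->fw_sig = NULL;
		return -1;
	}

	desc = (struct pmu_desc_hdr *)(void *)pmu->fw_desc->data;
	pmu->app_version = desc->app_version;	/* version ops keyed on this */
	return 0;
}

/* ACR blob prep: consume the handles; no loading or freeing here. */
static int acr_prepare_blob(const struct pmu_ctx *pmu)
{
	if (pmu->fw_image == NULL || pmu->fw_desc == NULL)
		return -1;
	printf("building blob for app_version %u\n", pmu->app_version);
	return 0;
}

int main(void)
{
	struct pmu_ctx pmu = { 0 };

	if (pmu_early_init_ucode(&pmu) != 0)
		return 1;
	if (acr_prepare_blob(&pmu) != 0)
		return 1;

	release_fw(pmu.fw_sig);
	release_fw(pmu.fw_desc);
	release_fw(pmu.fw_image);
	return 0;
}

The point of the split is the same as in the real change: the PMU owns
its firmware handles and releases them in its own remove path, so the
ACR struct no longer needs pmu_fw/pmu_desc members.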

JIRA NVGPU-1146

Change-Id: I465814a4d7a997d06a77d8123a00f3423bf3da1e
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2006339
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Mahantesh Kumbar
Date: 2019-01-29 15:49:13 +05:30
Committer: mobile promotions
Parent: e254d482c0
Commit: 27f50aebbd
7 changed files with 144 additions and 177 deletions


@@ -87,68 +87,29 @@ bool gm20b_is_pmu_supported(struct gk20a *g)
static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
{
struct nvgpu_firmware *pmu_fw, *pmu_desc, *pmu_sig;
struct nvgpu_pmu *pmu = &g->pmu;
struct lsf_ucode_desc *lsf_desc;
int err;
nvgpu_pmu_dbg(g, "requesting PMU ucode in GM20B\n");
pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 0);
if (pmu_fw == NULL) {
nvgpu_err(g, "failed to load pmu ucode!!");
return -ENOENT;
}
g->acr.pmu_fw = pmu_fw;
nvgpu_pmu_dbg(g, "Loaded PMU ucode in for blob preparation");
nvgpu_pmu_dbg(g, "requesting PMU ucode desc in GM20B\n");
pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 0);
if (pmu_desc == NULL) {
nvgpu_err(g, "failed to load pmu ucode desc!!");
err = -ENOENT;
goto release_img_fw;
}
pmu_sig = nvgpu_request_firmware(g, GM20B_PMU_UCODE_SIG, 0);
if (pmu_sig == NULL) {
nvgpu_err(g, "failed to load pmu sig!!");
err = -ENOENT;
goto release_desc;
}
pmu->desc = (struct pmu_ucode_desc *)pmu_desc->data;
pmu->ucode_image = (u32 *)pmu_fw->data;
g->acr.pmu_desc = pmu_desc;
err = nvgpu_init_pmu_fw_ver_ops(pmu);
if (err != 0) {
nvgpu_pmu_dbg(g, "failed to set function pointers");
goto release_sig;
}
int err = 0;
lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc));
if (lsf_desc == NULL) {
err = -ENOMEM;
goto release_sig;
goto exit;
}
nvgpu_memcpy((u8 *)lsf_desc, (u8 *)pmu_sig->data,
min_t(size_t, sizeof(*lsf_desc), pmu_sig->size));
nvgpu_memcpy((u8 *)lsf_desc, (u8 *)pmu->fw_sig->data,
min_t(size_t, sizeof(*lsf_desc), pmu->fw_sig->size));
lsf_desc->falcon_id = FALCON_ID_PMU;
p_img->desc = pmu->desc;
p_img->data = pmu->ucode_image;
p_img->data_size = pmu->desc->image_size;
p_img->desc = (struct pmu_ucode_desc *)(void *)pmu->fw_desc->data;
p_img->data = (u32 *)(void *)pmu->fw_image->data;
p_img->data_size = p_img->desc->image_size;
p_img->fw_ver = NULL;
p_img->header = NULL;
p_img->lsf_desc = (struct lsf_ucode_desc *)lsf_desc;
nvgpu_pmu_dbg(g, "requesting PMU ucode in GM20B exit\n");
nvgpu_release_firmware(g, pmu_sig);
return 0;
release_sig:
nvgpu_release_firmware(g, pmu_sig);
release_desc:
nvgpu_release_firmware(g, pmu_desc);
g->acr.pmu_desc = NULL;
release_img_fw:
nvgpu_release_firmware(g, pmu_fw);
g->acr.pmu_fw = NULL;
exit:
return err;
}
@@ -343,18 +304,13 @@ int gm20b_alloc_blob_space(struct gk20a *g,
int prepare_ucode_blob(struct gk20a *g)
{
int err;
int err = 0;
struct ls_flcn_mgr lsfm_l, *plsfm;
struct nvgpu_pmu *pmu = &g->pmu;
struct wpr_carveout_info wpr_inf;
if (g->acr.ucode_blob.cpu_va != NULL) {
/*Recovery case, we do not need to form
non WPR blob of ucodes*/
err = nvgpu_init_pmu_fw_ver_ops(pmu);
if (err != 0) {
nvgpu_pmu_dbg(g, "failed to set function pointers\n");
}
return err;
}
plsfm = &lsfm_l;


@@ -112,74 +112,29 @@ int gp106_alloc_blob_space(struct gk20a *g,
int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
{
struct nvgpu_firmware *pmu_fw, *pmu_desc, *pmu_sig;
struct nvgpu_pmu *pmu = &g->pmu;
struct lsf_ucode_desc_v1 *lsf_desc;
int err;
gp106_dbg_pmu(g, "requesting PMU ucode in gp106\n");
pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE,
NVGPU_REQUEST_FIRMWARE_NO_SOC);
if (pmu_fw == NULL) {
nvgpu_err(g, "failed to load pmu ucode!!");
return -ENOENT;
}
g->acr.pmu_fw = pmu_fw;
gp106_dbg_pmu(g, "Loaded PMU ucode in for blob preparation");
gp106_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n");
pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC,
NVGPU_REQUEST_FIRMWARE_NO_SOC);
if (pmu_desc == NULL) {
nvgpu_err(g, "failed to load pmu ucode desc!!");
err = -ENOENT;
goto release_img_fw;
}
pmu_sig = nvgpu_request_firmware(g, GM20B_PMU_UCODE_SIG,
NVGPU_REQUEST_FIRMWARE_NO_SOC);
if (pmu_sig == NULL) {
nvgpu_err(g, "failed to load pmu sig!!");
err = -ENOENT;
goto release_desc;
}
pmu->desc_v1 = (struct pmu_ucode_desc_v1 *)pmu_desc->data;
pmu->ucode_image = (u32 *)pmu_fw->data;
g->acr.pmu_desc = pmu_desc;
err = nvgpu_init_pmu_fw_ver_ops(pmu);
if (err != 0) {
nvgpu_err(g, "failed to set function pointers");
goto release_sig;
}
int err = 0;
lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_v1));
if (lsf_desc == NULL) {
err = -ENOMEM;
goto release_sig;
goto exit;
}
nvgpu_memcpy((u8 *)lsf_desc, (u8 *)pmu_sig->data,
min_t(size_t, sizeof(*lsf_desc), pmu_sig->size));
nvgpu_memcpy((u8 *)lsf_desc, (u8 *)pmu->fw_sig->data,
min_t(size_t, sizeof(*lsf_desc), pmu->fw_sig->size));
lsf_desc->falcon_id = FALCON_ID_PMU;
p_img->desc = pmu->desc_v1;
p_img->data = pmu->ucode_image;
p_img->data_size = pmu->desc_v1->app_start_offset
+ pmu->desc_v1->app_size;
p_img->desc = (struct pmu_ucode_desc_v1 *)(void *)pmu->fw_desc->data;
p_img->data = (u32 *)(void *)pmu->fw_image->data;
p_img->data_size = p_img->desc->app_start_offset + p_img->desc->app_size;
p_img->fw_ver = NULL;
p_img->header = NULL;
p_img->lsf_desc = (struct lsf_ucode_desc_v1 *)lsf_desc;
gp106_dbg_pmu(g, "requesting PMU ucode in GM20B exit\n");
nvgpu_release_firmware(g, pmu_sig);
return 0;
release_sig:
nvgpu_release_firmware(g, pmu_sig);
release_desc:
nvgpu_release_firmware(g, pmu_desc);
g->acr.pmu_desc = NULL;
release_img_fw:
nvgpu_release_firmware(g, pmu_fw);
g->acr.pmu_fw = NULL;
exit:
return err;
}
@@ -478,18 +433,13 @@ static int lsfm_discover_and_add_sub_wprs(struct gk20a *g,
int gp106_prepare_ucode_blob(struct gk20a *g)
{
int err;
int err = 0;
struct ls_flcn_mgr_v1 lsfm_l, *plsfm;
struct nvgpu_pmu *pmu = &g->pmu;
struct wpr_carveout_info wpr_inf;
if (g->acr.ucode_blob.cpu_va != NULL) {
/*Recovery case, we do not need to form
non WPR blob of ucodes*/
err = nvgpu_init_pmu_fw_ver_ops(pmu);
if (err != 0) {
gp106_dbg_pmu(g, "failed to set function pointers\n");
}
return err;
}
plsfm = &lsfm_l;


@@ -37,6 +37,10 @@
/* PMU NS UCODE IMG */
#define NVGPU_PMU_NS_UCODE_IMAGE "gpmu_ucode.bin"
#define NVGPU_PMU_UCODE_IMAGE "gpmu_ucode_image.bin"
#define NVGPU_PMU_UCODE_DESC "gpmu_ucode_desc.bin"
#define NVGPU_PMU_UCODE_SIG "pmu_sig.bin"
/* PMU F/W version */
#define APP_VERSION_TU10X 25467803U
#define APP_VERSION_GV11B 25005711U
@@ -1114,7 +1118,7 @@ static void pg_cmd_eng_buf_load_set_dma_idx_v2(struct pmu_pg_cmd *pg,
pg->eng_buf_load_v2.dma_desc.params |= (U32(value) << U32(24));
}
int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
static int init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu, u32 app_version)
{
struct gk20a *g = gk20a_from_pmu(pmu);
struct pmu_v *pv = &g->ops.pmu_ver;
@@ -1122,7 +1126,7 @@ int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
nvgpu_log_fn(g, " ");
switch (pmu->desc->app_version) {
switch (app_version) {
case APP_VERSION_GP10B:
g->ops.pmu_ver.pg_cmd_eng_buf_load_size =
pg_cmd_eng_buf_load_size_v1;
@@ -1292,8 +1296,8 @@ int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
pmu_allocation_get_fb_addr_v3;
g->ops.pmu_ver.pmu_allocation_get_fb_size =
pmu_allocation_get_fb_size_v3;
if (pmu->desc->app_version == APP_VERSION_GV10X ||
pmu->desc->app_version == APP_VERSION_TU10X) {
if (app_version == APP_VERSION_GV10X ||
app_version == APP_VERSION_TU10X) {
g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params =
get_pmu_init_msg_pmu_queue_params_v5;
g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr =
@@ -1322,7 +1326,7 @@ int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
clk_avfs_get_vin_cal_fuse_v20;
g->ops.pmu_ver.clk.clk_vf_change_inject_data_fill =
nvgpu_clk_vf_change_inject_data_fill_gv10x;
if (pmu->desc->app_version == APP_VERSION_GV10X) {
if (app_version == APP_VERSION_GV10X) {
g->ops.pmu_ver.clk.clk_set_boot_clk =
nvgpu_clk_set_boot_fll_clk_gv10x;
} else {
@@ -1606,8 +1610,9 @@ int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
break;
default:
nvgpu_err(g, "PMU code version not supported version: %d\n",
pmu->desc->app_version);
app_version);
err = -EINVAL;
break;
}
pv->set_perfmon_cntr_index(pmu, 3); /* GR & CE2 */
pv->set_perfmon_cntr_group_id(pmu, PMU_DOMAIN_GROUP_PSTATE);
@@ -1639,21 +1644,29 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
pboardobj->destruct(pboardobj);
}
if (pmu->fw != NULL) {
nvgpu_release_firmware(g, pmu->fw);
if (pmu->fw_image != NULL) {
nvgpu_release_firmware(g, pmu->fw_image);
}
if (g->acr.pmu_fw != NULL) {
nvgpu_release_firmware(g, g->acr.pmu_fw);
if (pmu->fw_desc != NULL) {
nvgpu_release_firmware(g, pmu->fw_desc);
}
if (g->acr.pmu_desc != NULL) {
nvgpu_release_firmware(g, g->acr.pmu_desc);
if (pmu->fw_sig != NULL) {
nvgpu_release_firmware(g, pmu->fw_sig);
}
nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
if (nvgpu_mem_is_valid(&pmu->ucode)) {
nvgpu_dma_unmap_free(vm, &pmu->ucode);
}
nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf);
if (nvgpu_mem_is_valid(&pmu->seq_buf)) {
nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
}
if (nvgpu_mem_is_valid(&pmu->super_surface_buf)) {
nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf);
}
nvgpu_mutex_destroy(&pmu->elpg_mutex);
nvgpu_mutex_destroy(&pmu->pg_mutex);
@@ -1662,6 +1675,75 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
nvgpu_mutex_destroy(&pmu->pmu_seq_lock);
}
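/* Load the PMU ucode firmware blobs once and set the f/w version ops
 * from the ucode descriptor. */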
static int init_pmu_ucode(struct nvgpu_pmu *pmu)
{
struct gk20a *g = gk20a_from_pmu(pmu);
struct pmu_ucode_desc *desc;
int err = 0;
if (pmu->fw_image != NULL) {
goto exit;
}
if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
/* non-secure PMU boot ucode */
pmu->fw_image = nvgpu_request_firmware(g,
NVGPU_PMU_NS_UCODE_IMAGE, 0);
if (pmu->fw_image == NULL) {
nvgpu_err(g,
"failed to load non-secure pmu ucode!!");
goto exit;
}
desc = (struct pmu_ucode_desc *)
(void *)pmu->fw_image->data;
} else {
/* secure boot ucodes */
nvgpu_pmu_dbg(g, "requesting PMU ucode image");
pmu->fw_image = nvgpu_request_firmware(g, NVGPU_PMU_UCODE_IMAGE, 0);
if (pmu->fw_image == NULL) {
nvgpu_err(g, "failed to load pmu ucode!!");
err = -ENOENT;
goto exit;
}
nvgpu_pmu_dbg(g, "requesting PMU ucode desc");
pmu->fw_desc = nvgpu_request_firmware(g, NVGPU_PMU_UCODE_DESC, 0);
if (pmu->fw_desc == NULL) {
nvgpu_err(g, "failed to load pmu ucode desc!!");
err = -ENOENT;
goto release_img_fw;
}
nvgpu_pmu_dbg(g, "requesting PMU ucode sign");
pmu->fw_sig = nvgpu_request_firmware(g, NVGPU_PMU_UCODE_SIG, 0);
if (pmu->fw_sig == NULL) {
nvgpu_err(g, "failed to load pmu sig!!");
err = -ENOENT;
goto release_desc;
}
desc = (struct pmu_ucode_desc *)(void *)pmu->fw_desc->data;
}
err = init_pmu_fw_ver_ops(pmu, desc->app_version);
if (err != 0) {
nvgpu_err(g, "failed to set function pointers");
goto release_sig;
}
goto exit;
release_sig:
nvgpu_release_firmware(g, pmu->fw_sig);
release_desc:
nvgpu_release_firmware(g, pmu->fw_desc);
release_img_fw:
nvgpu_release_firmware(g, pmu->fw_image);
exit:
return err;
}
int nvgpu_early_init_pmu_sw(struct gk20a *g, struct nvgpu_pmu *pmu)
{
int err = 0;
@@ -1695,10 +1777,17 @@ int nvgpu_early_init_pmu_sw(struct gk20a *g, struct nvgpu_pmu *pmu)
goto fail_pmu_copy;
}
err = init_pmu_ucode(pmu);
if (err != 0) {
goto fail_seq_lock;
}
pmu->remove_support = nvgpu_remove_pmu_support;
goto exit;
fail_seq_lock:
nvgpu_mutex_destroy(&pmu->pmu_seq_lock);
fail_pmu_copy:
nvgpu_mutex_destroy(&pmu->pmu_copy_lock);
fail_isr:
@@ -1714,52 +1803,27 @@ exit:
int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
int err = 0;
struct mm_gk20a *mm = &g->mm;
struct vm_gk20a *vm = mm->pmu.vm;
struct pmu_ucode_desc *desc;
u32 *ucode_image = NULL;
int err = 0;
nvgpu_log_fn(g, " ");
if (pmu->fw != NULL) {
err = nvgpu_init_pmu_fw_ver_ops(pmu);
if (err != 0) {
nvgpu_err(g, "failed to set function pointers");
}
return err;
}
pmu->fw = nvgpu_request_firmware(g, NVGPU_PMU_NS_UCODE_IMAGE, 0);
if (pmu->fw == NULL) {
nvgpu_err(g, "failed to load pmu ucode!!");
return err;
}
nvgpu_log_fn(g, "firmware loaded");
pmu->desc = (struct pmu_ucode_desc *)pmu->fw->data;
pmu->ucode_image = (u32 *)((u8 *)pmu->desc +
pmu->desc->descriptor_size);
desc = (struct pmu_ucode_desc *)(void *)pmu->fw_image->data;
ucode_image = (u32 *)(void *)((u8 *)desc + desc->descriptor_size);
err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX,
&pmu->ucode);
if (err != 0) {
goto err_release_fw;
goto exit;
}
nvgpu_mem_wr_n(g, &pmu->ucode, 0, pmu->ucode_image,
pmu->desc->app_start_offset + pmu->desc->app_size);
err = nvgpu_init_pmu_fw_ver_ops(pmu);
if (err != 0) {
nvgpu_err(g, "failed to set function pointers");
}
return err;
err_release_fw:
nvgpu_release_firmware(g, pmu->fw);
pmu->fw = NULL;
nvgpu_mem_wr_n(g, &pmu->ucode, 0, ucode_image,
desc->app_start_offset + desc->app_size);
exit:
return err;
}


@@ -187,7 +187,8 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
{
struct gk20a *g = gk20a_from_pmu(pmu);
struct mm_gk20a *mm = &g->mm;
struct pmu_ucode_desc *desc = pmu->desc;
struct pmu_ucode_desc *desc =
(struct pmu_ucode_desc *)(void *)pmu->fw_image->data;
u64 addr_code, addr_data, addr_load;
u32 i, blocks, addr_args;


@@ -30,6 +30,7 @@
#include <nvgpu/utils.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/nvgpu_err.h>
#include <nvgpu/firmware.h>
#include "pmu_gp10b.h"
#include "pmu_gp106.h"
@@ -201,7 +202,8 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
{
struct gk20a *g = gk20a_from_pmu(pmu);
struct mm_gk20a *mm = &g->mm;
struct pmu_ucode_desc *desc = pmu->desc;
struct pmu_ucode_desc *desc =
(struct pmu_ucode_desc *)(void *)pmu->fw_image->data;
u64 addr_code_lo, addr_data_lo, addr_load_lo;
u64 addr_code_hi, addr_data_hi;
u32 i, blocks, addr_args;


@@ -167,8 +167,6 @@ struct nvgpu_acr {
struct hs_acr acr_asb;
u32 pmu_args;
struct nvgpu_firmware *pmu_fw;
struct nvgpu_firmware *pmu_desc;
int (*prepare_ucode_blob)(struct gk20a *g, struct nvgpu_acr *acr);
void (*get_wpr_info)(struct gk20a *g, struct wpr_carveout_info *inf);


@@ -327,10 +327,10 @@ struct nvgpu_pmu {
struct gk20a *g;
struct nvgpu_falcon *flcn;
union {
struct pmu_ucode_desc *desc;
struct pmu_ucode_desc_v1 *desc_v1;
};
struct nvgpu_firmware *fw_desc;
struct nvgpu_firmware *fw_image;
struct nvgpu_firmware *fw_sig;
struct nvgpu_mem ucode;
struct nvgpu_mem pg_buf;
@@ -359,7 +359,6 @@ struct nvgpu_pmu {
struct nvgpu_allocator dmem;
u32 *ucode_image;
bool pmu_ready;
u32 perfmon_query;
@@ -414,8 +413,6 @@ struct nvgpu_pmu {
u32 falcon_id;
u32 aelpg_param[5];
u32 override_done;
struct nvgpu_firmware *fw;
};
struct pmu_surface {
@@ -505,7 +502,6 @@ int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
u32 size);
/* PMU F/W support */
int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu);
int nvgpu_early_init_pmu_sw(struct gk20a *g, struct nvgpu_pmu *pmu);
int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g);