gpu: nvgpu: get PMU ucode cmd line args DMEM offset

Fetch the DMEM size of the PMU falcon using the common Falcon
interface and copy the PMU ucode command line args to the
top of PMU DMEM.

This change is needed to clean up the dependency between PMU and ACR.
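
For illustration, the new scheme boils down to "args offset = DMEM size -
cmdline args size". A minimal standalone sketch of that computation, using
hypothetical names (pmu_cmd_line_args_offset, dmem_size, args_size) rather
than the real nvgpu structs and ops:

#include <inttypes.h>
#include <stdio.h>

/*
 * Sketch only: the PMU ucode cmdline args sit at the very top of PMU DMEM,
 * so their DMEM offset is the falcon DMEM size minus the size of the args
 * blob. dmem_size stands in for what nvgpu_falcon_get_dmem_size() reports
 * and args_size for get_pmu_cmdline_args_size(); the helper name here is
 * made up for illustration, not an nvgpu API.
 */
static uint32_t pmu_cmd_line_args_offset(uint32_t dmem_size, uint32_t args_size)
{
	return dmem_size - args_size;
}

int main(void)
{
	/* Made-up example values: 64 KiB of PMU DMEM, a 40-byte args struct. */
	printf("cmdline args offset: 0x%" PRIx32 "\n",
	       pmu_cmd_line_args_offset(64U * 1024U, 40U));
	return 0;
}

The old code computed the same value in the ACR paths by reading
pwr_falcon_hwcfg_r() and shifting by GK20A_PMU_DMEM_BLKSIZE2; routing it
through nvgpu_pmu_get_cmd_line_args_offset() keeps that knowledge on the
PMU/falcon side.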

JIRA NVGPU-1147

Change-Id: Ie0b1bcf0bdd1afb2c37c1a7d061dc9b03f9fc679
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2012082
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Mahantesh Kumbar
Date: 2019-02-05 15:11:04 +05:30
Committed by: mobile promotions
Parent: ecc27b3f8b
Commit: a759ee0ec8
11 changed files with 57 additions and 31 deletions

View File

@@ -543,6 +543,26 @@ void nvgpu_falcon_get_ctls(struct nvgpu_falcon *flcn, u32 *sctl, u32 *cpuctl)
}
}

int nvgpu_falcon_get_dmem_size(struct nvgpu_falcon *flcn, u32 *dmem_size)
{
	struct nvgpu_falcon_ops *flcn_ops;

	if (flcn == NULL) {
		return -EINVAL;
	}

	flcn_ops = &flcn->flcn_ops;

	if (flcn_ops->get_mem_size != NULL) {
		*dmem_size = flcn_ops->get_mem_size(flcn, MEM_DMEM);
	} else {
		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
			flcn->flcn_id);
	}

	return 0;
}

struct gk20a *nvgpu_falcon_to_gk20a(struct nvgpu_falcon *flcn)
{
return flcn->g;

View File

@@ -720,6 +720,7 @@ void gk20a_falcon_ops(struct nvgpu_falcon *flcn)
flcn_ops->mailbox_write = gk20a_falcon_mailbox_write;
flcn_ops->bl_bootstrap = gk20a_falcon_bl_bootstrap;
flcn_ops->get_falcon_ctls = gk20a_falcon_get_ctls;
flcn_ops->get_mem_size = gk20a_falcon_get_mem_size;
gk20a_falcon_engine_dependency_ops(flcn);
}

View File

@@ -196,6 +196,8 @@ struct nvgpu_falcon_ops {
struct nvgpu_falcon_bl_info *bl_info);
void (*get_falcon_ctls)(struct nvgpu_falcon *flcn, u32 *sctl,
u32 *cpuctl);
u32 (*get_mem_size)(struct nvgpu_falcon *flcn,
enum falcon_mem_type mem_type);
};
struct nvgpu_falcon {

View File

@@ -44,8 +44,6 @@
#include "pmu_gm20b.h"
#include "acr_gm20b.h"
#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
typedef int (*gm20b_get_ucode_details)(struct gk20a *g,
struct flcn_ucode_img *udata);
@@ -404,7 +402,6 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
void *lsfm, u32 *p_bl_gen_desc_size)
{
struct wpr_carveout_info wpr_inf;
struct nvgpu_pmu *pmu = &g->pmu;
struct lsfm_managed_ucode_img *p_lsfm =
(struct lsfm_managed_ucode_img *)lsfm;
struct flcn_ucode_img *p_img = &(p_lsfm->ucode_img);
@@ -413,7 +410,6 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
struct pmu_ucode_desc *desc;
u64 tmp;
u32 addr_code, addr_data;
u32 addr_args;
if (p_img->desc == NULL) {
/*
@@ -452,13 +448,6 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
desc->app_resident_data_offset);
nvgpu_pmu_dbg(g, "bl start off %d\n", desc->bootloader_start_offset);
addr_args = ((pwr_falcon_hwcfg_dmem_size_v(
gk20a_readl(g, pwr_falcon_hwcfg_r())))
<< GK20A_PMU_DMEM_BLKSIZE2);
addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
nvgpu_pmu_dbg(g, "addr_args %x\n", addr_args);
/* Populate the loader_config state*/
ldr_cfg->dma_idx = GK20A_PMU_DMAIDX_UCODE;
ldr_cfg->code_dma_base = addr_code;
@@ -474,10 +463,9 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
/* Update the argc/argv members*/
ldr_cfg->argc = 1;
ldr_cfg->argv = addr_args;
nvgpu_pmu_get_cmd_line_args_offset(g, &ldr_cfg->argv);
*p_bl_gen_desc_size = (u32)sizeof(struct loader_config);
g->acr.pmu_args = addr_args;
return 0;
}

View File

@@ -43,9 +43,6 @@
#include "acr_gv100.h"
#include "acr_tu104.h"
#include <nvgpu/hw/gp106/hw_psec_gp106.h>
#include <nvgpu/hw/gp106/hw_pwr_gp106.h>
/*Defines*/
#define gp106_dbg_pmu(g, fmt, arg...) \
nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
@@ -540,7 +537,6 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
void *lsfm, u32 *p_bl_gen_desc_size)
{
struct wpr_carveout_info wpr_inf;
struct nvgpu_pmu *pmu = &g->pmu;
struct lsfm_managed_ucode_img_v2 *p_lsfm =
(struct lsfm_managed_ucode_img_v2 *)lsfm;
struct flcn_ucode_img_v1 *p_img = &(p_lsfm->ucode_img);
@@ -549,7 +545,6 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
u64 addr_base;
struct pmu_ucode_desc_v1 *desc;
u64 addr_code, addr_data;
u32 addr_args;
if (p_img->desc == NULL) {
/* This means its a header based ucode,
@@ -584,14 +579,6 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
desc->app_resident_data_offset);
gp106_dbg_pmu(g, "bl start off %d\n", desc->bootloader_start_offset);
addr_args = ((pwr_falcon_hwcfg_dmem_size_v(
gk20a_readl(g, pwr_falcon_hwcfg_r())))
<< GK20A_PMU_DMEM_BLKSIZE2);
addr_args -= g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
gp106_dbg_pmu(g, "addr_args %x\n", addr_args);
/* Populate the LOADER_CONFIG state */
(void) memset((void *) ldr_cfg, 0,
sizeof(struct flcn_bl_dmem_desc_v1));
@@ -605,11 +592,10 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
/* Update the argc/argv members*/
ldr_cfg->argc = 1;
ldr_cfg->argv = addr_args;
nvgpu_pmu_get_cmd_line_args_offset(g, &ldr_cfg->argv);
*p_bl_gen_desc_size = (u32)sizeof(struct flcn_bl_dmem_desc_v1);
g->acr.pmu_args = addr_args;
return 0;
}

View File

@@ -778,3 +778,20 @@ int nvgpu_pmu_wait_ready(struct gk20a *g)
return status;
}

void nvgpu_pmu_get_cmd_line_args_offset(struct gk20a *g,
	u32 *args_offset)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	u32 dmem_size = 0;
	int err = 0;

	err = nvgpu_falcon_get_dmem_size(pmu->flcn, &dmem_size);
	if (err != 0) {
		nvgpu_err(g, "dmem size request failed");
		*args_offset = 0;
		return;
	}

	*args_offset = dmem_size - g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
}

View File

@@ -331,6 +331,10 @@ void gm20b_pmu_setup_apertures(struct gk20a *g)
void gm20b_update_lspmu_cmdline_args(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
u32 cmd_line_args_offset = 0;
nvgpu_pmu_get_cmd_line_args_offset(g, &cmd_line_args_offset);
/*Copying pmu cmdline args*/
g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu,
g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK));
@@ -340,7 +344,7 @@ void gm20b_update_lspmu_cmdline_args(struct gk20a *g)
g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
pmu, GK20A_PMU_DMAIDX_VIRT);
nvgpu_falcon_copy_to_dmem(pmu->flcn, g->acr.pmu_args,
nvgpu_falcon_copy_to_dmem(pmu->flcn, cmd_line_args_offset,
(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
}

View File

@@ -312,6 +312,9 @@ int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
void gp106_update_lspmu_cmdline_args(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
u32 cmd_line_args_offset = 0;
nvgpu_pmu_get_cmd_line_args_offset(g, &cmd_line_args_offset);
/*Copying pmu cmdline args*/
g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu, 0);
@@ -325,7 +328,7 @@ void gp106_update_lspmu_cmdline_args(struct gk20a *g)
g->ops.pmu_ver.config_pmu_cmdline_args_super_surface(pmu);
}
nvgpu_falcon_copy_to_dmem(pmu->flcn, g->acr.pmu_args,
nvgpu_falcon_copy_to_dmem(pmu->flcn, cmd_line_args_offset,
(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);

View File

@@ -163,7 +163,8 @@ struct nvgpu_acr {
struct hs_acr acr_ahesasc;
struct hs_acr acr_asb;
u32 pmu_args;
struct nvgpu_firmware *pmu_fw;
struct nvgpu_firmware *pmu_desc;
int (*prepare_ucode_blob)(struct gk20a *g);
void (*get_wpr_info)(struct gk20a *g, struct wpr_carveout_info *inf);

View File

@@ -150,6 +150,7 @@ void nvgpu_falcon_dump_stats(struct nvgpu_falcon *flcn);
int nvgpu_falcon_bl_bootstrap(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_bl_info *bl_info);
void nvgpu_falcon_get_ctls(struct nvgpu_falcon *flcn, u32 *sctl, u32 *cpuctl);
int nvgpu_falcon_get_dmem_size(struct nvgpu_falcon *flcn, u32 *dmem_size);
struct gk20a *nvgpu_falcon_to_gk20a(struct nvgpu_falcon *flcn);
u32 nvgpu_falcon_get_id(struct nvgpu_falcon *flcn);

View File

@@ -550,6 +550,9 @@ void pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
void *var, u8 val);
int nvgpu_pmu_wait_ready(struct gk20a *g);
void nvgpu_pmu_get_cmd_line_args_offset(struct gk20a *g,
u32 *args_offset);
struct gk20a *gk20a_from_pmu(struct nvgpu_pmu *pmu);
/* super surface */