gpu: nvgpu: remove GR falcons bootstrap support using VA

- GR falcons bootstrap can be done using either a physical or a
virtual address by setting the usevamask flag in the PMU interface
command PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS.
- With this change, the physical address is always used; virtual
address support is removed, along with the associated code.
- Removed Linux-specific code used to obtain information about the WPR VA.

JIRA NVGPU-128

Change-Id: Id58f3ddc4418d61126f2a4eacb50713d278c10a0
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1572468
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
This commit is contained in:
Mahantesh Kumbar
2017-10-03 17:21:16 +05:30
committed by mobile promotions
parent 5f16bb575c
commit bc4182afeb
4 changed files with 6 additions and 40 deletions

View File

@@ -350,13 +350,7 @@ int prepare_ucode_blob(struct gk20a *g)
int err;
struct ls_flcn_mgr lsfm_l, *plsfm;
struct nvgpu_pmu *pmu = &g->pmu;
phys_addr_t wpr_addr, wpr_page;
u32 wprsize;
int i;
struct mm_gk20a *mm = &g->mm;
struct vm_gk20a *vm = mm->pmu.vm;
struct wpr_carveout_info wpr_inf;
struct page **pages;
if (g->acr.ucode_blob.cpu_va) {
/*Recovery case, we do not need to form
@@ -375,26 +369,8 @@ int prepare_ucode_blob(struct gk20a *g)
gr_gk20a_init_ctxsw_ucode(g);
g->ops.pmu.get_wpr(g, &wpr_inf);
wpr_addr = (phys_addr_t)wpr_inf.wpr_base;
wprsize = (u32)wpr_inf.size;
gm20b_dbg_pmu("wpr carveout base:%llx\n", wpr_inf.wpr_base);
gm20b_dbg_pmu("wpr carveout size :%x\n", wprsize);
pages = nvgpu_kmalloc(g, sizeof(struct page *) * (wprsize / PAGE_SIZE));
if (!pages)
return -ENOMEM;
wpr_page = wpr_addr;
for (i = 0; wpr_page < (wpr_addr + wprsize); i++, wpr_page += PAGE_SIZE)
pages[i] = phys_to_page(wpr_page);
__nvgpu_mem_create_from_pages(g, &g->pmu.wpr_buf, pages,
wprsize / PAGE_SIZE);
nvgpu_kfree(g, pages);
g->pmu.wpr_buf.gpu_va = nvgpu_gmmu_map(vm, &g->pmu.wpr_buf,
wprsize, 0, gk20a_mem_flag_none,
false, APERTURE_SYSMEM);
gm20b_dbg_pmu("wpr mapped gpu va :%llx\n", g->pmu.wpr_buf.gpu_va);
gm20b_dbg_pmu("wpr carveout size :%llx\n", wpr_inf.size);
/* Discover all managed falcons*/
err = lsfm_discover_ucode_images(g, plsfm);
@@ -423,7 +399,6 @@ int prepare_ucode_blob(struct gk20a *g)
gm20b_dbg_pmu("prepare ucode blob return 0\n");
free_acr_resources(g, plsfm);
free_sgt:
nvgpu_gmmu_unmap(vm, &g->pmu.wpr_buf, g->pmu.wpr_buf.gpu_va);
return err;
}
@@ -618,10 +593,8 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
*/
addr_base = p_lsfm->lsb_header.ucode_off;
g->ops.pmu.get_wpr(g, &wpr_inf);
if (falconid == LSF_FALCON_ID_GPCCS)
addr_base += g->pmu.wpr_buf.gpu_va;
else
addr_base += wpr_inf.wpr_base;
addr_base += wpr_inf.wpr_base;
gm20b_dbg_pmu("gen loader cfg %x u32 addrbase %x ID\n", (u32)addr_base,
p_lsfm->wpr_header.falcon_id);
addr_code = u64_lo32((addr_base +

View File

@@ -623,11 +623,7 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
*/
addr_base = p_lsfm->lsb_header.ucode_off;
g->ops.pmu.get_wpr(g, &wpr_inf);
if (falconid == LSF_FALCON_ID_GPCCS &&
g->pmu.wpr_buf.aperture == APERTURE_SYSMEM)
addr_base += g->pmu.wpr_buf.gpu_va;
else
addr_base += wpr_inf.wpr_base;
addr_base += wpr_inf.wpr_base;
gp106_dbg_pmu("falcon ID %x", p_lsfm->wpr_header.falcon_id);
gp106_dbg_pmu("gen loader cfg addrbase %llx ", addr_base);

View File

@@ -170,10 +170,8 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
cmd.cmd.acr.boot_falcons.falconidmask =
falconidmask;
cmd.cmd.acr.boot_falcons.usevamask = 0;
cmd.cmd.acr.boot_falcons.wprvirtualbase.lo =
u64_lo32(g->pmu.wpr_buf.gpu_va);
cmd.cmd.acr.boot_falcons.wprvirtualbase.hi =
u64_hi32(g->pmu.wpr_buf.gpu_va);
cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0;
cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0;
gp10b_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n",
falconidmask);
nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,

View File

@@ -287,7 +287,6 @@ struct nvgpu_pmu {
/* TBD: remove this if ZBC seq is fixed */
struct nvgpu_mem seq_buf;
struct nvgpu_mem trace_buf;
struct nvgpu_mem wpr_buf;
bool buf_loaded;
struct pmu_sha1_gid gid_info;