mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-25 11:04:51 +03:00
gpu: nvgpu: PMU cleanup for ACR

- Removed ACR support code from the PMU module
- Deleted ACR-related ops from pmu ops
- Deleted the assignment of ACR-related ops via pmu ops during HAL init
- Removed ACR bootstrap code and its dependent code for all chips

JIRA NVGPU-1147

Change-Id: I47a851a6b67a9aacde863685537c34566f97dc8d
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1817990
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent a4065effdc
commit 7465926ccd
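The ops being deleted were ordinary function pointers on the per-chip HAL. As a condensed sketch of the wiring this change removes (op and function names are taken from the gm20b_init_hal() hunk below; the cut-down struct, stub bodies, and main() are invented here so the sketch compiles standalone):

/* Condensed sketch of the per-chip ACR wiring this change removes.
 * Op and function names follow the gm20b_init_hal() hunk below; the
 * struct slice and stubs are illustrative, not the driver's headers. */
typedef unsigned int u32;
struct gk20a { int unused; };

static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
{ (void)g; return 0; }
static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
{ (void)g; (void)timeout_ms; return 0; }
static int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout)
{ (void)g; (void)timeout; return 0; }
static int gm20b_init_pmu_setup_hw1(struct gk20a *g, void *desc, u32 bl_sz)
{ (void)g; (void)desc; (void)bl_sz; return 0; }

struct pmu_acr_ops {	/* invented subset of struct gpu_ops.pmu */
	int (*pmu_setup_hw_and_bootstrap)(struct gk20a *g);
	int (*falcon_wait_for_halt)(struct gk20a *g, unsigned int timeout);
	int (*falcon_clear_halt_interrupt_status)(struct gk20a *g,
			unsigned int timeout);
	int (*init_falcon_setup_hw)(struct gk20a *g, void *desc, u32 bl_sz);
};

static void wire_gm20b_acr_ops(struct pmu_acr_ops *pmu)
{
	/* the per-chip assignment pattern being removed */
	pmu->pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn;
	pmu->falcon_wait_for_halt = pmu_wait_for_halt;
	pmu->falcon_clear_halt_interrupt_status = clear_halt_interrupt_status;
	pmu->init_falcon_setup_hw = gm20b_init_pmu_setup_hw1;
}

int main(void)
{
	struct pmu_acr_ops pmu = {0};
	wire_gm20b_acr_ops(&pmu);
	return pmu.falcon_wait_for_halt((struct gk20a *)0, 0);
}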
@@ -1648,17 +1648,6 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
		nvgpu_release_firmware(g, g->acr.pmu_desc);
	}

	if (g->acr.acr_fw) {
		nvgpu_release_firmware(g, g->acr.acr_fw);
	}

	if (g->acr.hsbl_fw) {
		nvgpu_release_firmware(g, g->acr.hsbl_fw);
	}

	nvgpu_dma_unmap_free(vm, &g->acr.acr_ucode);
	nvgpu_dma_unmap_free(vm, &g->acr.hsbl_ucode);

	nvgpu_dma_unmap_free(vm, &pmu->seq_buf);

	nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf);
@@ -70,18 +70,6 @@ static get_ucode_details pmu_acr_supp_ucode_list[] = {
	gpccs_ucode_details,
};

/*Once in LS mode, cpuctl_alias is only accessible*/
static void start_gm20b_pmu(struct gk20a *g)
{
	/*disable irqs for hs falcon booting as we will poll for halt*/
	nvgpu_mutex_acquire(&g->pmu.isr_mutex);
	g->ops.pmu.pmu_enable_irq(&g->pmu, true);
	g->pmu.isr_enabled = true;
	nvgpu_mutex_release(&g->pmu.isr_mutex);
	gk20a_writel(g, pwr_falcon_cpuctl_alias_r(),
		pwr_falcon_cpuctl_startcpu_f(1));
}

void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
{
	g->ops.fb.read_wpr_info(g, inf);
@@ -1024,123 +1012,6 @@ static int lsf_gen_wpr_requirements(struct gk20a *g, struct ls_flcn_mgr *plsfm)
	return 0;
}

/*Loads ACR bin to FB mem and bootstraps PMU with bootloader code
 * start and end are addresses of ucode blob in non-WPR region*/
int gm20b_bootstrap_hs_flcn(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->pmu.vm;
	int err = 0;
	u64 *acr_dmem;
	u32 img_size_in_bytes = 0;
	u32 status, size;
	u64 start;
	struct acr_desc *acr = &g->acr;
	struct nvgpu_firmware *acr_fw = acr->acr_fw;
	struct flcn_bl_dmem_desc *bl_dmem_desc = &acr->bl_dmem_desc;
	u32 *acr_ucode_header_t210_load;
	u32 *acr_ucode_data_t210_load;

	start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
	size = acr->ucode_blob.size;

	nvgpu_pmu_dbg(g, " ");

	if (!acr_fw) {
		/*First time init case*/
		acr_fw = nvgpu_request_firmware(g, GM20B_HSBIN_PMU_UCODE_IMAGE, 0);
		if (!acr_fw) {
			nvgpu_err(g, "pmu ucode get fail");
			return -ENOENT;
		}
		acr->acr_fw = acr_fw;
		acr->hsbin_hdr = (struct bin_hdr *)acr_fw->data;
		acr->fw_hdr = (struct acr_fw_header *)(acr_fw->data +
				acr->hsbin_hdr->header_offset);
		acr_ucode_data_t210_load = (u32 *)(acr_fw->data +
				acr->hsbin_hdr->data_offset);
		acr_ucode_header_t210_load = (u32 *)(acr_fw->data +
				acr->fw_hdr->hdr_offset);
		img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256);

		/* Lets patch the signatures first.. */
		if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load,
			(u32 *)(acr_fw->data +
					acr->fw_hdr->sig_prod_offset),
			(u32 *)(acr_fw->data +
					acr->fw_hdr->sig_dbg_offset),
			(u32 *)(acr_fw->data +
					acr->fw_hdr->patch_loc),
			(u32 *)(acr_fw->data +
					acr->fw_hdr->patch_sig)) < 0) {
			nvgpu_err(g, "patch signatures fail");
			err = -1;
			goto err_release_acr_fw;
		}
		err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
				&acr->acr_ucode);
		if (err) {
			err = -ENOMEM;
			goto err_release_acr_fw;
		}

		acr_dmem = (u64 *)
			&(((u8 *)acr_ucode_data_t210_load)[
					acr_ucode_header_t210_load[2]]);
		acr->acr_dmem_desc = (struct flcn_acr_desc *)((u8 *)(
			acr->acr_ucode.cpu_va) + acr_ucode_header_t210_load[2]);
		((struct flcn_acr_desc *)acr_dmem)->nonwpr_ucode_blob_start =
			start;
		((struct flcn_acr_desc *)acr_dmem)->nonwpr_ucode_blob_size =
			size;
		((struct flcn_acr_desc *)acr_dmem)->regions.no_regions = 2;
		((struct flcn_acr_desc *)acr_dmem)->wpr_offset = 0;

		nvgpu_mem_wr_n(g, &acr->acr_ucode, 0,
				acr_ucode_data_t210_load, img_size_in_bytes);
		/*
		 * In order to execute this binary, we will be using
		 * a bootloader which will load this image into PMU IMEM/DMEM.
		 * Fill up the bootloader descriptor for PMU HAL to use..
		 * TODO: Use standard descriptor which the generic bootloader is
		 * checked in.
		 */

		bl_dmem_desc->signature[0] = 0;
		bl_dmem_desc->signature[1] = 0;
		bl_dmem_desc->signature[2] = 0;
		bl_dmem_desc->signature[3] = 0;
		bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
		bl_dmem_desc->code_dma_base =
			(unsigned int)(((u64)acr->acr_ucode.gpu_va >> 8));
		bl_dmem_desc->code_dma_base1 = 0x0;
		bl_dmem_desc->non_sec_code_off = acr_ucode_header_t210_load[0];
		bl_dmem_desc->non_sec_code_size = acr_ucode_header_t210_load[1];
		bl_dmem_desc->sec_code_off = acr_ucode_header_t210_load[5];
		bl_dmem_desc->sec_code_size = acr_ucode_header_t210_load[6];
		bl_dmem_desc->code_entry_point = 0; /* Start at 0th offset */
		bl_dmem_desc->data_dma_base =
			bl_dmem_desc->code_dma_base +
			((acr_ucode_header_t210_load[2]) >> 8);
		bl_dmem_desc->data_dma_base1 = 0x0;
		bl_dmem_desc->data_size = acr_ucode_header_t210_load[3];
	} else {
		acr->acr_dmem_desc->nonwpr_ucode_blob_size = 0;
	}
	status = pmu_exec_gen_bl(g, bl_dmem_desc, 1);
	if (status != 0) {
		err = status;
		goto err_free_ucode_map;
	}
	return 0;
err_free_ucode_map:
	nvgpu_dma_unmap_free(vm, &acr->acr_ucode);
err_release_acr_fw:
	nvgpu_release_firmware(g, acr_fw);
	acr->acr_fw = NULL;
	return err;
}

/*
 * @brief Patch signatures into ucode image
 */
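The bootloader descriptor above packs addresses in 256-byte units: the ucode GPU VA is shifted right by 8 into the 32-bit code_dma_base, and data_dma_base is derived from it by adding the DMEM offset (ucode header word 2), also shifted by 8. A standalone arithmetic sketch of that packing (the VA and header values are invented for illustration):

/* Illustrative repack of the bl_dmem_desc address math above.
 * Addresses are stored in 256-byte blocks, hence the >> 8 shifts. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* invented, 256-byte-aligned example VA and header word */
	uint64_t acr_ucode_gpu_va = 0x12345600ull;
	uint32_t hdr_dmem_offset = 0x1a00;	/* acr_ucode_header_t210_load[2] */

	uint32_t code_dma_base = (uint32_t)(acr_ucode_gpu_va >> 8);
	uint32_t data_dma_base = code_dma_base + (hdr_dmem_offset >> 8);

	/* prints: code_dma_base 123456 data_dma_base 123470 */
	printf("code_dma_base %x data_dma_base %x\n",
		(unsigned)code_dma_base, (unsigned)data_dma_base);
	return 0;
}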
@@ -1172,33 +1043,6 @@ int acr_ucode_patch_sig(struct gk20a *g,
	return 0;
}

static int bl_bootstrap(struct nvgpu_pmu *pmu,
		struct flcn_bl_dmem_desc *pbl_desc, u32 bl_sz)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct mm_gk20a *mm = &g->mm;
	struct nvgpu_falcon_bl_info bl_info;

	nvgpu_log_fn(g, " ");
	gk20a_writel(g, pwr_falcon_itfen_r(),
		gk20a_readl(g, pwr_falcon_itfen_r()) |
		pwr_falcon_itfen_ctxen_enable_f());
	gk20a_writel(g, pwr_pmu_new_instblk_r(),
		pwr_pmu_new_instblk_ptr_f(
		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
		pwr_pmu_new_instblk_valid_f(1) |
		pwr_pmu_new_instblk_target_sys_coh_f());

	bl_info.bl_src = g->acr.hsbl_ucode.cpu_va;
	bl_info.bl_desc = (u8 *)pbl_desc;
	bl_info.bl_desc_size = sizeof(struct flcn_bl_dmem_desc);
	bl_info.bl_size = bl_sz;
	bl_info.bl_start_tag = g->acr.pmu_hsbl_desc->bl_start_tag;
	nvgpu_flcn_bl_bootstrap(&g->pmu_flcn, &bl_info);

	return 0;
}

int gm20b_init_nspmu_setup_hw1(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
@@ -1268,202 +1112,3 @@ void gm20b_update_lspmu_cmdline_args(struct gk20a *g)
		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
}

int gm20b_init_pmu_setup_hw1(struct gk20a *g,
		void *desc, u32 bl_sz)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	int err;

	nvgpu_log_fn(g, " ");

	nvgpu_mutex_acquire(&pmu->isr_mutex);
	nvgpu_flcn_reset(pmu->flcn);
	pmu->isr_enabled = true;
	nvgpu_mutex_release(&pmu->isr_mutex);

	if (g->ops.pmu.setup_apertures) {
		g->ops.pmu.setup_apertures(g);
	}
	if (g->ops.pmu.update_lspmu_cmdline_args) {
		g->ops.pmu.update_lspmu_cmdline_args(g);
	}

	/*disable irqs for hs falcon booting as we will poll for halt*/
	nvgpu_mutex_acquire(&pmu->isr_mutex);
	g->ops.pmu.pmu_enable_irq(pmu, false);
	pmu->isr_enabled = false;
	nvgpu_mutex_release(&pmu->isr_mutex);
	/*Clearing mailbox register used to reflect capabilities*/
	gk20a_writel(g, pwr_falcon_mailbox1_r(), 0);
	err = bl_bootstrap(pmu, desc, bl_sz);
	if (err) {
		return err;
	}
	return 0;
}

/*
 * Executes a generic bootloader and wait for PMU to halt.
 * This BL will be used for those binaries that are loaded
 * and executed at times other than RM PMU Binary execution.
 *
 * @param[in] g gk20a pointer
 * @param[in] desc Bootloader descriptor
 * @param[in] dma_idx DMA Index
 * @param[in] b_wait_for_halt Wait for PMU to HALT
 */
int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
{
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->pmu.vm;
	int err = 0;
	u32 bl_sz;
	struct acr_desc *acr = &g->acr;
	struct nvgpu_firmware *hsbl_fw = acr->hsbl_fw;
	struct hsflcn_bl_desc *pmu_bl_gm10x_desc;
	u32 *pmu_bl_gm10x = NULL;
	nvgpu_pmu_dbg(g, " ");

	if (!hsbl_fw) {
		hsbl_fw = nvgpu_request_firmware(g,
			GM20B_HSBIN_PMU_BL_UCODE_IMAGE, 0);
		if (!hsbl_fw) {
			nvgpu_err(g, "pmu ucode load fail");
			return -ENOENT;
		}
		acr->hsbl_fw = hsbl_fw;
		acr->bl_bin_hdr = (struct bin_hdr *)hsbl_fw->data;
		acr->pmu_hsbl_desc = (struct hsflcn_bl_desc *)(hsbl_fw->data +
			acr->bl_bin_hdr->header_offset);
		pmu_bl_gm10x_desc = acr->pmu_hsbl_desc;
		pmu_bl_gm10x = (u32 *)(hsbl_fw->data +
			acr->bl_bin_hdr->data_offset);
		bl_sz = ALIGN(pmu_bl_gm10x_desc->bl_img_hdr.bl_code_size,
			256);
		acr->hsbl_ucode.size = bl_sz;
		nvgpu_pmu_dbg(g, "Executing Generic Bootloader\n");

		/*TODO in code verify that enable PMU is done,
			scrubbing etc is done*/
		/*TODO in code verify that gmmu vm init is done*/
		err = nvgpu_dma_alloc_sys(g, bl_sz, &acr->hsbl_ucode);
		if (err) {
			nvgpu_err(g, "failed to allocate memory");
			goto err_done;
		}

		acr->hsbl_ucode.gpu_va = nvgpu_gmmu_map(vm,
				&acr->hsbl_ucode,
				bl_sz,
				0, /* flags */
				gk20a_mem_flag_read_only, false,
				acr->hsbl_ucode.aperture);
		if (!acr->hsbl_ucode.gpu_va) {
			nvgpu_err(g, "failed to map pmu ucode memory!!");
			goto err_free_ucode;
		}

		nvgpu_mem_wr_n(g, &acr->hsbl_ucode, 0, pmu_bl_gm10x, bl_sz);
		nvgpu_pmu_dbg(g, "Copied bl ucode to bl_cpuva\n");
	}
	/*
	 * Disable interrupts to avoid kernel hitting breakpoint due
	 * to PMU halt
	 */

	if (g->ops.pmu.falcon_clear_halt_interrupt_status(g,
		gk20a_get_gr_idle_timeout(g))) {
		goto err_unmap_bl;
	}

	nvgpu_pmu_dbg(g, "phys sec reg %x\n", gk20a_readl(g,
		pwr_falcon_mmu_phys_sec_r()));
	nvgpu_pmu_dbg(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));

	g->ops.pmu.init_falcon_setup_hw(g, desc, acr->hsbl_ucode.size);

	/* Poll for HALT */
	if (b_wait_for_halt) {
		err = g->ops.pmu.falcon_wait_for_halt(g,
			ACR_COMPLETION_TIMEOUT_MS);
		if (err == 0) {
			/* Clear the HALT interrupt */
			if (g->ops.pmu.falcon_clear_halt_interrupt_status(g,
				gk20a_get_gr_idle_timeout(g))) {
				goto err_unmap_bl;
			}
		} else {
			goto err_unmap_bl;
		}
	}
	nvgpu_pmu_dbg(g, "after waiting for halt, err %x\n", err);
	nvgpu_pmu_dbg(g, "phys sec reg %x\n", gk20a_readl(g,
		pwr_falcon_mmu_phys_sec_r()));
	nvgpu_pmu_dbg(g, "sctl reg %x\n", gk20a_readl(g, pwr_falcon_sctl_r()));
	start_gm20b_pmu(g);
	return 0;
err_unmap_bl:
	nvgpu_gmmu_unmap(vm, &acr->hsbl_ucode, acr->hsbl_ucode.gpu_va);
err_free_ucode:
	nvgpu_dma_free(g, &acr->hsbl_ucode);
err_done:
	nvgpu_release_firmware(g, hsbl_fw);
	return err;
}

/*!
 * Wait for PMU to halt
 * @param[in] g GPU object pointer
 * @param[in] timeout_ms Timeout in msec for PMU to halt
 * @return '0' if PMU halts
 */
int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	u32 data = 0;
	int ret = 0;

	ret = nvgpu_flcn_wait_for_halt(pmu->flcn, timeout_ms);
	if (ret) {
		nvgpu_err(g, "ACR boot timed out");
		goto exit;
	}

	g->acr.capabilities = gk20a_readl(g, pwr_falcon_mailbox1_r());
	nvgpu_pmu_dbg(g, "ACR capabilities %x\n", g->acr.capabilities);
	data = gk20a_readl(g, pwr_falcon_mailbox0_r());
	if (data) {
		nvgpu_err(g, "ACR boot failed, err %x", data);
		ret = -EAGAIN;
		goto exit;
	}

exit:
	if (ret) {
		nvgpu_kill_task_pg_init(g);
		nvgpu_pmu_state_change(g, PMU_STATE_OFF, false);
		nvgpu_flcn_dump_stats(pmu->flcn);
	}

	return ret;
}

/*!
 * Wait for PMU halt interrupt status to be cleared
 * @param[in] g GPU object pointer
 * @param[in] timeout_ms Timeout in msec for halt to clear
 * @return '0' if PMU halt irq status is clear
 */
int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout_ms)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	int status = 0;

	if (nvgpu_flcn_clear_halt_intr_status(pmu->flcn, timeout_ms)) {
		status = -EBUSY;
	}

	return status;
}
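pmu_wait_for_halt() above doubles as the ACR completion check: after the falcon halts, MAILBOX1 carries the capability bits ACR published and a nonzero MAILBOX0 is the error code. A minimal standalone sketch of that protocol (the two variables stand in for gk20a_readl() on the real registers; the values in main() are invented):

/* Minimal sketch of the halt/mailbox completion protocol used above.
 * The register meanings (MAILBOX0 = error code, MAILBOX1 = capability
 * bits) follow the pmu_wait_for_halt() hunk in this diff. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mailbox0;	/* nonzero = ACR boot error code */
static uint32_t mailbox1;	/* capability bits published by ACR */

static int acr_check_completion(void)
{
	printf("ACR capabilities %x\n", (unsigned)mailbox1);
	if (mailbox0 != 0) {
		printf("ACR boot failed, err %x\n", (unsigned)mailbox0);
		return -1;	/* the driver returns -EAGAIN here */
	}
	return 0;
}

int main(void)
{
	mailbox1 = 0x3;		/* invented capability bits */
	mailbox0 = 0x0;		/* 0 = success */
	return acr_check_completion();
}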
@@ -35,7 +35,6 @@

bool gm20b_is_pmu_supported(struct gk20a *g);
int prepare_ucode_blob(struct gk20a *g);
int gm20b_bootstrap_hs_flcn(struct gk20a *g);
bool gm20b_is_lazy_bootstrap(u32 falcon_id);
bool gm20b_is_priv_load(u32 falcon_id);
void gm20b_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf);
@@ -44,14 +43,10 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
	void *lsfm, u32 *p_bl_gen_desc_size);
int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
	void *lsfm, u32 *p_bl_gen_desc_size, u32 falconid);
int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms);
int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout);
int gm20b_init_pmu_setup_hw1(struct gk20a *g, void *desc, u32 bl_sz);
void gm20b_update_lspmu_cmdline_args(struct gk20a *g);
void gm20b_setup_apertures(struct gk20a *g);

int gm20b_pmu_setup_sw(struct gk20a *g);
int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt);
int gm20b_init_nspmu_setup_hw1(struct gk20a *g);
int acr_ucode_patch_sig(struct gk20a *g,
	unsigned int *p_img,
@@ -722,19 +722,12 @@ int gm20b_init_hal(struct gk20a *g)
		/* Add in ops from gm20b acr */
		gops->pmu.is_pmu_supported = gm20b_is_pmu_supported;
		gops->pmu.prepare_ucode = prepare_ucode_blob;
		gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn;
		gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap;
		gops->pmu.is_priv_load = gm20b_is_priv_load;
		gops->pmu.get_wpr = gm20b_wpr_info;
		gops->pmu.alloc_blob_space = gm20b_alloc_blob_space;
		gops->pmu.pmu_populate_loader_cfg =
			gm20b_pmu_populate_loader_cfg;
		gops->pmu.flcn_populate_bl_dmem_desc =
			gm20b_flcn_populate_bl_dmem_desc;
		gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt;
		gops->pmu.falcon_clear_halt_interrupt_status =
			clear_halt_interrupt_status;
		gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1;
		gops->pmu.update_lspmu_cmdline_args =
			gm20b_update_lspmu_cmdline_args;
		gops->pmu.setup_apertures = gm20b_setup_apertures;
@@ -747,7 +740,6 @@ int gm20b_init_hal(struct gk20a *g)
		/* Inherit from gk20a */
		gops->pmu.is_pmu_supported = gk20a_is_pmu_supported;
		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob;
		gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1;
		gops->pmu.pmu_nsbootstrap = pmu_bootstrap;

		gops->pmu.load_lsfalcon_ucode = NULL;
@@ -1191,135 +1191,3 @@ int lsf_gen_wpr_requirements(struct gk20a *g,
	plsfm->wpr_size = wpr_offset;
	return 0;
}

/*Loads ACR bin to FB mem and bootstraps PMU with bootloader code
 * start and end are addresses of ucode blob in non-WPR region*/
int gp106_bootstrap_hs_flcn(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->pmu.vm;
	int err = 0;
	u64 *acr_dmem;
	u32 img_size_in_bytes = 0;
	u32 status;
	struct acr_desc *acr = &g->acr;
	struct nvgpu_firmware *acr_fw = acr->acr_fw;
	struct flcn_bl_dmem_desc_v1 *bl_dmem_desc = &acr->bl_dmem_desc_v1;
	u32 *acr_ucode_header_t210_load;
	u32 *acr_ucode_data_t210_load;
	struct wpr_carveout_info wpr_inf;

	gp106_dbg_pmu(g, " ");

	if (!acr_fw) {
		/*First time init case*/
		acr_fw = nvgpu_request_firmware(g,
				GM20B_HSBIN_PMU_UCODE_IMAGE,
				NVGPU_REQUEST_FIRMWARE_NO_SOC);
		if (!acr_fw) {
			nvgpu_err(g, "pmu ucode get fail");
			return -ENOENT;
		}
		acr->acr_fw = acr_fw;
		acr->hsbin_hdr = (struct bin_hdr *)acr_fw->data;
		acr->fw_hdr = (struct acr_fw_header *)(acr_fw->data +
				acr->hsbin_hdr->header_offset);
		acr_ucode_data_t210_load = (u32 *)(acr_fw->data +
				acr->hsbin_hdr->data_offset);
		acr_ucode_header_t210_load = (u32 *)(acr_fw->data +
				acr->fw_hdr->hdr_offset);
		img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256);

		/* Lets patch the signatures first.. */
		if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load,
			(u32 *)(acr_fw->data +
					acr->fw_hdr->sig_prod_offset),
			(u32 *)(acr_fw->data +
					acr->fw_hdr->sig_dbg_offset),
			(u32 *)(acr_fw->data +
					acr->fw_hdr->patch_loc),
			(u32 *)(acr_fw->data +
					acr->fw_hdr->patch_sig)) < 0) {
			nvgpu_err(g, "patch signatures fail");
			err = -1;
			goto err_release_acr_fw;
		}
		err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
				&acr->acr_ucode);
		if (err) {
			err = -ENOMEM;
			goto err_release_acr_fw;
		}

		g->ops.pmu.get_wpr(g, &wpr_inf);

		acr_dmem = (u64 *)
			&(((u8 *)acr_ucode_data_t210_load)[
					acr_ucode_header_t210_load[2]]);
		acr->acr_dmem_desc_v1 = (struct flcn_acr_desc_v1 *)((u8 *)(
			acr->acr_ucode.cpu_va) + acr_ucode_header_t210_load[2]);
		((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_start =
			wpr_inf.nonwpr_base;
		((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_size =
			wpr_inf.size;
		((struct flcn_acr_desc_v1 *)acr_dmem)->regions.no_regions = 1;
		((struct flcn_acr_desc_v1 *)acr_dmem)->wpr_offset = 0;

		((struct flcn_acr_desc_v1 *)acr_dmem)->wpr_region_id = 1;
		((struct flcn_acr_desc_v1 *)acr_dmem)->regions.region_props[
			0].region_id = 1;
		((struct flcn_acr_desc_v1 *)acr_dmem)->regions.region_props[
			0].start_addr = (wpr_inf.wpr_base) >> 8;
		((struct flcn_acr_desc_v1 *)acr_dmem)->regions.region_props[
			0].end_addr = ((wpr_inf.wpr_base) + wpr_inf.size) >> 8;
		((struct flcn_acr_desc_v1 *)acr_dmem)->regions.region_props[
			0].shadowmMem_startaddress = wpr_inf.nonwpr_base >> 8;

		nvgpu_mem_wr_n(g, &acr->acr_ucode, 0,
				acr_ucode_data_t210_load, img_size_in_bytes);

		/*
		 * In order to execute this binary, we will be using
		 * a bootloader which will load this image into PMU IMEM/DMEM.
		 * Fill up the bootloader descriptor for PMU HAL to use..
		 * TODO: Use standard descriptor which the generic bootloader is
		 * checked in.
		 */
		bl_dmem_desc->signature[0] = 0;
		bl_dmem_desc->signature[1] = 0;
		bl_dmem_desc->signature[2] = 0;
		bl_dmem_desc->signature[3] = 0;
		bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
		flcn64_set_dma(&bl_dmem_desc->code_dma_base,
				acr->acr_ucode.gpu_va);
		bl_dmem_desc->non_sec_code_off = acr_ucode_header_t210_load[0];
		bl_dmem_desc->non_sec_code_size = acr_ucode_header_t210_load[1];
		bl_dmem_desc->sec_code_off = acr_ucode_header_t210_load[5];
		bl_dmem_desc->sec_code_size = acr_ucode_header_t210_load[6];
		bl_dmem_desc->code_entry_point = 0; /* Start at 0th offset */
		flcn64_set_dma(&bl_dmem_desc->data_dma_base,
				acr->acr_ucode.gpu_va +
				(acr_ucode_header_t210_load[2]));
		bl_dmem_desc->data_size = acr_ucode_header_t210_load[3];
	} else {
		acr->acr_dmem_desc->nonwpr_ucode_blob_size = 0;
	}

	status = pmu_exec_gen_bl(g, bl_dmem_desc, 1);
	if (status != 0) {
		err = status;
		goto err_free_ucode_map;
	}

	/* sec2 reset - to keep it idle */
	nvgpu_flcn_reset(&g->sec2_flcn);

	return 0;
err_free_ucode_map:
	nvgpu_dma_unmap_free(vm, &acr->acr_ucode);
err_release_acr_fw:
	nvgpu_release_firmware(g, acr_fw);
	acr->acr_fw = NULL;
	return err;
}
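The gp106 path programs WPR region 1 with the same 256-byte-unit convention: start_addr and end_addr are wpr_base and wpr_base + size shifted right by 8, with the non-WPR shadow copy recorded alongside. A hedged arithmetic sketch (the carveout base and size here are invented values, not real gp106 numbers):

/* Example of the WPR region bound math in the gp106 hunk above.
 * wpr_base/wpr_size are invented for illustration. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t wpr_base = 0xf0000000ull;	/* hypothetical carveout base */
	uint64_t wpr_size = 0x200000ull;	/* hypothetical 2 MiB carveout */

	uint32_t start_addr = (uint32_t)(wpr_base >> 8);
	uint32_t end_addr = (uint32_t)((wpr_base + wpr_size) >> 8);

	/* prints: region 1: start f00000 end f02000 (256-byte units) */
	printf("region 1: start %x end %x (256-byte units)\n",
		(unsigned)start_addr, (unsigned)end_addr);
	return 0;
}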
@@ -618,15 +618,8 @@ static const struct gpu_ops gp106_ops = {
		.is_lazy_bootstrap = gp106_is_lazy_bootstrap,
		.is_priv_load = gp106_is_priv_load,
		.prepare_ucode = gp106_prepare_ucode_blob,
		.pmu_setup_hw_and_bootstrap = gp106_bootstrap_hs_flcn,
		.get_wpr = gp106_wpr_info,
		.alloc_blob_space = gp106_alloc_blob_space,
		.pmu_populate_loader_cfg = gp106_pmu_populate_loader_cfg,
		.flcn_populate_bl_dmem_desc = gp106_flcn_populate_bl_dmem_desc,
		.falcon_wait_for_halt = gp106_sec2_wait_for_halt,
		.falcon_clear_halt_interrupt_status =
			gp106_sec2_clear_halt_interrupt_status,
		.init_falcon_setup_hw = init_sec2_setup_hw1,
		.pmu_queue_tail = gk20a_pmu_queue_tail,
		.pmu_get_queue_head = pwr_pmu_queue_head_r,
		.pmu_mutex_release = gk20a_pmu_mutex_release,
@@ -782,19 +782,12 @@ int gp10b_init_hal(struct gk20a *g)
		/* Add in ops from gm20b acr */
		gops->pmu.is_pmu_supported = gm20b_is_pmu_supported,
		gops->pmu.prepare_ucode = prepare_ucode_blob,
		gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn,
		gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap,
		gops->pmu.is_priv_load = gm20b_is_priv_load,
		gops->pmu.get_wpr = gm20b_wpr_info,
		gops->pmu.alloc_blob_space = gm20b_alloc_blob_space,
		gops->pmu.pmu_populate_loader_cfg =
			gm20b_pmu_populate_loader_cfg,
		gops->pmu.flcn_populate_bl_dmem_desc =
			gm20b_flcn_populate_bl_dmem_desc,
		gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt,
		gops->pmu.falcon_clear_halt_interrupt_status =
			clear_halt_interrupt_status,
		gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1,
		gops->pmu.update_lspmu_cmdline_args =
			gm20b_update_lspmu_cmdline_args;
		gops->pmu.setup_apertures = gm20b_setup_apertures;
@@ -809,12 +802,10 @@ int gp10b_init_hal(struct gk20a *g)
		/* Inherit from gk20a */
		gops->pmu.is_pmu_supported = gk20a_is_pmu_supported,
		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob,
		gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1,
		gops->pmu.pmu_nsbootstrap = pmu_bootstrap,

		gops->pmu.load_lsfalcon_ucode = NULL;
		gops->pmu.init_wpr_region = NULL;
		gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;

		gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
	}
@@ -719,15 +719,8 @@ static const struct gpu_ops gv100_ops = {
		.is_lazy_bootstrap = gp106_is_lazy_bootstrap,
		.is_priv_load = gp106_is_priv_load,
		.prepare_ucode = gp106_prepare_ucode_blob,
		.pmu_setup_hw_and_bootstrap = gp106_bootstrap_hs_flcn,
		.get_wpr = gp106_wpr_info,
		.alloc_blob_space = gp106_alloc_blob_space,
		.pmu_populate_loader_cfg = gp106_pmu_populate_loader_cfg,
		.flcn_populate_bl_dmem_desc = gp106_flcn_populate_bl_dmem_desc,
		.falcon_wait_for_halt = gp106_sec2_wait_for_halt,
		.falcon_clear_halt_interrupt_status =
			gp106_sec2_clear_halt_interrupt_status,
		.init_falcon_setup_hw = init_sec2_setup_hw1,
		.pmu_queue_tail = gk20a_pmu_queue_tail,
		.pmu_get_queue_head = pwr_pmu_queue_head_r,
		.pmu_mutex_release = gk20a_pmu_mutex_release,
@@ -47,11 +47,6 @@
#define gv11b_dbg_pmu(g, fmt, arg...) \
	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
{
	dma_addr->lo |= u64_lo32(value);
	dma_addr->hi |= u64_hi32(value);
}
/*Externs*/

/*Forwards*/
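flcn64_set_dma() above simply splits a 64-bit GPU VA into the lo/hi halves of the falcon's falc_u64, ORing into the fields (so callers are expected to start from a zeroed descriptor). A standalone check of that split, with u64_lo32()/u64_hi32() re-implemented locally for the demo:

/* Sketch of what flcn64_set_dma() above does: split a 64-bit GPU VA
 * into the lo/hi 32-bit halves of the falcon's falc_u64 DMA address. */
#include <stdint.h>
#include <assert.h>

struct falc_u64 { uint32_t lo; uint32_t hi; };

static uint32_t u64_lo32(uint64_t v) { return (uint32_t)(v & 0xffffffffu); }
static uint32_t u64_hi32(uint64_t v) { return (uint32_t)(v >> 32); }

static void flcn64_set_dma(struct falc_u64 *dma_addr, uint64_t value)
{
	/* ORs into the fields, as in the hunk above */
	dma_addr->lo |= u64_lo32(value);
	dma_addr->hi |= u64_hi32(value);
}

int main(void)
{
	struct falc_u64 d = {0, 0};
	flcn64_set_dma(&d, 0x123456789abcdef0ull);
	assert(d.lo == 0x9abcdef0u && d.hi == 0x12345678u);
	return 0;
}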
@@ -68,173 +63,6 @@ int gv11b_alloc_blob_space(struct gk20a *g,
	return err;
}

/*Loads ACR bin to FB mem and bootstraps PMU with bootloader code
 * start and end are addresses of ucode blob in non-WPR region*/
int gv11b_bootstrap_hs_flcn(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->pmu.vm;
	int err = 0;
	u64 *acr_dmem;
	u32 img_size_in_bytes = 0;
	u32 status, size, index;
	u64 start;
	struct acr_desc *acr = &g->acr;
	struct nvgpu_firmware *acr_fw = acr->acr_fw;
	struct flcn_bl_dmem_desc_v1 *bl_dmem_desc = &acr->bl_dmem_desc_v1;
	u32 *acr_ucode_header_t210_load;
	u32 *acr_ucode_data_t210_load;

	start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
	size = acr->ucode_blob.size;

	gv11b_dbg_pmu(g, "acr ucode blob start %llx\n", start);
	gv11b_dbg_pmu(g, "acr ucode blob size %x\n", size);

	gv11b_dbg_pmu(g, " ");

	if (!acr_fw) {
		/*First time init case*/
		acr_fw = nvgpu_request_firmware(g,
				GM20B_HSBIN_PMU_UCODE_IMAGE, 0);
		if (!acr_fw) {
			nvgpu_err(g, "pmu ucode get fail");
			return -ENOENT;
		}
		acr->acr_fw = acr_fw;
		acr->hsbin_hdr = (struct bin_hdr *)acr_fw->data;
		acr->fw_hdr = (struct acr_fw_header *)(acr_fw->data +
				acr->hsbin_hdr->header_offset);
		acr_ucode_data_t210_load = (u32 *)(acr_fw->data +
				acr->hsbin_hdr->data_offset);
		acr_ucode_header_t210_load = (u32 *)(acr_fw->data +
				acr->fw_hdr->hdr_offset);
		img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256);

		gv11b_dbg_pmu(g, "sig dbg offset %u\n",
			acr->fw_hdr->sig_dbg_offset);
		gv11b_dbg_pmu(g, "sig dbg size %u\n", acr->fw_hdr->sig_dbg_size);
		gv11b_dbg_pmu(g, "sig prod offset %u\n",
			acr->fw_hdr->sig_prod_offset);
		gv11b_dbg_pmu(g, "sig prod size %u\n",
			acr->fw_hdr->sig_prod_size);
		gv11b_dbg_pmu(g, "patch loc %u\n", acr->fw_hdr->patch_loc);
		gv11b_dbg_pmu(g, "patch sig %u\n", acr->fw_hdr->patch_sig);
		gv11b_dbg_pmu(g, "header offset %u\n", acr->fw_hdr->hdr_offset);
		gv11b_dbg_pmu(g, "header size %u\n", acr->fw_hdr->hdr_size);

		/* Lets patch the signatures first.. */
		if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load,
			(u32 *)(acr_fw->data +
					acr->fw_hdr->sig_prod_offset),
			(u32 *)(acr_fw->data +
					acr->fw_hdr->sig_dbg_offset),
			(u32 *)(acr_fw->data +
					acr->fw_hdr->patch_loc),
			(u32 *)(acr_fw->data +
					acr->fw_hdr->patch_sig)) < 0) {
			nvgpu_err(g, "patch signatures fail");
			err = -1;
			goto err_release_acr_fw;
		}
		err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
				&acr->acr_ucode);
		if (err) {
			err = -ENOMEM;
			goto err_release_acr_fw;
		}

		for (index = 0; index < 9; index++) {
			gv11b_dbg_pmu(g, "acr_ucode_header_t210_load %u\n",
				acr_ucode_header_t210_load[index]);
		}

		acr_dmem = (u64 *)
			&(((u8 *)acr_ucode_data_t210_load)[
					acr_ucode_header_t210_load[2]]);
		acr->acr_dmem_desc_v1 = (struct flcn_acr_desc_v1 *)((u8 *)(
			acr->acr_ucode.cpu_va) + acr_ucode_header_t210_load[2]);
		((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_start =
			(start);
		((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_size =
			size;
		((struct flcn_acr_desc_v1 *)acr_dmem)->regions.no_regions = 2;
		((struct flcn_acr_desc_v1 *)acr_dmem)->wpr_offset = 0;

		nvgpu_mem_wr_n(g, &acr->acr_ucode, 0,
				acr_ucode_data_t210_load, img_size_in_bytes);
		/*
		 * In order to execute this binary, we will be using
		 * a bootloader which will load this image into PMU IMEM/DMEM.
		 * Fill up the bootloader descriptor for PMU HAL to use..
		 * TODO: Use standard descriptor which the generic bootloader is
		 * checked in.
		 */
		bl_dmem_desc->signature[0] = 0;
		bl_dmem_desc->signature[1] = 0;
		bl_dmem_desc->signature[2] = 0;
		bl_dmem_desc->signature[3] = 0;
		bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
		flcn64_set_dma(&bl_dmem_desc->code_dma_base,
				acr->acr_ucode.gpu_va);
		bl_dmem_desc->non_sec_code_off = acr_ucode_header_t210_load[0];
		bl_dmem_desc->non_sec_code_size = acr_ucode_header_t210_load[1];
		bl_dmem_desc->sec_code_off = acr_ucode_header_t210_load[5];
		bl_dmem_desc->sec_code_size = acr_ucode_header_t210_load[6];
		bl_dmem_desc->code_entry_point = 0; /* Start at 0th offset */
		flcn64_set_dma(&bl_dmem_desc->data_dma_base,
				acr->acr_ucode.gpu_va +
				acr_ucode_header_t210_load[2]);
		bl_dmem_desc->data_size = acr_ucode_header_t210_load[3];
	} else {
		acr->acr_dmem_desc_v1->nonwpr_ucode_blob_size = 0;
	}
	status = pmu_exec_gen_bl(g, bl_dmem_desc, 1);
	if (status != 0) {
		err = status;
		goto err_free_ucode_map;
	}

	return 0;
err_free_ucode_map:
	nvgpu_dma_unmap_free(vm, &acr->acr_ucode);
err_release_acr_fw:
	nvgpu_release_firmware(g, acr_fw);
	acr->acr_fw = NULL;

	return err;
}

static int bl_bootstrap(struct nvgpu_pmu *pmu,
	struct flcn_bl_dmem_desc_v1 *pbl_desc, u32 bl_sz)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct mm_gk20a *mm = &g->mm;
	struct nvgpu_falcon_bl_info bl_info;

	nvgpu_log_fn(g, " ");

	gk20a_writel(g, pwr_falcon_itfen_r(),
		gk20a_readl(g, pwr_falcon_itfen_r()) |
		pwr_falcon_itfen_ctxen_enable_f());
	gk20a_writel(g, pwr_pmu_new_instblk_r(),
		pwr_pmu_new_instblk_ptr_f(
		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
		pwr_pmu_new_instblk_valid_f(1) |
		(nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ?
		 pwr_pmu_new_instblk_target_sys_coh_f() :
		 pwr_pmu_new_instblk_target_sys_ncoh_f()));

	bl_info.bl_src = g->acr.hsbl_ucode.cpu_va;
	bl_info.bl_desc = (u8 *)pbl_desc;
	bl_info.bl_desc_size = sizeof(struct flcn_bl_dmem_desc_v1);
	bl_info.bl_size = bl_sz;
	bl_info.bl_start_tag = g->acr.pmu_hsbl_desc->bl_start_tag;
	nvgpu_flcn_bl_bootstrap(&g->pmu_flcn, &bl_info);

	return 0;
}

void gv11b_setup_apertures(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
@@ -263,37 +91,3 @@ void gv11b_setup_apertures(struct gk20a *g)
		pwr_fbif_transcfg_mem_type_physical_f() |
		pwr_fbif_transcfg_target_noncoherent_sysmem_f());
}

int gv11b_init_pmu_setup_hw1(struct gk20a *g,
		void *desc, u32 bl_sz)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	int err;

	nvgpu_log_fn(g, " ");

	nvgpu_mutex_acquire(&pmu->isr_mutex);
	nvgpu_flcn_reset(pmu->flcn);
	pmu->isr_enabled = true;
	nvgpu_mutex_release(&pmu->isr_mutex);

	if (g->ops.pmu.setup_apertures) {
		g->ops.pmu.setup_apertures(g);
	}
	if (g->ops.pmu.update_lspmu_cmdline_args) {
		g->ops.pmu.update_lspmu_cmdline_args(g);
	}

	/*disable irqs for hs falcon booting as we will poll for halt*/
	nvgpu_mutex_acquire(&pmu->isr_mutex);
	g->ops.pmu.pmu_enable_irq(pmu, false);
	pmu->isr_enabled = false;
	nvgpu_mutex_release(&pmu->isr_mutex);
	/*Clearing mailbox register used to reflect capabilities*/
	gk20a_writel(g, pwr_falcon_mailbox1_r(), 0);
	err = bl_bootstrap(pmu, desc, bl_sz);
	if (err) {
		return err;
	}
	return 0;
}
@@ -877,17 +877,10 @@ int gv11b_init_hal(struct gk20a *g)
	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
		/* Add in ops from gm20b acr */
		gops->pmu.prepare_ucode = gp106_prepare_ucode_blob,
		gops->pmu.pmu_setup_hw_and_bootstrap = gv11b_bootstrap_hs_flcn,
		gops->pmu.get_wpr = gm20b_wpr_info,
		gops->pmu.alloc_blob_space = gv11b_alloc_blob_space,
		gops->pmu.pmu_populate_loader_cfg =
			gp106_pmu_populate_loader_cfg,
		gops->pmu.flcn_populate_bl_dmem_desc =
			gp106_flcn_populate_bl_dmem_desc,
		gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt,
		gops->pmu.falcon_clear_halt_interrupt_status =
			clear_halt_interrupt_status,
		gops->pmu.init_falcon_setup_hw = gv11b_init_pmu_setup_hw1,
		gops->pmu.update_lspmu_cmdline_args =
			gm20b_update_lspmu_cmdline_args;
		gops->pmu.setup_apertures = gv11b_setup_apertures;
@@ -901,11 +894,9 @@ int gv11b_init_hal(struct gk20a *g)
	} else {
		/* Inherit from gk20a */
		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob,
		gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1,

		gops->pmu.load_lsfalcon_ucode = NULL;
		gops->pmu.init_wpr_region = NULL;
		gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;

		gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
	}
@@ -1052,12 +1052,6 @@ struct gpu_ops {
		void (*dump_secure_fuses)(struct gk20a *g);
		int (*reset_engine)(struct gk20a *g, bool do_reset);
		bool (*is_engine_in_reset)(struct gk20a *g);
		int (*falcon_wait_for_halt)(struct gk20a *g,
			unsigned int timeout);
		int (*falcon_clear_halt_interrupt_status)(struct gk20a *g,
			unsigned int timeout);
		int (*init_falcon_setup_hw)(struct gk20a *g,
			void *desc, u32 bl_sz);
		bool (*is_lazy_bootstrap)(u32 falcon_id);
		bool (*is_priv_load)(u32 falcon_id);
		void (*get_wpr)(struct gk20a *g, struct wpr_carveout_info *inf);
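Nothing called these chip functions directly; generic code such as pmu_exec_gen_bl() dispatched through the table, which is why deleting the fields above also strips every call site. A compile-only fragment mirroring that dispatch pattern (the struct and function names here are pared-down stand-ins, not the driver's types):

/* Fragment mirroring how pmu_exec_gen_bl() dispatched through the
 * ops removed above; types are cut down so the sketch compiles
 * on its own. */
struct gk20a;

struct pmu_halt_ops {	/* stand-in slice of struct gpu_ops.pmu */
	int (*falcon_clear_halt_interrupt_status)(struct gk20a *g,
			unsigned int timeout);
	int (*falcon_wait_for_halt)(struct gk20a *g, unsigned int timeout);
};

static int run_hs_binary_and_wait(struct gk20a *g,
		const struct pmu_halt_ops *ops, unsigned int timeout_ms)
{
	/* clear stale halt status, boot, then poll for the post-boot halt */
	if (ops->falcon_clear_halt_interrupt_status(g, timeout_ms) != 0)
		return -1;
	return ops->falcon_wait_for_halt(g, timeout_ms);
}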
@@ -604,19 +604,12 @@ int vgpu_gp10b_init_hal(struct gk20a *g)
		/* Add in ops from gm20b acr */
		gops->pmu.is_pmu_supported = gm20b_is_pmu_supported,
		gops->pmu.prepare_ucode = prepare_ucode_blob,
		gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn,
		gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap,
		gops->pmu.is_priv_load = gm20b_is_priv_load,
		gops->pmu.get_wpr = gm20b_wpr_info,
		gops->pmu.alloc_blob_space = gm20b_alloc_blob_space,
		gops->pmu.pmu_populate_loader_cfg =
			gm20b_pmu_populate_loader_cfg,
		gops->pmu.flcn_populate_bl_dmem_desc =
			gm20b_flcn_populate_bl_dmem_desc,
		gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt,
		gops->pmu.falcon_clear_halt_interrupt_status =
			clear_halt_interrupt_status,
		gops->pmu.init_falcon_setup_hw = gm20b_init_pmu_setup_hw1,

		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
		gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
@@ -628,12 +621,10 @@ int vgpu_gp10b_init_hal(struct gk20a *g)
		/* Inherit from gk20a */
		gops->pmu.is_pmu_supported = gk20a_is_pmu_supported,
		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob,
		gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1,
		gops->pmu.pmu_nsbootstrap = pmu_bootstrap,

		gops->pmu.load_lsfalcon_ucode = NULL;
		gops->pmu.init_wpr_region = NULL;
		gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;

		gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
	}