Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: update pmu, sec2 sw setup sequence
pmu.g and sec2.g were previously set in nvgpu_falcon_sw_init; they are now set in nvgpu_early_init_pmu_sw and nvgpu_init_sec2_setup_sw. Both the gk20a and pmu structs are now passed to nvgpu_init_pmu_fw_support, matching the sec2 interface. pmu_fw_support and sec2_setup_sw are separated from their respective init sequences and called earlier, since the ->g member is needed earlier and most of this setup is software-only. nvgpu_init_pmu_fw_ver_ops is now exported.

JIRA NVGPU-1594

Change-Id: I6c71c6730ce06dad190159269e2cc60301f0237b
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1968241
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 8e8e40e66d
Commit: b8c8d627af
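For orientation, here is a minimal sketch of the poweron-time ordering this change establishes. It is not code from the commit: the wrapper function name is hypothetical and error handling is condensed; the real code inlines these blocks in gk20a_finalize_poweron() before ACR sw init, as the corresponding hunk below shows.

/*
 * Illustrative sketch only -- not code from this commit. The wrapper
 * name is hypothetical and error handling is condensed; the real code
 * inlines these blocks in gk20a_finalize_poweron() before ACR sw init.
 */
static int poweron_early_sw_setup(struct gk20a *g)
{
	int err;

	if (g->ops.pmu.is_pmu_supported(g)) {
		/* sets pmu->g and PMU mutex/sequence state */
		err = nvgpu_early_init_pmu_sw(g, &g->pmu);
		if (err != 0) {
			return err;
		}
	}

	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) {
		/* sets sec2->g and SEC2 sequence tracking */
		err = nvgpu_init_sec2_setup_sw(g, &g->sec2);
		if (err != 0) {
			return err;
		}
	}

	return 0;	/* ACR sw init and HW boot follow */
}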
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -443,13 +443,11 @@ int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id)
 		flcn = &g->pmu_flcn;
 		flcn->flcn_id = flcn_id;
 		g->pmu.flcn = &g->pmu_flcn;
-		g->pmu.g = g;
 		break;
 	case FALCON_ID_SEC2:
 		flcn = &g->sec2_flcn;
 		flcn->flcn_id = flcn_id;
 		g->sec2.flcn = &g->sec2_flcn;
-		g->sec2.g = g;
 		break;
 	case FALCON_ID_FECS:
 		flcn = &g->fecs_flcn;
@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics
  *
- * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -174,6 +174,22 @@ int gk20a_finalize_poweron(struct gk20a *g)
 		goto done;
 	}
 
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		err = nvgpu_early_init_pmu_sw(g, &g->pmu);
+		if (err != 0) {
+			nvgpu_err(g, "failed to early init pmu sw");
+			goto done;
+		}
+	}
+
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SEC2_RTOS)) {
+		err = nvgpu_init_sec2_setup_sw(g, &g->sec2);
+		if (err != 0) {
+			nvgpu_err(g, "failed to init sec2 sw setup");
+			goto done;
+		}
+	}
+
 	if (g->ops.acr.acr_sw_init != NULL &&
 	    nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
 		g->ops.acr.acr_sw_init(g, &g->acr);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -118,9 +118,9 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 	pmu->ucode_image = (u32 *)pmu_fw->data;
 	g->acr.pmu_desc = pmu_desc;
 
-	err = nvgpu_init_pmu_fw_support(pmu);
+	err = nvgpu_init_pmu_fw_ver_ops(pmu);
 	if (err != 0) {
-		nvgpu_pmu_dbg(g, "failed to set function pointers\n");
+		nvgpu_pmu_dbg(g, "failed to set function pointers");
 		goto release_sig;
 	}
 
@@ -352,12 +352,11 @@ int prepare_ucode_blob(struct gk20a *g)
 	if (g->acr.ucode_blob.cpu_va != NULL) {
 		/*Recovery case, we do not need to form
 		  non WPR blob of ucodes*/
-		err = nvgpu_init_pmu_fw_support(pmu);
+		err = nvgpu_init_pmu_fw_ver_ops(pmu);
 		if (err != 0) {
 			nvgpu_pmu_dbg(g, "failed to set function pointers\n");
-			return err;
 		}
-		return 0;
+		return err;
 	}
 	plsfm = &lsfm_l;
 	(void) memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr));
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -149,7 +149,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
 	pmu->ucode_image = (u32 *)pmu_fw->data;
 	g->acr.pmu_desc = pmu_desc;
 
-	err = nvgpu_init_pmu_fw_support(pmu);
+	err = nvgpu_init_pmu_fw_ver_ops(pmu);
 	if (err != 0) {
 		nvgpu_err(g, "failed to set function pointers");
 		goto release_sig;
@@ -489,12 +489,11 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
 	if (g->acr.ucode_blob.cpu_va != NULL) {
 		/*Recovery case, we do not need to form
 		  non WPR blob of ucodes*/
-		err = nvgpu_init_pmu_fw_support(pmu);
+		err = nvgpu_init_pmu_fw_ver_ops(pmu);
 		if (err != 0) {
 			gp106_dbg_pmu(g, "failed to set function pointers\n");
-			return err;
 		}
-		return 0;
+		return err;
 	}
 	plsfm = &lsfm_l;
 	(void) memset((void *)plsfm, 0, sizeof(struct ls_flcn_mgr_v1));
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -1117,7 +1117,7 @@ static void pg_cmd_eng_buf_load_set_dma_idx_v2(struct pmu_pg_cmd *pg,
 	pg->eng_buf_load_v2.dma_desc.params |= (U32(value) << U32(24));
 }
 
-static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
+int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct pmu_v *pv = &g->ops.pmu_ver;
@@ -1664,13 +1664,14 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
 	nvgpu_mutex_destroy(&pmu->pmu_seq_lock);
 }
 
-int nvgpu_init_pmu_fw_support(struct nvgpu_pmu *pmu)
+int nvgpu_early_init_pmu_sw(struct gk20a *g, struct nvgpu_pmu *pmu)
 {
-	struct gk20a *g = gk20a_from_pmu(pmu);
 	int err = 0;
 
 	nvgpu_log_fn(g, " ");
 
+	pmu->g = g;
+
 	err = nvgpu_mutex_init(&pmu->elpg_mutex);
 	if (err != 0) {
 		return err;
@@ -1698,15 +1699,8 @@ int nvgpu_init_pmu_fw_support(struct nvgpu_pmu *pmu)
 
 	pmu->remove_support = nvgpu_remove_pmu_support;
 
-	err = nvgpu_init_pmu_fw_ver_ops(pmu);
-	if (err != 0) {
-		goto fail_pmu_seq;
-	}
-
 	goto exit;
 
-fail_pmu_seq:
-	nvgpu_mutex_destroy(&pmu->pmu_seq_lock);
 fail_pmu_copy:
 	nvgpu_mutex_destroy(&pmu->pmu_copy_lock);
 fail_isr:
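The two hunks above show only the head and the error-label tail of the renamed function. As a condensed sketch, assuming only two of the function's several mutexes and an abbreviated unwind label, its resulting shape is roughly:

/*
 * Condensed sketch of nvgpu_early_init_pmu_sw() after this change.
 * Only two mutexes are shown and the unwind label is abbreviated;
 * see the hunks above for the real head and tail.
 */
int nvgpu_early_init_pmu_sw(struct gk20a *g, struct nvgpu_pmu *pmu)
{
	int err = 0;

	pmu->g = g;	/* previously assigned in nvgpu_falcon_sw_init() */

	err = nvgpu_mutex_init(&pmu->elpg_mutex);
	if (err != 0) {
		return err;
	}

	err = nvgpu_mutex_init(&pmu->pmu_seq_lock);
	if (err != 0) {
		goto fail;	/* unwind mutexes initialized so far */
	}

	pmu->remove_support = nvgpu_remove_pmu_support;

	/*
	 * nvgpu_init_pmu_fw_ver_ops() is no longer called from here;
	 * the ACR/ucode paths now invoke it directly.
	 */
	return 0;

fail:
	nvgpu_mutex_destroy(&pmu->elpg_mutex);
	return err;
}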
@@ -1729,7 +1723,11 @@ int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
 	if (pmu->fw != NULL) {
-		return nvgpu_init_pmu_fw_support(pmu);
+		err = nvgpu_init_pmu_fw_ver_ops(pmu);
+		if (err != 0) {
+			nvgpu_err(g, "failed to set function pointers");
+		}
+		return err;
 	}
 
 	pmu->fw = nvgpu_request_firmware(g, NVGPU_PMU_NS_UCODE_IMAGE, 0);
@@ -1753,7 +1751,12 @@
 	nvgpu_mem_wr_n(g, &pmu->ucode, 0, pmu->ucode_image,
 		pmu->desc->app_start_offset + pmu->desc->app_size);
 
-	return nvgpu_init_pmu_fw_support(pmu);
+	err = nvgpu_init_pmu_fw_ver_ops(pmu);
+	if (err != 0) {
+		nvgpu_err(g, "failed to set function pointers");
+	}
+
+	return err;
 
 err_release_fw:
 	nvgpu_release_firmware(g, pmu->fw);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -106,12 +106,14 @@ static void nvgpu_remove_sec2_support(struct nvgpu_sec2 *sec2)
 	nvgpu_mutex_destroy(&sec2->isr_mutex);
 }
 
-static int nvgpu_init_sec2_setup_sw(struct gk20a *g, struct nvgpu_sec2 *sec2)
+int nvgpu_init_sec2_setup_sw(struct gk20a *g, struct nvgpu_sec2 *sec2)
 {
 	int err = 0;
 
 	nvgpu_log_fn(g, " ");
 
+	sec2->g = g;
+
 	sec2->seq = nvgpu_kzalloc(g, SEC2_MAX_NUM_SEQUENCES *
 		sizeof(struct sec2_sequence));
 	if (sec2->seq == NULL) {
@@ -151,11 +153,6 @@ int nvgpu_init_sec2_support(struct gk20a *g)
 
 	nvgpu_log_fn(g, " ");
 
-	err = nvgpu_init_sec2_setup_sw(g, sec2);
-	if (err != 0) {
-		goto exit;
-	}
-
 	/* Enable irq*/
 	nvgpu_mutex_acquire(&sec2->isr_mutex);
 	g->ops.sec2.enable_irq(sec2, true);
@@ -165,7 +162,6 @@ int nvgpu_init_sec2_support(struct gk20a *g)
 	/* execute SEC2 in secure mode to boot RTOS */
 	g->ops.sec2.secured_sec2_start(g);
 
-exit:
 	return err;
 }
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -480,7 +480,8 @@ int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 size);
 
 /* PMU F/W support */
-int nvgpu_init_pmu_fw_support(struct nvgpu_pmu *pmu);
+int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu);
+int nvgpu_early_init_pmu_sw(struct gk20a *g, struct nvgpu_pmu *pmu);
 int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g);
 
 /* PG init*/
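A usage sketch of the split PMU API declared above; the caller function is hypothetical and error handling is condensed:

/* Hypothetical caller, for illustration only. */
static int example_pmu_setup(struct gk20a *g)
{
	int err;

	/* Early, software-only setup: sets pmu->g, mutexes, sequences. */
	err = nvgpu_early_init_pmu_sw(g, &g->pmu);
	if (err != 0) {
		return err;
	}

	/* Later, once the PMU ucode image and version are known. */
	err = nvgpu_init_pmu_fw_ver_ops(&g->pmu);
	if (err != 0) {
		nvgpu_err(g, "failed to set function pointers");
	}

	return err;
}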
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -106,6 +106,7 @@ int nvgpu_sec2_bootstrap_ls_falcons(struct gk20a *g, struct nvgpu_sec2 *sec2,
 		u32 falcon_id);
 
 /* sec2 init */
+int nvgpu_init_sec2_setup_sw(struct gk20a *g, struct nvgpu_sec2 *sec2);
 int nvgpu_init_sec2_support(struct gk20a *g);
 int nvgpu_sec2_destroy(struct gk20a *g);
 int nvgpu_sec2_queue_init(struct nvgpu_sec2 *sec2, u32 id,
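Likewise for SEC2, a usage sketch of the declarations above; the caller function is hypothetical:

/* Hypothetical caller, for illustration only. */
static int example_sec2_boot(struct gk20a *g)
{
	int err;

	/* Early, software-only setup: sets sec2->g and allocates
	 * the SEC2 sequence tracking. */
	err = nvgpu_init_sec2_setup_sw(g, &g->sec2);
	if (err != 0) {
		return err;
	}

	/* Later: enables the SEC2 IRQ and starts the RTOS in
	 * secure mode via nvgpu_init_sec2_support(). */
	return nvgpu_init_sec2_support(g);
}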