mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: ucode blob prepare using ACR ops
Moved the ACR ucode blob prepare op to struct nvgpu_acr from the PMU ops,
as ACR needs to be independent of the PMU.

JIRA NVGPU-1147

Change-Id: I2ad1805fcbd0837c24f6f09b6bc292ad2c346fb6
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2007291
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
parent 49255caeff
commit f1bdef62b6
committed by mobile promotions
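For readers skimming the diff below: the change replaces the PMU-owned hook
gops->pmu.prepare_ucode with an ACR-owned g->acr.prepare_ucode_blob whose
signature drops the struct nvgpu_acr argument, so per-chip *_acr_sw_init wires
it up and gk20a_finalize_poweron() calls it before HS ACR bootstrap. Below is
a minimal, self-contained sketch of that ops-struct pattern. The simplified
struct layouts, the demo_* stubs, the priv_security_enabled flag, and the NULL
guard on prepare_ucode_blob itself are illustrative stand-ins, not the
driver's real definitions.

/* sketch.c: ACR-owned ucode-blob prepare hook, modeled on this commit */
#include <stdio.h>

struct gk20a;

struct nvgpu_acr {
	/* new home of the hook; note the narrowed (struct gk20a *) signature */
	int (*prepare_ucode_blob)(struct gk20a *g);
	int (*bootstrap_hs_acr)(struct gk20a *g);
};

struct gk20a {
	struct nvgpu_acr acr;
	int priv_security_enabled; /* stand-in for nvgpu_is_enabled() */
};

/* illustrative stand-in for a per-chip op like gp106_prepare_ucode_blob() */
static int demo_prepare_ucode_blob(struct gk20a *g)
{
	(void)g;
	printf("ACR: ucode blob prepared\n");
	return 0;
}

static int demo_bootstrap_hs_acr(struct gk20a *g)
{
	(void)g;
	printf("ACR: HS ucode bootstrapped\n");
	return 0;
}

/* mirrors the reworked gk20a_finalize_poweron() flow, simplified */
static int demo_finalize_poweron(struct gk20a *g)
{
	int err = 0;

	if (g->acr.prepare_ucode_blob != NULL && g->priv_security_enabled) {
		err = g->acr.prepare_ucode_blob(g);
		if (err != 0) {
			fprintf(stderr, "ACR ucode blob prepare failed\n");
			return err;
		}
	}
	if (g->acr.bootstrap_hs_acr != NULL && g->priv_security_enabled) {
		err = g->acr.bootstrap_hs_acr(g);
	}
	return err;
}

int main(void)
{
	struct gk20a g = { .priv_security_enabled = 1 };

	/* per-chip sw_init wires the hooks, as the *_acr_sw_init hunks do */
	g.acr.prepare_ucode_blob = demo_prepare_ucode_blob;
	g.acr.bootstrap_hs_acr = demo_bootstrap_hs_acr;

	return demo_finalize_poweron(&g);
}

The design point, per the commit message, is that ACR no longer needs the PMU
ops to be populated in order to prepare and bootstrap its ucode blob.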
@@ -310,19 +310,15 @@ int gk20a_finalize_poweron(struct gk20a *g)
 			goto done;
 		}
 
-	if (g->ops.pmu.is_pmu_supported(g)) {
-		if (g->ops.pmu.prepare_ucode != NULL) {
-			err = g->ops.pmu.prepare_ucode(g);
-		}
+	if (g->acr.bootstrap_hs_acr != NULL &&
+		nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
+		err = g->acr.prepare_ucode_blob(g);
 		if (err != 0) {
-			nvgpu_err(g, "failed to init pmu ucode");
+			nvgpu_err(g, "ACR ucode blob prepare failed");
 			nvgpu_mutex_release(&g->tpc_pg_lock);
 			goto done;
 		}
 	}
 
 	if (g->acr.bootstrap_hs_acr != NULL &&
 		nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
 		err = g->acr.bootstrap_hs_acr(g, &g->acr, &g->acr.acr);
 		if (err != 0) {
 			nvgpu_err(g, "ACR bootstrap failed");
@@ -1379,6 +1379,7 @@ void nvgpu_gm20b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
 
 	gm20b_acr_default_sw_init(g, &acr->acr);
 
+	acr->prepare_ucode_blob = prepare_ucode_blob;
 	acr->get_wpr_info = gm20b_wpr_info;
 	acr->alloc_blob_space = gm20b_alloc_blob_space;
 	acr->bootstrap_hs_acr = gm20b_bootstrap_hs_acr;
@@ -1311,6 +1311,7 @@ void nvgpu_gp106_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
 
 	nvgpu_gp106_acr_default_sw_init(g, &acr->acr);
 
+	acr->prepare_ucode_blob = gp106_prepare_ucode_blob;
 	acr->get_wpr_info = gp106_wpr_info;
 	acr->alloc_blob_space = gp106_alloc_blob_space;
 	acr->bootstrap_hs_acr = gm20b_bootstrap_hs_acr;
@@ -160,6 +160,7 @@ void nvgpu_gv11b_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
 
 	gv11b_acr_default_sw_init(g, &acr->acr);
 
+	acr->prepare_ucode_blob = gp106_prepare_ucode_blob;
 	acr->get_wpr_info = gm20b_wpr_info;
 	acr->alloc_blob_space = gm20b_alloc_blob_space;
 	acr->bootstrap_hs_acr = gm20b_bootstrap_hs_acr;
@@ -145,6 +145,7 @@ void nvgpu_tu104_acr_sw_init(struct gk20a *g, struct nvgpu_acr *acr)
 	/* Inherit settings from older chip */
 	nvgpu_gp106_acr_sw_init(g, acr);
 
+	acr->prepare_ucode_blob = gp106_prepare_ucode_blob;
 	acr->bootstrap_owner = FALCON_ID_GSPLITE;
 	acr->max_supported_lsfm = TU104_MAX_SUPPORTED_LSFM;
 	acr->bootstrap_hs_acr = tu104_bootstrap_hs_acr;
@@ -347,6 +347,9 @@ int nvgpu_init_pmu_support(struct gk20a *g)
 			g->ops.pmu.secured_pmu_start(g);
 		}
 	} else {
+		/* prepare blob for non-secure PMU boot */
+		err = nvgpu_pmu_prepare_ns_ucode_blob(g);
+
 		/* Do non-secure PMU boot */
 		err = g->ops.pmu.pmu_setup_hw_and_bootstrap(g);
 		if (err != 0) {
@@ -862,7 +862,6 @@ int gm20b_init_hal(struct gk20a *g)
 	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
 		/* Add in ops from gm20b acr */
 		gops->pmu.is_pmu_supported = gm20b_is_pmu_supported;
-		gops->pmu.prepare_ucode = prepare_ucode_blob;
 		gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap;
 		gops->pmu.is_priv_load = gm20b_is_priv_load;
 		gops->pmu.pmu_populate_loader_cfg =
@@ -881,7 +880,6 @@ int gm20b_init_hal(struct gk20a *g)
 	} else {
 		/* Inherit from gk20a */
 		gops->pmu.is_pmu_supported = gk20a_is_pmu_supported;
-		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob;
 		gops->pmu.pmu_setup_hw_and_bootstrap =
 			gm20b_ns_pmu_setup_hw_and_bootstrap;
 		gops->pmu.pmu_nsbootstrap = pmu_bootstrap;
@@ -935,7 +935,6 @@ int gp10b_init_hal(struct gk20a *g)
 	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
 		/* Add in ops from gm20b acr */
 		gops->pmu.is_pmu_supported = gm20b_is_pmu_supported,
-		gops->pmu.prepare_ucode = prepare_ucode_blob,
 		gops->pmu.is_lazy_bootstrap = gm20b_is_lazy_bootstrap,
 		gops->pmu.is_priv_load = gm20b_is_priv_load,
 		gops->pmu.pmu_populate_loader_cfg =
@@ -956,7 +955,6 @@ int gp10b_init_hal(struct gk20a *g)
 	} else {
 		/* Inherit from gk20a */
 		gops->pmu.is_pmu_supported = gk20a_is_pmu_supported,
-		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob,
 		gops->pmu.pmu_setup_hw_and_bootstrap =
 			gm20b_ns_pmu_setup_hw_and_bootstrap;
 		gops->pmu.pmu_nsbootstrap = pmu_bootstrap,
@@ -870,7 +870,6 @@ static const struct gpu_ops gv100_ops = {
 		.load_lsfalcon_ucode = gv100_load_falcon_ucode,
 		.is_lazy_bootstrap = gp106_is_lazy_bootstrap,
 		.is_priv_load = gp106_is_priv_load,
-		.prepare_ucode = gp106_prepare_ucode_blob,
 		.pmu_populate_loader_cfg = gp106_pmu_populate_loader_cfg,
 		.flcn_populate_bl_dmem_desc = gp106_flcn_populate_bl_dmem_desc,
 		.pmu_queue_tail = gk20a_pmu_queue_tail,
@@ -1067,7 +1067,6 @@ int gv11b_init_hal(struct gk20a *g)
 	/* priv security dependent ops */
 	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
 		/* Add in ops from gm20b acr */
-		gops->pmu.prepare_ucode = gp106_prepare_ucode_blob,
 		gops->pmu.pmu_populate_loader_cfg =
 			gp106_pmu_populate_loader_cfg,
 		gops->pmu.flcn_populate_bl_dmem_desc =
@@ -1085,7 +1084,6 @@ int gv11b_init_hal(struct gk20a *g)
 		gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode;
 	} else {
 		/* Inherit from gk20a */
-		gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob,
 		gops->pmu.pmu_setup_hw_and_bootstrap =
 			gm20b_ns_pmu_setup_hw_and_bootstrap;
 
@@ -168,7 +168,7 @@ struct nvgpu_acr {
 
 	u32 pmu_args;
 
-	int (*prepare_ucode_blob)(struct gk20a *g, struct nvgpu_acr *acr);
+	int (*prepare_ucode_blob)(struct gk20a *g);
 	void (*get_wpr_info)(struct gk20a *g, struct wpr_carveout_info *inf);
 	int (*alloc_blob_space)(struct gk20a *g, size_t size,
 		struct nvgpu_mem *mem);
@@ -1169,7 +1169,6 @@ struct gpu_ops {
 		struct {
 			bool (*is_pmu_supported)(struct gk20a *g);
 			u32 (*falcon_base_addr)(void);
-			int (*prepare_ucode)(struct gk20a *g);
 			int (*pmu_setup_hw_and_bootstrap)(struct gk20a *g);
 			int (*pmu_nsbootstrap)(struct nvgpu_pmu *pmu);
 			int (*pmu_init_perfmon)(struct nvgpu_pmu *pmu);
@@ -901,7 +901,6 @@ static const struct gpu_ops tu104_ops = {
 		.load_lsfalcon_ucode = gv100_load_falcon_ucode,
 		.is_lazy_bootstrap = gp106_is_lazy_bootstrap,
 		.is_priv_load = gp106_is_priv_load,
-		.prepare_ucode = gp106_prepare_ucode_blob,
 		.pmu_populate_loader_cfg = gp106_pmu_populate_loader_cfg,
 		.flcn_populate_bl_dmem_desc = gp106_flcn_populate_bl_dmem_desc,
 		.pmu_queue_tail = gk20a_pmu_queue_tail,