gpu: nvgpu: ACR code cleanup

Removed unnecessary code from the ACR LSFM ucode image discovery
path and dropped the PMU variables (pmu_mode, falcon_id) that were
only used by ACR.

JIRA NVGPU-1147

Change-Id: I26e46d326d5f904456e40044a91c96f3dd32fe53
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2008365
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Mahantesh Kumbar
Date:      2019-01-31 14:38:19 +05:30
Committed: mobile promotions
Commit:    ecc27b3f8b (parent f1bdef62b6)
5 changed files with 59 additions and 178 deletions
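For reviewers skimming the diffs below: after this change the gm20b and gp106 LSFM discovery paths no longer special-case the PMU or consult a per-falcon disable mask; a single loop walks pmu_acr_supp_ucode_list[] and adds every falcon image that reports an LSF descriptor, failing fast on the first error. The standalone sketch below models that flow only; every name in it (ucode_img, supp_ucode_list, add_ucode_img, and so on) is an illustrative stand-in, not an nvgpu symbol.

/*
 * Standalone model (not nvgpu code) of the simplified discovery loop:
 * probe each entry in a supported-ucode table, manage every image that
 * carries an LSF descriptor, and abort on the first add failure.
 */
#include <stdio.h>
#include <string.h>

#define MAX_SUPPORTED_LSFM 3

struct ucode_img {
	int falcon_id;     /* stand-in for lsf_desc->falcon_id + flcn_inst */
	int has_lsf_desc;  /* stand-in for "lsf_desc != NULL" */
};

/* Stand-ins for the pmu_acr_supp_ucode_list[] fetch callbacks. */
static int get_pmu_img(struct ucode_img *img)
{
	img->falcon_id = 0;
	img->has_lsf_desc = 1;
	return 0;
}

static int get_fecs_img(struct ucode_img *img)
{
	img->falcon_id = 2;
	img->has_lsf_desc = 1;
	return 0;
}

static int get_gpccs_img(struct ucode_img *img)
{
	img->falcon_id = 3;
	img->has_lsf_desc = 1;
	return 0;
}

static int (*const supp_ucode_list[MAX_SUPPORTED_LSFM])(struct ucode_img *) = {
	get_pmu_img, get_fecs_img, get_gpccs_img,
};

/* Would allocate and queue an lsfm_managed_ucode_img node in the driver. */
static int add_ucode_img(int falcon_id)
{
	printf("managing falcon %d\n", falcon_id);
	return 0;
}

static int discover_ucode_images(int *managed_cnt)
{
	struct ucode_img img;
	int err = 0;
	int i;

	/* One loop handles the PMU and every other LS falcon alike. */
	for (i = 0; i < MAX_SUPPORTED_LSFM; i++) {
		(void) memset(&img, 0, sizeof(img));
		if (supp_ucode_list[i](&img) != 0 || !img.has_lsf_desc) {
			continue; /* nothing to manage for this slot */
		}

		err = add_ucode_img(img.falcon_id);
		if (err != 0) {
			goto exit; /* fail fast, as the new code does */
		}

		(*managed_cnt)++;
	}

exit:
	return err;
}

int main(void)
{
	int cnt = 0;
	int err = discover_ucode_images(&cnt);

	printf("err=%d managed=%d\n", err, cnt);
	return err;
}

The sketch compiles with any C compiler (for example: cc model.c && ./a.out) and prints one line per managed falcon, mirroring the fail-fast structure of the rewritten lsfm_discover_ucode_images() in the diffs below.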

View File

@@ -357,99 +357,48 @@ free_sgt:
 	return err;
 }
 
-static bool lsfm_falcon_disabled(struct gk20a *g, struct ls_flcn_mgr *plsfm,
-	u32 falcon_id)
-{
-	return ((plsfm->disable_mask >> falcon_id) & 0x1U) != 0U;
-}
-
 /* Discover all managed falcon ucode images */
 static int lsfm_discover_ucode_images(struct gk20a *g,
 	struct ls_flcn_mgr *plsfm)
 {
-	struct nvgpu_pmu *pmu = &g->pmu;
 	struct flcn_ucode_img ucode_img;
 	u32 falcon_id;
 	u32 i;
-	int status;
+	int err = 0;
 
-	/* LSFM requires a secure PMU, discover it first.*/
-	/* Obtain the PMU ucode image and add it to the list if required*/
-	(void) memset(&ucode_img, 0, sizeof(ucode_img));
-	status = pmu_ucode_details(g, &ucode_img);
-	if (status != 0) {
-		return status;
-	}
-
-	/* The falon_id is formed by grabbing the static base
-	 * falon_id from the image and adding the
-	 * engine-designated falcon instance.*/
-	pmu->pmu_mode |= PMU_SECURE_MODE;
-	falcon_id = ucode_img.lsf_desc->falcon_id +
-		ucode_img.flcn_inst;
-
-	if (!lsfm_falcon_disabled(g, plsfm, falcon_id)) {
-		pmu->falcon_id = falcon_id;
-		if (lsfm_add_ucode_img(g, plsfm, &ucode_img,
-			pmu->falcon_id) == 0) {
-			pmu->pmu_mode |= PMU_LSFM_MANAGED;
-		}
-		plsfm->managed_flcn_cnt++;
-	} else {
-		nvgpu_pmu_dbg(g, "id not managed %d\n",
-			ucode_img.lsf_desc->falcon_id);
-	}
-
-	/*Free any ucode image resources if not managing this falcon*/
-	if ((pmu->pmu_mode & PMU_LSFM_MANAGED) == 0U) {
-		nvgpu_pmu_dbg(g, "pmu is not LSFM managed\n");
-		lsfm_free_ucode_img_res(g, &ucode_img);
-	}
-
-	/* Enumerate all constructed falcon objects,
-	 as we need the ucode image info and total falcon count.*/
-	/*0th index is always PMU which is already handled in earlier
-	 if condition*/
-	for (i = 1; i < g->acr.max_supported_lsfm; i++) {
+	/*
+	 * Enumerate all constructed falcon objects, as we need the ucode
+	 * image info and total falcon count
+	 */
+	for (i = 0U; i < g->acr.max_supported_lsfm; i++) {
 		(void) memset(&ucode_img, 0, sizeof(ucode_img));
 		if (pmu_acr_supp_ucode_list[i](g, &ucode_img) == 0) {
 			if (ucode_img.lsf_desc != NULL) {
-				/* We have engine sigs, ensure that this falcon
-				 is aware of the secure mode expectations
-				 (ACR status)*/
-
-				/* falon_id is formed by grabbing the static
-				 base falonId from the image and adding the
-				 engine-designated falcon instance. */
+				/*
+				 * falon_id is formed by grabbing the static
+				 * base falonId from the image and adding the
+				 * engine-designated falcon instance.
+				 */
 				falcon_id = ucode_img.lsf_desc->falcon_id +
 					ucode_img.flcn_inst;
 
-				if (!lsfm_falcon_disabled(g, plsfm,
-					falcon_id)) {
-					/* Do not manage non-FB ucode*/
-					if (lsfm_add_ucode_img(g,
-						plsfm, &ucode_img, falcon_id)
-						== 0) {
-						plsfm->managed_flcn_cnt++;
-					}
-				} else {
-					nvgpu_pmu_dbg(g, "not managed %d\n",
-						ucode_img.lsf_desc->falcon_id);
-					lsfm_free_nonpmu_ucode_img_res(g,
-						&ucode_img);
-				}
+				err = lsfm_add_ucode_img(g, plsfm, &ucode_img,
+					falcon_id);
+				if (err != 0) {
+					nvgpu_err(g, " Failed to add falcon-%d to LSFM ",
+						falcon_id);
+					goto exit;
+				}
+
+				plsfm->managed_flcn_cnt++;
 			}
-		} else {
-			/* Consumed all available falcon objects */
-			nvgpu_pmu_dbg(g, "Done checking for ucodes %d\n", i);
-			break;
 		}
 	}
-	return 0;
-}
+
+exit:
+	return err;
+}
 
 int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
 	void *lsfm, u32 *p_bl_gen_desc_size)
@@ -601,8 +550,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 	struct lsfm_managed_ucode_img *pnode)
 {
-	struct nvgpu_pmu *pmu = &g->pmu;
-
-	if (pnode->wpr_header.falcon_id != pmu->falcon_id) {
+	if (pnode->wpr_header.falcon_id != FALCON_ID_PMU) {
 		nvgpu_pmu_dbg(g, "non pmu. write flcn bl gen desc\n");
 		g->ops.pmu.flcn_populate_bl_dmem_desc(g,
 			pnode, &pnode->bl_gen_desc_size,
@@ -610,13 +558,11 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 		return 0;
 	}
 
-	if ((pmu->pmu_mode & PMU_LSFM_MANAGED) != 0U) {
+	if (pnode->wpr_header.falcon_id == FALCON_ID_PMU) {
 		nvgpu_pmu_dbg(g, "pmu write flcn bl gen desc\n");
-		if (pnode->wpr_header.falcon_id == pmu->falcon_id) {
-			return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
-				&pnode->bl_gen_desc_size);
-		}
+		return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
+			&pnode->bl_gen_desc_size);
 	}
 
 	/* Failed to find the falcon requested. */
 	return -ENOENT;
@@ -764,8 +710,6 @@ static int lsfm_parse_no_loader_ucode(u32 *p_ucodehdr,
 static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
 	u32 falcon_id, struct lsfm_managed_ucode_img *pnode)
 {
-	struct nvgpu_pmu *pmu = &g->pmu;
-
 	u32 full_app_size = 0;
 	u32 data = 0;
@@ -822,7 +766,7 @@ static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
 	 flags should be populated.*/
 	pnode->lsb_header.flags = 0;
 
-	if (falcon_id == pmu->falcon_id) {
+	if (falcon_id == FALCON_ID_PMU) {
 		data = NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_TRUE;
 		pnode->lsb_header.flags = data;
 	}

View File

@@ -493,99 +493,47 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
 	return err;
 }
 
-static bool lsfm_falcon_disabled(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm,
-	u32 falcon_id)
-{
-	return ((plsfm->disable_mask >> falcon_id) & 0x1U) != 0U;
-}
-
 /* Discover all managed falcon ucode images */
 int lsfm_discover_ucode_images(struct gk20a *g,
 	struct ls_flcn_mgr_v1 *plsfm)
 {
-	struct nvgpu_pmu *pmu = &g->pmu;
 	struct flcn_ucode_img_v1 ucode_img;
 	u32 falcon_id;
 	u32 i;
-	int status;
+	int err = 0;
 
-	/* LSFM requires a secure PMU, discover it first.*/
-	/* Obtain the PMU ucode image and add it to the list if required*/
-	(void) memset(&ucode_img, 0, sizeof(ucode_img));
-	status = pmu_ucode_details(g, &ucode_img);
-	if (status != 0) {
-		return status;
-	}
-
-	if (ucode_img.lsf_desc != NULL) {
-		/* The falon_id is formed by grabbing the static base
-		 * falon_id from the image and adding the
-		 * engine-designated falcon instance.
-		 */
-		pmu->pmu_mode |= PMU_SECURE_MODE;
-		falcon_id = ucode_img.lsf_desc->falcon_id +
-			ucode_img.flcn_inst;
-
-		if (!lsfm_falcon_disabled(g, plsfm, falcon_id)) {
-			pmu->falcon_id = falcon_id;
-			if (lsfm_add_ucode_img(g, plsfm, &ucode_img,
-				pmu->falcon_id) == 0) {
-				pmu->pmu_mode |= PMU_LSFM_MANAGED;
-			}
-			plsfm->managed_flcn_cnt++;
-		} else {
-			gp106_dbg_pmu(g, "id not managed %d\n",
-				ucode_img.lsf_desc->falcon_id);
-		}
-	}
-
-	/*Free any ucode image resources if not managing this falcon*/
-	if ((pmu->pmu_mode & PMU_LSFM_MANAGED) == 0U) {
-		gp106_dbg_pmu(g, "pmu is not LSFM managed\n");
-		lsfm_free_ucode_img_res(g, &ucode_img);
-	}
-
-	/* Enumerate all constructed falcon objects,
-	 as we need the ucode image info and total falcon count.*/
-	/*0th index is always PMU which is already handled in earlier
-	 if condition*/
-	for (i = 1; i < g->acr.max_supported_lsfm; i++) {
+	/*
+	 * Enumerate all constructed falcon objects, as we need the ucode
+	 * image info and total falcon count
+	 */
+	for (i = 0U; i < g->acr.max_supported_lsfm; i++) {
 		(void) memset(&ucode_img, 0, sizeof(ucode_img));
 		if (pmu_acr_supp_ucode_list[i](g, &ucode_img) == 0) {
 			if (ucode_img.lsf_desc != NULL) {
-				/* We have engine sigs, ensure that this falcon
-				 is aware of the secure mode expectations
-				 (ACR status)*/
-
-				/* falon_id is formed by grabbing the static
-				 base falonId from the image and adding the
-				 engine-designated falcon instance. */
+				/*
+				 * falon_id is formed by grabbing the static
+				 * base falonId from the image and adding the
+				 * engine-designated falcon instance.
+				 */
 				falcon_id = ucode_img.lsf_desc->falcon_id +
 					ucode_img.flcn_inst;
 
-				if (!lsfm_falcon_disabled(g, plsfm,
-					falcon_id)) {
-					/* Do not manage non-FB ucode*/
-					if (lsfm_add_ucode_img(g,
-						plsfm, &ucode_img, falcon_id)
-						== 0) {
-						plsfm->managed_flcn_cnt++;
-					}
-				} else {
-					gp106_dbg_pmu(g, "not managed %d\n",
-						ucode_img.lsf_desc->falcon_id);
-					lsfm_free_nonpmu_ucode_img_res(g,
-						&ucode_img);
-				}
+				err = lsfm_add_ucode_img(g, plsfm, &ucode_img,
+					falcon_id);
+				if (err != 0) {
+					nvgpu_err(g, " Failed to add falcon-%d to LSFM ",
+						falcon_id);
+					goto exit;
+				}
+
+				plsfm->managed_flcn_cnt++;
 			}
-		} else {
-			/* Consumed all available falcon objects */
-			gp106_dbg_pmu(g, "Done checking for ucodes %d\n", i);
-			break;
 		}
 	}
-	return 0;
+
+exit:
+	return err;
 }
 
 int gp106_pmu_populate_loader_cfg(struct gk20a *g,
@@ -736,9 +684,7 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
 int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 	struct lsfm_managed_ucode_img_v2 *pnode)
 {
-	struct nvgpu_pmu *pmu = &g->pmu;
-
-	if (pnode->wpr_header.falcon_id != pmu->falcon_id) {
+	if (pnode->wpr_header.falcon_id != FALCON_ID_PMU) {
 		gp106_dbg_pmu(g, "non pmu. write flcn bl gen desc\n");
 		g->ops.pmu.flcn_populate_bl_dmem_desc(g,
 			pnode, &pnode->bl_gen_desc_size,
@@ -746,13 +692,11 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
 		return 0;
 	}
 
-	if ((pmu->pmu_mode & PMU_LSFM_MANAGED) != 0U) {
+	if (pnode->wpr_header.falcon_id == FALCON_ID_PMU) {
 		gp106_dbg_pmu(g, "pmu write flcn bl gen desc\n");
-		if (pnode->wpr_header.falcon_id == pmu->falcon_id) {
-			return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
-				&pnode->bl_gen_desc_size);
-		}
+		return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
+			&pnode->bl_gen_desc_size);
 	}
 
 	/* Failed to find the falcon requested. */
 	return -ENOENT;
@@ -940,8 +884,6 @@ static int lsfm_parse_no_loader_ucode(u32 *p_ucodehdr,
 void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
 	u32 falcon_id, struct lsfm_managed_ucode_img_v2 *pnode)
 {
-	struct nvgpu_pmu *pmu = &g->pmu;
-
 	u32 full_app_size = 0;
 	u32 data = 0;
@@ -998,7 +940,7 @@ void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
 	 flags should be populated.*/
 	pnode->lsb_header.flags = 0;
 
-	if (falcon_id == pmu->falcon_id) {
+	if (falcon_id == FALCON_ID_PMU) {
 		data = NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_TRUE;
 		pnode->lsb_header.flags = data;
 	}

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -53,9 +53,6 @@ struct nvgpu_acr;
 #define ACR_COMPLETION_TIMEOUT_MS 10000U /*in msec */
 
-#define PMU_SECURE_MODE BIT8(0)
-#define PMU_LSFM_MANAGED BIT8(1)
-
 struct bin_hdr {
 	/* 0x10de */
 	u32 bin_magic;

View File

@@ -416,8 +416,6 @@ struct nvgpu_pmu {
 	};
 	unsigned long perfmon_events_cnt;
 	bool perfmon_sampling_enabled;
-	u8 pmu_mode; /*Added for GM20b, and ACR*/
-	u32 falcon_id;
 	u32 aelpg_param[5];
 	u32 override_done;
 };

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2018-2019, NVIDIA Corporation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
@@ -399,9 +399,9 @@ static int security_show(struct seq_file *s, void *data)
 {
 	struct gk20a *g = s->private;
 
-	seq_printf(s, "%d\n", g->pmu.pmu_mode);
+	seq_printf(s, "%d\n", nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY));
 
 	return 0;
 }
 
 static int security_open(struct inode *inode, struct file *file)