gpu: nvgpu: Add lsfm unit under common/pmu/lsfm

lsfm: LS (light secure) falcon manager

Created the lsfm unit under common/pmu/lsfm and moved the
functions and variables related to lsfm functionality into
it. Within the lsfm unit, chip-specific s/w init is split
into separate per-chip files, and private functionality is
separated from the public interface.

JIRA NVGPU-3021

Change-Id: Iad4a4e5533122fb2387a4980581a0d7bcdb37d67
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2080546
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Mahantesh Kumbar
2019-04-08 09:33:41 +05:30
committed by mobile promotions
parent dc405b38e1
commit cff9f19941
32 changed files with 808 additions and 469 deletions

View File

@@ -105,7 +105,6 @@ nvgpu-y += \
common/pmu/pg/pg_sw_gp10b.o \
common/pmu/pg/pg_sw_gp106.o \
common/pmu/pg/pg_sw_gv11b.o \
common/pmu/pmu_gv100.o \
common/pmu/pmu_tu104.o \
common/pmu/ipc/pmu_cmd.o \
common/pmu/ipc/pmu_msg.o \
@@ -122,6 +121,11 @@ nvgpu-y += \
common/acr/acr_sw_gv100.o \
common/acr/acr_sw_gv11b.o \
common/acr/acr_sw_tu104.o \
common/pmu/lsfm/lsfm.o \
common/pmu/lsfm/lsfm_sw_gm20b.o \
common/pmu/lsfm/lsfm_sw_gp10b.o \
common/pmu/lsfm/lsfm_sw_gv100.o \
common/pmu/lsfm/lsfm_sw_tu104.o \
common/pmu/perf/vfe_var.o \
common/pmu/perf/vfe_equ.o \
common/pmu/perf/pmu_perf.o \

View File

@@ -144,7 +144,6 @@ srcs += common/sim.c \
common/pmu/pg/pg_sw_gp10b.c \
common/pmu/pg/pg_sw_gp106.c \
common/pmu/pg/pg_sw_gv11b.c \
common/pmu/pmu_gv100.c \
common/pmu/pmu_tu104.c \
common/pmu/ipc/pmu_cmd.c \
common/pmu/ipc/pmu_msg.c \
@@ -208,6 +207,11 @@ srcs += common/sim.c \
common/regops/regops_gv100.c \
common/regops/regops_gv11b.c \
common/regops/regops_tu104.c \
common/pmu/lsfm/lsfm.c \
common/pmu/lsfm/lsfm_sw_gm20b.c \
common/pmu/lsfm/lsfm_sw_gp10b.c \
common/pmu/lsfm/lsfm_sw_gv100.c \
common/pmu/lsfm/lsfm_sw_tu104.c \
common/pmu/pstate/pstate.c \
common/pmu/volt/volt_dev.c \
common/pmu/volt/volt_pmu.c \

View File

@@ -32,6 +32,7 @@
#include <nvgpu/sec2.h>
#include <nvgpu/acr.h>
#include <nvgpu/power_features/pg.h>
#include <nvgpu/pmu/lsfm.h>
#include "gr_falcon_priv.h"
@@ -476,13 +477,12 @@ int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g)
g->ops.gr.falcon.configure_fmodel(g);
}
g->pmu_lsf_loaded_falcon_id = 0;
if (nvgpu_is_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE)) {
/* this must be recovery, so bootstrap FECS and GPCCS */
if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
nvgpu_gr_falcon_load_gpccs_with_bootloader(g);
err = g->ops.pmu.load_lsfalcon_ucode(g,
BIT32(FALCON_ID_FECS));
err = nvgpu_pmu_lsfm_bootstrap_ls_falcon(g, &g->pmu,
g->pmu.lsfm, BIT32(FALCON_ID_FECS));
} else {
/* bind WPR VA inst block */
nvgpu_gr_falcon_bind_instblk(g);
@@ -492,9 +492,10 @@ int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g)
err = nvgpu_sec2_bootstrap_ls_falcons(g,
&g->sec2, FALCON_ID_GPCCS);
} else if (g->support_ls_pmu) {
err = g->ops.pmu.load_lsfalcon_ucode(g,
BIT32(FALCON_ID_FECS) |
BIT32(FALCON_ID_GPCCS));
err = nvgpu_pmu_lsfm_bootstrap_ls_falcon(g,
&g->pmu, g->pmu.lsfm,
BIT32(FALCON_ID_FECS) |
BIT32(FALCON_ID_GPCCS));
} else {
err = nvgpu_acr_bootstrap_hs_acr(g, g->acr);
if (err != 0) {
@@ -531,8 +532,9 @@ int nvgpu_gr_falcon_load_secure_ctxsw_ucode(struct gk20a *g)
err = nvgpu_sec2_bootstrap_ls_falcons(g,
&g->sec2, FALCON_ID_GPCCS);
} else if (g->support_ls_pmu) {
err = g->ops.pmu.load_lsfalcon_ucode(g,
falcon_id_mask);
err = nvgpu_pmu_lsfm_bootstrap_ls_falcon(g,
&g->pmu, g->pmu.lsfm,
falcon_id_mask);
} else {
/* GR falcons bootstrapped by ACR */
err = 0;

View File

@@ -28,6 +28,7 @@
#include <nvgpu/gk20a.h>
#include <nvgpu/pmu/volt.h>
#include <nvgpu/pmu/therm.h>
#include <nvgpu/pmu/lsfm.h>
static int pmu_payload_extract(struct nvgpu_pmu *pmu, struct pmu_sequence *seq)
{
@@ -490,9 +491,7 @@ int nvgpu_pmu_process_message(struct nvgpu_pmu *pmu)
return err;
}
if (g->ops.pmu.init_wpr_region != NULL) {
g->ops.pmu.init_wpr_region(g);
}
nvgpu_pmu_lsfm_int_wpr_region(g, pmu, pmu->lsfm);
if (nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
g->ops.pmu.pmu_init_perfmon(pmu);
@@ -550,18 +549,7 @@ void nvgpu_pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
switch (msg->hdr.unit_id) {
case PMU_UNIT_ACR:
switch (rpc.function) {
case NV_PMU_RPC_ID_ACR_INIT_WPR_REGION:
nvgpu_pmu_dbg(g,
"reply NV_PMU_RPC_ID_ACR_INIT_WPR_REGION");
g->pmu_lsf_pmu_wpr_init_done = true;
break;
case NV_PMU_RPC_ID_ACR_BOOTSTRAP_GR_FALCONS:
nvgpu_pmu_dbg(g,
"reply NV_PMU_RPC_ID_ACR_BOOTSTRAP_GR_FALCONS");
g->pmu_lsf_loaded_falcon_id = 1;
break;
}
nvgpu_pmu_lsfm_rpc_handler(g, rpc_payload);
break;
case PMU_UNIT_PERFMON_T18X:
case PMU_UNIT_PERFMON:

View File

@@ -67,10 +67,12 @@ int nvgpu_pmu_sequences_alloc(struct gk20a *g,
}
void nvgpu_pmu_sequences_free(struct gk20a *g,
struct pmu_sequences *sequences)
{
nvgpu_mutex_destroy(&sequences->pmu_seq_lock);
nvgpu_kfree(g, sequences->seq);
if (sequences->seq != NULL) {
nvgpu_kfree(g, sequences->seq);
}
}
void nvgpu_pmu_sequences_init(struct pmu_sequences *sequences)

View File

@@ -0,0 +1,148 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/string.h>
#include <nvgpu/types.h>
#include <nvgpu/dma.h>
#include <nvgpu/bug.h>
#include <nvgpu/pmu/lsfm.h>
#include "lsfm_sw_gm20b.h"
#include "lsfm_sw_gp10b.h"
#include "lsfm_sw_gv100.h"
#include "lsfm_sw_tu104.h"
int nvgpu_pmu_lsfm_int_wpr_region(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm)
{
if (lsfm == NULL || lsfm->init_wpr_region == NULL) {
return 0;
}
return lsfm->init_wpr_region(g, pmu);
}
int nvgpu_pmu_lsfm_bootstrap_ls_falcon(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm, u32 falcon_id_mask)
{
if (lsfm == NULL || lsfm->bootstrap_ls_falcon == NULL) {
return 0;
}
return lsfm->bootstrap_ls_falcon(g, pmu, lsfm, falcon_id_mask);
}
int nvgpu_pmu_lsfm_ls_pmu_cmdline_args_copy(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm)
{
if (lsfm == NULL || lsfm->ls_pmu_cmdline_args_copy == NULL) {
return 0;
}
return lsfm->ls_pmu_cmdline_args_copy(g, pmu);
}
void nvgpu_pmu_lsfm_rpc_handler(struct gk20a *g,
struct rpc_handler_payload *rpc_payload)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct nv_pmu_rpc_struct_acr_bootstrap_gr_falcons acr_rpc;
(void) memset(&acr_rpc, 0, sizeof(struct nv_pmu_rpc_header));
nvgpu_memcpy((u8 *)&acr_rpc, (u8 *)rpc_payload->rpc_buff,
sizeof(struct nv_pmu_rpc_struct_acr_bootstrap_gr_falcons));
switch (acr_rpc.hdr.function) {
case NV_PMU_RPC_ID_ACR_INIT_WPR_REGION:
nvgpu_pmu_dbg(g,
"reply NV_PMU_RPC_ID_ACR_INIT_WPR_REGION");
pmu->lsfm->is_wpr_init_done = true;
break;
case NV_PMU_RPC_ID_ACR_BOOTSTRAP_GR_FALCONS:
nvgpu_pmu_dbg(g,
"reply NV_PMU_RPC_ID_ACR_BOOTSTRAP_GR_FALCONS");
pmu->lsfm->loaded_falcon_id = 1U;
break;
default:
nvgpu_pmu_dbg(g, "unsupported ACR function");
break;
}
}
int nvgpu_pmu_lsfm_init(struct gk20a *g, struct nvgpu_pmu_lsfm **lsfm)
{
u32 ver = g->params.gpu_arch + g->params.gpu_impl;
int err = 0;
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
goto done;
}
if (*lsfm != NULL) {
/* skip alloc/reinit for unrailgate sequence */
nvgpu_pmu_dbg(g, "skip lsfm init for unrailgate sequence");
(*lsfm)->is_wpr_init_done = false;
(*lsfm)->loaded_falcon_id = 0U;
goto done;
}
*lsfm = (struct nvgpu_pmu_lsfm *)
nvgpu_kzalloc(g, sizeof(struct nvgpu_pmu_lsfm));
if (*lsfm == NULL) {
err = -ENOMEM;
goto done;
}
switch (ver) {
case GK20A_GPUID_GM20B:
case GK20A_GPUID_GM20B_B:
nvgpu_gm20b_lsfm_sw_init(g, *lsfm);
break;
case NVGPU_GPUID_GP10B:
case NVGPU_GPUID_GV11B:
nvgpu_gp10b_lsfm_sw_init(g, *lsfm);
break;
case NVGPU_GPUID_GV100:
nvgpu_gv100_lsfm_sw_init(g, *lsfm);
break;
case NVGPU_GPUID_TU104:
nvgpu_tu104_lsfm_sw_init(g, *lsfm);
break;
default:
nvgpu_kfree(g, *lsfm);
err = -EINVAL;
nvgpu_err(g, "no support for GPUID %x", ver);
break;
}
done:
return err;
}
void nvgpu_pmu_lsfm_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
struct nvgpu_pmu_lsfm *lsfm)
{
if (lsfm != NULL) {
nvgpu_kfree(g, lsfm);
}
}

View File

@@ -0,0 +1,205 @@
/*
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/timers.h>
#include <nvgpu/pmu.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/bug.h>
#include <nvgpu/pmu/cmd.h>
#include <nvgpu/pmu/lsfm.h>
#include "common/pmu/pmu_gm20b.h"
#include "lsfm_sw_gm20b.h"
static void lsfm_handle_acr_init_wpr_region_msg(struct gk20a *g,
struct pmu_msg *msg, void *param, u32 status)
{
struct nvgpu_pmu *pmu = &g->pmu;
nvgpu_log_fn(g, " ");
nvgpu_pmu_dbg(g, "reply PMU_ACR_CMD_ID_INIT_WPR_REGION");
if (msg->msg.acr.acrmsg.errorcode == PMU_ACR_SUCCESS) {
pmu->lsfm->is_wpr_init_done = true;
}
}
int gm20b_pmu_lsfm_init_acr_wpr_region(struct gk20a *g, struct nvgpu_pmu *pmu)
{
struct pmu_cmd cmd;
size_t tmp_size;
nvgpu_log_fn(g, " ");
/* init ACR */
(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
cmd.hdr.unit_id = PMU_UNIT_ACR;
tmp_size = PMU_CMD_HDR_SIZE +
sizeof(struct pmu_acr_cmd_init_wpr_details);
nvgpu_assert(tmp_size <= (size_t)U8_MAX);
cmd.hdr.size = (u8)tmp_size;
cmd.cmd.acr.init_wpr.cmd_type = PMU_ACR_CMD_ID_INIT_WPR_REGION;
cmd.cmd.acr.init_wpr.regionid = 0x01U;
cmd.cmd.acr.init_wpr.wproffset = 0x00U;
nvgpu_pmu_dbg(g, "cmd post PMU_ACR_CMD_ID_INIT_WPR_REGION");
return nvgpu_pmu_cmd_post(g, &cmd, NULL, PMU_COMMAND_QUEUE_HPQ,
lsfm_handle_acr_init_wpr_region_msg, pmu);
}
void gm20b_pmu_lsfm_handle_bootstrap_falcon_msg(struct gk20a *g,
struct pmu_msg *msg, void *param, u32 status)
{
struct nvgpu_pmu *pmu = &g->pmu;
nvgpu_log_fn(g, " ");
nvgpu_pmu_dbg(g, "reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON");
nvgpu_pmu_dbg(g, "response code = %x", msg->msg.acr.acrmsg.falconid);
pmu->lsfm->loaded_falcon_id = msg->msg.acr.acrmsg.falconid;
}
static int gm20b_pmu_lsfm_bootstrap_falcon(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm,
u32 falcon_id, u32 flags)
{
struct pmu_cmd cmd;
size_t tmp_size;
nvgpu_log_fn(g, " ");
lsfm->loaded_falcon_id = 0U;
if (!lsfm->is_wpr_init_done) {
return -EINVAL;
}
/* send message to load FECS falcon */
(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
cmd.hdr.unit_id = PMU_UNIT_ACR;
tmp_size = PMU_CMD_HDR_SIZE +
sizeof(struct pmu_acr_cmd_bootstrap_falcon);
nvgpu_assert(tmp_size <= (size_t)U8_MAX);
cmd.hdr.size = (u8)tmp_size;
cmd.cmd.acr.bootstrap_falcon.cmd_type =
PMU_ACR_CMD_ID_BOOTSTRAP_FALCON;
cmd.cmd.acr.bootstrap_falcon.flags = flags;
cmd.cmd.acr.bootstrap_falcon.falconid = falcon_id;
nvgpu_pmu_dbg(g, "cmd post PMU_ACR_CMD_ID_BOOTSTRAP_FALCON: %x",
falcon_id);
return nvgpu_pmu_cmd_post(g, &cmd, NULL, PMU_COMMAND_QUEUE_HPQ,
gm20b_pmu_lsfm_handle_bootstrap_falcon_msg, pmu);
}
static int gm20b_pmu_lsfm_bootstrap_ls_falcon(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm, u32 falcon_id_mask)
{
int err = 0;
u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
/* GM20B PMU supports loading FECS only */
if (!(falcon_id_mask == BIT32(FALCON_ID_FECS))) {
return -EINVAL;
}
/* check whether the PMU is ready to bootstrap LSF; if not, wait for it */
if (!lsfm->is_wpr_init_done) {
pmu_wait_message_cond(&g->pmu,
nvgpu_get_poll_timeout(g),
&lsfm->is_wpr_init_done, 1U);
/* check again; if it is still not ready, report an error */
if (!lsfm->is_wpr_init_done) {
nvgpu_err(g, "PMU not ready to load LSF");
return -ETIMEDOUT;
}
}
/* load FECS */
nvgpu_falcon_mailbox_write(&g->fecs_flcn, FALCON_MAILBOX_0, ~U32(0x0U));
err = gm20b_pmu_lsfm_bootstrap_falcon(g, pmu, lsfm,
FALCON_ID_FECS, flags);
if (err != 0) {
return err;
}
nvgpu_assert(falcon_id_mask <= U8_MAX);
pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g),
&lsfm->loaded_falcon_id, (u8)FALCON_ID_FECS);
if (lsfm->loaded_falcon_id != FALCON_ID_FECS) {
err = -ETIMEDOUT;
}
return err;
}
int gm20b_pmu_lsfm_pmu_cmd_line_args_copy(struct gk20a *g,
struct nvgpu_pmu *pmu)
{
u32 cmd_line_args_offset = 0U;
u32 dmem_size = 0U;
int err = 0;
err = nvgpu_falcon_get_mem_size(&pmu->flcn, MEM_DMEM, &dmem_size);
if (err != 0) {
nvgpu_err(g, "dmem size request failed");
return -EINVAL;
}
cmd_line_args_offset = dmem_size -
g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
/* Copying pmu cmdline args */
g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu,
g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK));
g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode(pmu, 1U);
g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
pmu, GK20A_PMU_TRACE_BUFSIZE);
g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
pmu, GK20A_PMU_DMAIDX_VIRT);
return nvgpu_falcon_copy_to_dmem(&pmu->flcn, cmd_line_args_offset,
(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0U);
}
void nvgpu_gm20b_lsfm_sw_init(struct gk20a *g, struct nvgpu_pmu_lsfm *lsfm)
{
nvgpu_log_fn(g, " ");
lsfm->is_wpr_init_done = false;
lsfm->loaded_falcon_id = 0U;
lsfm->init_wpr_region = gm20b_pmu_lsfm_init_acr_wpr_region;
lsfm->bootstrap_ls_falcon = gm20b_pmu_lsfm_bootstrap_ls_falcon;
lsfm->ls_pmu_cmdline_args_copy = gm20b_pmu_lsfm_pmu_cmd_line_args_copy;
}

View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_LSFM_SW_GM20B_H
#define NVGPU_LSFM_SW_GM20B_H
int gm20b_pmu_lsfm_init_acr_wpr_region(struct gk20a *g, struct nvgpu_pmu *pmu);
void gm20b_pmu_lsfm_handle_bootstrap_falcon_msg(struct gk20a *g,
struct pmu_msg *msg, void *param, u32 status);
int gm20b_pmu_lsfm_pmu_cmd_line_args_copy(struct gk20a *g,
struct nvgpu_pmu *pmu);
void nvgpu_gm20b_lsfm_sw_init(struct gk20a *g, struct nvgpu_pmu_lsfm *lsfm);
#endif /*NVGPU_LSFM_SW_GM20B_H*/

View File

@@ -0,0 +1,134 @@
/*
* Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/timers.h>
#include <nvgpu/pmu.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/bug.h>
#include <nvgpu/pmu/cmd.h>
#include <nvgpu/pmu/lsfm.h>
#include "lsfm_sw_gm20b.h"
#include "lsfm_sw_gp10b.h"
static int gp10b_pmu_lsfm_bootstrap_falcon(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm,
u32 falconidmask, u32 flags)
{
struct pmu_cmd cmd;
size_t tmp_size;
nvgpu_log_fn(g, " ");
lsfm->loaded_falcon_id = 0U;
nvgpu_pmu_dbg(g, "wprinit status = %x", lsfm->is_wpr_init_done);
if (!lsfm->is_wpr_init_done) {
return -EINVAL;
}
/* send command to bootstrap the requested falcons */
(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
cmd.hdr.unit_id = PMU_UNIT_ACR;
tmp_size = PMU_CMD_HDR_SIZE +
sizeof(struct pmu_acr_cmd_bootstrap_multiple_falcons);
nvgpu_assert(tmp_size <= (size_t)U8_MAX);
cmd.hdr.size = (u8)tmp_size;
cmd.cmd.acr.boot_falcons.cmd_type =
PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS;
cmd.cmd.acr.boot_falcons.flags = flags;
cmd.cmd.acr.boot_falcons.falconidmask = falconidmask;
cmd.cmd.acr.boot_falcons.usevamask = 0;
cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0U;
cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0U;
nvgpu_pmu_dbg(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x",
falconidmask);
return nvgpu_pmu_cmd_post(g, &cmd, NULL, PMU_COMMAND_QUEUE_HPQ,
gm20b_pmu_lsfm_handle_bootstrap_falcon_msg, pmu);
}
static int gp10b_pmu_lsfm_bootstrap_ls_falcon(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm, u32 falcon_id_mask)
{
u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
int err = 0;
/* GP10B PMU supports loading FECS and GPCCS only */
if (falcon_id_mask == 0U) {
err = -EINVAL;
goto done;
}
if ((falcon_id_mask &
~(BIT32(FALCON_ID_FECS) | BIT32(FALCON_ID_GPCCS))) != 0U) {
err = -EINVAL;
goto done;
}
lsfm->loaded_falcon_id = 0U;
/* check whether the PMU is ready to bootstrap LSF; if not, wait for it */
if (!lsfm->is_wpr_init_done) {
pmu_wait_message_cond(&g->pmu,
nvgpu_get_poll_timeout(g),
&lsfm->is_wpr_init_done, 1U);
/* check again; if it is still not ready, report an error */
if (!lsfm->is_wpr_init_done) {
nvgpu_err(g, "PMU not ready to load LSF");
err = -ETIMEDOUT;
goto done;
}
}
/* bootstrap falcon(s) */
err = gp10b_pmu_lsfm_bootstrap_falcon(g, pmu, lsfm,
falcon_id_mask, flags);
if (err != 0) {
err = -EINVAL;
goto done;
}
nvgpu_assert(falcon_id_mask <= U8_MAX);
pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g),
&lsfm->loaded_falcon_id, (u8)falcon_id_mask);
if (lsfm->loaded_falcon_id != falcon_id_mask) {
err = -ETIMEDOUT;
}
done:
return err;
}
void nvgpu_gp10b_lsfm_sw_init(struct gk20a *g, struct nvgpu_pmu_lsfm *lsfm)
{
nvgpu_log_fn(g, " ");
lsfm->is_wpr_init_done = false;
lsfm->loaded_falcon_id = 0U;
lsfm->init_wpr_region = gm20b_pmu_lsfm_init_acr_wpr_region;
lsfm->bootstrap_ls_falcon = gp10b_pmu_lsfm_bootstrap_ls_falcon;
lsfm->ls_pmu_cmdline_args_copy = gm20b_pmu_lsfm_pmu_cmd_line_args_copy;
}

View File

@@ -1,7 +1,5 @@
/*
* GV100 PMU
*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,12 +20,9 @@
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_PMU_GV100_H
#define NVGPU_PMU_GV100_H
#ifndef NVGPU_LSFM_SW_GP10B_H
#define NVGPU_LSFM_SW_GP10B_H
struct gk20a;
void nvgpu_gp10b_lsfm_sw_init(struct gk20a *g, struct nvgpu_pmu_lsfm *lsfm);
int gv100_pmu_init_acr(struct gk20a *g);
int gv100_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
#endif /* NVGPU_PMU_GV100_H */
#endif /*NVGPU_LSFM_SW_GP10B_H*/

View File

@@ -1,7 +1,5 @@
/*
* GV100 PMU
*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,22 +20,26 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/timers.h>
#include <nvgpu/pmu.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/pmu/cmd.h>
#include <nvgpu/bug.h>
#include <nvgpu/pmu/cmd.h>
#include <nvgpu/pmu/lsfm.h>
#include "pmu_gv100.h"
#include "lsfm_sw_gv100.h"
int gv100_pmu_init_acr(struct gk20a *g)
static int gv100_pmu_lsfm_init_acr_wpr_region(struct gk20a *g,
struct nvgpu_pmu *pmu)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct nv_pmu_rpc_struct_acr_init_wpr_region rpc;
int status = 0;
(void) memset(&rpc, 0,
sizeof(struct nv_pmu_rpc_struct_acr_init_wpr_region));
rpc.wpr_regionId = 0x1;
rpc.wpr_offset = 0x0;
rpc.wpr_regionId = 0x1U;
rpc.wpr_offset = 0x0U;
PMU_RPC_EXECUTE(status, pmu, ACR, INIT_WPR_REGION, &rpc, 0);
if (status != 0) {
nvgpu_err(g, "Failed to execute RPC status=0x%x",
@@ -47,31 +49,31 @@ int gv100_pmu_init_acr(struct gk20a *g)
return status;
}
int gv100_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
static int gv100_pmu_lsfm_bootstrap_ls_falcon(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm, u32 falcon_id_mask)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct nv_pmu_rpc_struct_acr_bootstrap_gr_falcons rpc;
u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
int status = 0;
if (falconidmask == 0U) {
if (falcon_id_mask == 0U) {
return -EINVAL;
}
if ((falconidmask &
if ((falcon_id_mask &
~(BIT32(FALCON_ID_FECS) |
BIT32(FALCON_ID_GPCCS))) != 0U) {
return -EINVAL;
}
g->pmu_lsf_loaded_falcon_id = 0;
lsfm->loaded_falcon_id = 0U;
/* check whether the PMU is ready to bootstrap LSF; if not, wait for it */
if (!g->pmu_lsf_pmu_wpr_init_done) {
if (!lsfm->is_wpr_init_done) {
pmu_wait_message_cond(&g->pmu,
nvgpu_get_poll_timeout(g),
&g->pmu_lsf_pmu_wpr_init_done, 1);
nvgpu_get_poll_timeout(g),
&lsfm->is_wpr_init_done, 1U);
/* check again; if it is still not ready, report an error */
if (!g->pmu_lsf_pmu_wpr_init_done) {
if (!lsfm->is_wpr_init_done) {
nvgpu_err(g, "PMU not ready to load LSF");
status = -ETIMEDOUT;
goto exit;
@@ -80,11 +82,11 @@ int gv100_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
(void) memset(&rpc, 0,
sizeof(struct nv_pmu_rpc_struct_acr_bootstrap_gr_falcons));
rpc.falcon_id_mask = falconidmask;
rpc.falcon_id_mask = falcon_id_mask;
rpc.flags = flags;
rpc.falcon_va_mask = 0;
rpc.wpr_base_virtual.lo = 0;
rpc.wpr_base_virtual.hi = 0;
rpc.falcon_va_mask = 0U;
rpc.wpr_base_virtual.lo = 0U;
rpc.wpr_base_virtual.hi = 0U;
PMU_RPC_EXECUTE(status, pmu, ACR, BOOTSTRAP_GR_FALCONS, &rpc, 0);
if (status != 0) {
nvgpu_err(g, "Failed to execute RPC, status=0x%x", status);
@@ -92,12 +94,57 @@ int gv100_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
}
pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g),
&g->pmu_lsf_loaded_falcon_id, 1);
&lsfm->loaded_falcon_id, 1U);
if (g->pmu_lsf_loaded_falcon_id != 1U) {
if (lsfm->loaded_falcon_id != 1U) {
status = -ETIMEDOUT;
}
exit:
return status;
}
int gv100_update_lspmu_cmdline_args_copy(struct gk20a *g,
struct nvgpu_pmu *pmu)
{
u32 cmd_line_args_offset = 0U;
u32 dmem_size = 0U;
int err = 0;
err = nvgpu_falcon_get_mem_size(&pmu->flcn, MEM_DMEM, &dmem_size);
if (err != 0) {
nvgpu_err(g, "dmem size request failed");
return -EINVAL;
}
cmd_line_args_offset = dmem_size -
g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
/* Copying PMU cmdline args */
g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu, 0U);
g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode(pmu, 1U);
g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
pmu, GK20A_PMU_TRACE_BUFSIZE);
g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
pmu, GK20A_PMU_DMAIDX_VIRT);
if (g->ops.pmu_ver.config_pmu_cmdline_args_super_surface != NULL) {
g->ops.pmu_ver.config_pmu_cmdline_args_super_surface(pmu);
}
return nvgpu_falcon_copy_to_dmem(&pmu->flcn, cmd_line_args_offset,
(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0U);
}
void nvgpu_gv100_lsfm_sw_init(struct gk20a *g, struct nvgpu_pmu_lsfm *lsfm)
{
nvgpu_log_fn(g, " ");
lsfm->is_wpr_init_done = false;
lsfm->loaded_falcon_id = 0U;
lsfm->init_wpr_region = gv100_pmu_lsfm_init_acr_wpr_region;
lsfm->bootstrap_ls_falcon = gv100_pmu_lsfm_bootstrap_ls_falcon;
lsfm->ls_pmu_cmdline_args_copy = gv100_update_lspmu_cmdline_args_copy;
}

View File

@@ -0,0 +1,30 @@
/*
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_LSFM_SW_GV100_H
#define NVGPU_LSFM_SW_GV100_H
int gv100_update_lspmu_cmdline_args_copy(struct gk20a *g,
struct nvgpu_pmu *pmu);
void nvgpu_gv100_lsfm_sw_init(struct gk20a *g, struct nvgpu_pmu_lsfm *lsfm);
#endif /* NVGPU_LSFM_SW_GV100_H */

View File

@@ -0,0 +1,44 @@
/*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/timers.h>
#include <nvgpu/pmu.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/bug.h>
#include <nvgpu/pmu/cmd.h>
#include <nvgpu/pmu/lsfm.h>
#include "lsfm_sw_gv100.h"
#include "lsfm_sw_tu104.h"
void nvgpu_tu104_lsfm_sw_init(struct gk20a *g, struct nvgpu_pmu_lsfm *lsfm)
{
nvgpu_log_fn(g, " ");
lsfm->is_wpr_init_done = false;
/* LSF is not handled by PMU on this chip */
lsfm->init_wpr_region = NULL;
lsfm->bootstrap_ls_falcon = NULL;
lsfm->ls_pmu_cmdline_args_copy = gv100_update_lspmu_cmdline_args_copy;
}

View File

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_LSFM_SW_TU104_H
#define NVGPU_LSFM_SW_TU104_H
void nvgpu_tu104_lsfm_sw_init(struct gk20a *g, struct nvgpu_pmu_lsfm *lsfm);
#endif /* NVGPU_LSFM_SW_TU104_H */

View File

@@ -36,6 +36,7 @@
#include <nvgpu/string.h>
#include <nvgpu/power_features/cg.h>
#include <nvgpu/nvgpu_err.h>
#include <nvgpu/pmu/lsfm.h>
static void pmu_report_error(struct gk20a *g, u32 err_type,
u32 status, u32 pmu_err_type)
@@ -280,8 +281,10 @@ int nvgpu_init_pmu_support(struct gk20a *g)
g->ops.pmu.setup_apertures(g);
}
if (g->ops.pmu.update_lspmu_cmdline_args != NULL) {
g->ops.pmu.update_lspmu_cmdline_args(g);
err = nvgpu_pmu_lsfm_ls_pmu_cmdline_args_copy(g, pmu,
pmu->lsfm);
if (err != 0) {
goto exit;
}
if (g->ops.pmu.pmu_enable_irq != NULL) {
@@ -366,7 +369,6 @@ int nvgpu_pmu_destroy(struct gk20a *g)
pmu->pmu_ready = false;
pmu->perfmon_ready = false;
pmu->pmu_pg.zbc_ready = false;
g->pmu_lsf_pmu_wpr_init_done = false;
nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
nvgpu_log_fn(g, "done");

View File

@@ -35,6 +35,7 @@
#include <nvgpu/pmu/volt.h>
#include <nvgpu/pmu/clk/clk.h>
#include <nvgpu/pmu/allocator.h>
#include <nvgpu/pmu/lsfm.h>
/* PMU NS UCODE IMG */
#define NVGPU_PMU_NS_UCODE_IMAGE "gpmu_ucode.bin"
@@ -1617,14 +1618,17 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
nvgpu_pmu_dmem_allocator_destroy(&pmu->dmem);
nvgpu_list_for_each_entry_safe(pboardobjgrp, pboardobjgrp_tmp,
&g->boardobjgrp_head, boardobjgrp, node) {
pboardobjgrp->destruct(pboardobjgrp);
}
if (nvgpu_is_enabled(g, NVGPU_PMU_PSTATE)) {
nvgpu_list_for_each_entry_safe(pboardobjgrp,
pboardobjgrp_tmp, &g->boardobjgrp_head,
boardobjgrp, node) {
pboardobjgrp->destruct(pboardobjgrp);
}
nvgpu_list_for_each_entry_safe(pboardobj, pboardobj_tmp,
&g->boardobj_head, boardobj, node) {
pboardobj->destruct(pboardobj);
}
}
if (pmu->fw_image != NULL) {
@@ -1651,6 +1655,8 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf);
}
nvgpu_pmu_lsfm_deinit(g, pmu, pmu->lsfm);
nvgpu_mutex_destroy(&pmu->pmu_pg.elpg_mutex);
nvgpu_mutex_destroy(&pmu->pmu_pg.pg_mutex);
nvgpu_mutex_destroy(&pmu->isr_mutex);
@@ -1758,36 +1764,35 @@ int nvgpu_early_init_pmu_sw(struct gk20a *g, struct nvgpu_pmu *pmu)
err = nvgpu_mutex_init(&pmu->pmu_pg.pg_mutex);
if (err != 0) {
goto fail_elpg;
goto init_failed;
}
err = nvgpu_mutex_init(&pmu->isr_mutex);
if (err != 0) {
goto fail_pg;
goto init_failed;
}
err = nvgpu_mutex_init(&pmu->pmu_copy_lock);
if (err != 0) {
goto fail_isr;
goto init_failed;
}
err = init_pmu_ucode(pmu);
if (err != 0) {
goto fail_pmu_copy;
goto init_failed;
}
err = nvgpu_pmu_lsfm_init(g, &pmu->lsfm);
if (err != 0) {
goto init_failed;
}
pmu->remove_support = nvgpu_remove_pmu_support;
goto exit;
fail_pmu_copy:
nvgpu_mutex_destroy(&pmu->pmu_copy_lock);
fail_isr:
nvgpu_mutex_destroy(&pmu->isr_mutex);
fail_pg:
nvgpu_mutex_destroy(&pmu->pmu_pg.pg_mutex);
fail_elpg:
nvgpu_mutex_destroy(&pmu->pmu_pg.elpg_mutex);
init_failed:
nvgpu_remove_pmu_support(pmu);
exit:
return err;
}

View File

@@ -67,8 +67,6 @@ void gk20a_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu);
void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);
void gk20a_pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable);
void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 status);
int gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
struct pmu_pg_stats_data *pg_stat_data);
u32 gk20a_pmu_falcon_base_addr(void);

View File

@@ -35,7 +35,6 @@
#include "pmu_gk20a.h"
#include "pmu_gm20b.h"
#include <nvgpu/hw/gm20b/hw_gr_gm20b.h>
#include <nvgpu/hw/gm20b/hw_pwr_gm20b.h>
/* PROD settings for ELPG sequencing registers*/
@@ -120,144 +119,6 @@ int gm20b_pmu_setup_elpg(struct gk20a *g)
return ret;
}
static void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 status)
{
nvgpu_log_fn(g, " ");
nvgpu_pmu_dbg(g, "reply PMU_ACR_CMD_ID_INIT_WPR_REGION");
if (msg->msg.acr.acrmsg.errorcode == PMU_ACR_SUCCESS) {
g->pmu_lsf_pmu_wpr_init_done = true;
}
nvgpu_log_fn(g, "done");
}
int gm20b_pmu_init_acr(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct pmu_cmd cmd;
size_t tmp_size;
nvgpu_log_fn(g, " ");
/* init ACR */
(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
cmd.hdr.unit_id = PMU_UNIT_ACR;
tmp_size = PMU_CMD_HDR_SIZE +
sizeof(struct pmu_acr_cmd_init_wpr_details);
nvgpu_assert(tmp_size <= (size_t)U8_MAX);
cmd.hdr.size = (u8)tmp_size;
cmd.cmd.acr.init_wpr.cmd_type = PMU_ACR_CMD_ID_INIT_WPR_REGION;
cmd.cmd.acr.init_wpr.regionid = 0x01U;
cmd.cmd.acr.init_wpr.wproffset = 0x00U;
nvgpu_pmu_dbg(g, "cmd post PMU_ACR_CMD_ID_INIT_WPR_REGION");
nvgpu_pmu_cmd_post(g, &cmd, NULL, PMU_COMMAND_QUEUE_HPQ,
pmu_handle_acr_init_wpr_msg, pmu);
nvgpu_log_fn(g, "done");
return 0;
}
void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 status)
{
nvgpu_log_fn(g, " ");
nvgpu_pmu_dbg(g, "reply PMU_ACR_CMD_ID_BOOTSTRAP_FALCON");
nvgpu_pmu_dbg(g, "response code = %x\n", msg->msg.acr.acrmsg.falconid);
g->pmu_lsf_loaded_falcon_id = msg->msg.acr.acrmsg.falconid;
nvgpu_log_fn(g, "done");
}
static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms,
u32 val)
{
u32 delay = GR_FECS_POLL_INTERVAL;
u32 reg;
struct nvgpu_timeout timeout;
nvgpu_log_fn(g, " ");
reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0));
nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
do {
reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0));
if (reg == val) {
return 0;
}
nvgpu_udelay(delay);
} while (nvgpu_timeout_expired(&timeout) == 0);
return -ETIMEDOUT;
}
void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct pmu_cmd cmd;
size_t tmp_size;
nvgpu_log_fn(g, " ");
nvgpu_pmu_dbg(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
if (g->pmu_lsf_pmu_wpr_init_done) {
/* send message to load FECS falcon */
(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
cmd.hdr.unit_id = PMU_UNIT_ACR;
tmp_size = PMU_CMD_HDR_SIZE +
sizeof(struct pmu_acr_cmd_bootstrap_falcon);
nvgpu_assert(tmp_size <= (size_t)U8_MAX);
cmd.hdr.size = (u8)tmp_size;
cmd.cmd.acr.bootstrap_falcon.cmd_type =
PMU_ACR_CMD_ID_BOOTSTRAP_FALCON;
cmd.cmd.acr.bootstrap_falcon.flags = flags;
cmd.cmd.acr.bootstrap_falcon.falconid = falcon_id;
nvgpu_pmu_dbg(g, "cmd post PMU_ACR_CMD_ID_BOOTSTRAP_FALCON: %x\n",
falcon_id);
nvgpu_pmu_cmd_post(g, &cmd, NULL, PMU_COMMAND_QUEUE_HPQ,
pmu_handle_fecs_boot_acr_msg, pmu);
}
nvgpu_log_fn(g, "done");
return;
}
int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
{
int err = 0;
u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
u32 timeout = nvgpu_get_poll_timeout(g);
/* GM20B PMU supports loading FECS only */
if (!(falconidmask == BIT32(FALCON_ID_FECS))) {
return -EINVAL;
}
/* check whether pmu is ready to bootstrap lsf if not wait for it */
if (!g->pmu_lsf_pmu_wpr_init_done) {
pmu_wait_message_cond(&g->pmu,
nvgpu_get_poll_timeout(g),
&g->pmu_lsf_pmu_wpr_init_done, 1);
/* check again if it still not ready indicate an error */
if (!g->pmu_lsf_pmu_wpr_init_done) {
nvgpu_err(g, "PMU not ready to load LSF");
return -ETIMEDOUT;
}
}
/* load FECS */
gk20a_writel(g,
gr_fecs_ctxsw_mailbox_clear_r(0), ~U32(0x0U));
gm20b_pmu_load_lsf(g, FALCON_ID_FECS, flags);
err = pmu_gm20b_ctx_wait_lsf_ready(g, timeout,
0x55AA55AAU);
return err;
}
void gm20b_write_dmatrfbase(struct gk20a *g, u32 addr)
{
gk20a_writel(g, pwr_falcon_dmatrfbase_r(), addr);
@@ -333,27 +194,6 @@ void gm20b_pmu_setup_apertures(struct gk20a *g)
pwr_fbif_transcfg_target_noncoherent_sysmem_f());
}
void gm20b_update_lspmu_cmdline_args(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
u32 cmd_line_args_offset = 0;
nvgpu_pmu_get_cmd_line_args_offset(g, &cmd_line_args_offset);
/*Copying pmu cmdline args*/
g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu,
g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK));
g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode(pmu, 1);
g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
pmu, GK20A_PMU_TRACE_BUFSIZE);
g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
pmu, GK20A_PMU_DMAIDX_VIRT);
nvgpu_falcon_copy_to_dmem(&pmu->flcn, cmd_line_args_offset,
(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
}
void gm20b_pmu_flcn_setup_boot_config(struct gk20a *g)
{
struct mm_gk20a *mm = &g->mm;

View File

@@ -27,18 +27,15 @@
struct gk20a;
int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
int gm20b_pmu_setup_elpg(struct gk20a *g);
void pmu_dump_security_fuses_gm20b(struct gk20a *g);
void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags);
int gm20b_pmu_init_acr(struct gk20a *g);
void gm20b_write_dmatrfbase(struct gk20a *g, u32 addr);
bool gm20b_pmu_is_debug_mode_en(struct gk20a *g);
int gm20b_ns_pmu_setup_hw_and_bootstrap(struct gk20a *g);
void gm20b_pmu_setup_apertures(struct gk20a *g);
void gm20b_update_lspmu_cmdline_args(struct gk20a *g);
void gm20b_pmu_flcn_setup_boot_config(struct gk20a *g);
void gm20b_secured_pmu_start(struct gk20a *g);
bool gm20b_is_pmu_supported(struct gk20a *g);
void gm20b_clear_pmu_bar0_host_err_status(struct gk20a *g);
#endif /*NVGPU_GM20B_PMU_GM20B_H*/

View File

@@ -104,104 +104,6 @@ bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
return is_feature_supported;
}
static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
u32 flags)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct pmu_cmd cmd;
u64 tmp_size;
nvgpu_log_fn(g, " ");
nvgpu_pmu_dbg(g, "wprinit status = %x", g->pmu_lsf_pmu_wpr_init_done);
if (g->pmu_lsf_pmu_wpr_init_done) {
/* send message to load FECS falcon */
(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
cmd.hdr.unit_id = PMU_UNIT_ACR;
tmp_size = PMU_CMD_HDR_SIZE +
sizeof(struct pmu_acr_cmd_bootstrap_multiple_falcons);
nvgpu_assert(tmp_size <= U64(U8_MAX));
cmd.hdr.size = U8(tmp_size);
cmd.cmd.acr.boot_falcons.cmd_type =
PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS;
cmd.cmd.acr.boot_falcons.flags = flags;
cmd.cmd.acr.boot_falcons.falconidmask =
falconidmask;
cmd.cmd.acr.boot_falcons.usevamask = 0;
cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0;
cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0;
nvgpu_pmu_dbg(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x",
falconidmask);
nvgpu_pmu_cmd_post(g, &cmd, NULL, PMU_COMMAND_QUEUE_HPQ,
pmu_handle_fecs_boot_acr_msg, pmu);
}
nvgpu_log_fn(g, "done");
}
int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
{
u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
/* GM20B PMU supports loading FECS and GPCCS only */
if (falconidmask == 0U) {
return -EINVAL;
}
if ((falconidmask &
~(BIT32(FALCON_ID_FECS) |
BIT32(FALCON_ID_GPCCS))) != 0U) {
return -EINVAL;
}
g->pmu_lsf_loaded_falcon_id = 0;
/* check whether pmu is ready to bootstrap lsf if not wait for it */
if (!g->pmu_lsf_pmu_wpr_init_done) {
pmu_wait_message_cond(&g->pmu,
nvgpu_get_poll_timeout(g),
&g->pmu_lsf_pmu_wpr_init_done, 1);
/* check again if it still not ready indicate an error */
if (!g->pmu_lsf_pmu_wpr_init_done) {
nvgpu_err(g, "PMU not ready to load LSF");
return -ETIMEDOUT;
}
}
/* load falcon(s) */
gp106_pmu_load_multiple_falcons(g, falconidmask, flags);
nvgpu_assert(falconidmask < U32(U8_MAX));
pmu_wait_message_cond(&g->pmu,
nvgpu_get_poll_timeout(g),
&g->pmu_lsf_loaded_falcon_id, U8(falconidmask));
if (g->pmu_lsf_loaded_falcon_id != falconidmask) {
return -ETIMEDOUT;
}
return 0;
}
void gp106_update_lspmu_cmdline_args(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
u32 cmd_line_args_offset = 0;
nvgpu_pmu_get_cmd_line_args_offset(g, &cmd_line_args_offset);
/*Copying pmu cmdline args*/
g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu, 0);
g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode(pmu, 1);
g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
pmu, GK20A_PMU_TRACE_BUFSIZE);
g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
pmu, GK20A_PMU_DMAIDX_VIRT);
if (g->ops.pmu_ver.config_pmu_cmdline_args_super_surface != NULL) {
g->ops.pmu_ver.config_pmu_cmdline_args_super_surface(pmu);
}
nvgpu_falcon_copy_to_dmem(&pmu->flcn, cmd_line_args_offset,
(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
}
void gp106_pmu_setup_apertures(struct gk20a *g)
{
struct mm_gk20a *mm = &g->mm;

View File

@@ -30,11 +30,9 @@ struct gk20a;
bool gp106_is_pmu_supported(struct gk20a *g);
u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id);
bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id);
int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
bool gp106_pmu_is_engine_in_reset(struct gk20a *g);
int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset);
void gp106_update_lspmu_cmdline_args(struct gk20a *g);
void gp106_pmu_setup_apertures(struct gk20a *g);
u32 gp106_pmu_falcon_base_addr(void);

View File

@@ -134,79 +134,6 @@ static struct pg_init_sequence_list _pginitseq_gp10b[] = {
{0x0010e004U, 0x0000008EU},
};
static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
u32 flags)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct pmu_cmd cmd;
size_t tmp_size;
nvgpu_log_fn(g, " ");
nvgpu_pmu_dbg(g, "wprinit status = %x", g->pmu_lsf_pmu_wpr_init_done);
if (g->pmu_lsf_pmu_wpr_init_done) {
/* send message to load FECS falcon */
(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
cmd.hdr.unit_id = PMU_UNIT_ACR;
tmp_size = PMU_CMD_HDR_SIZE +
sizeof(struct pmu_acr_cmd_bootstrap_multiple_falcons);
nvgpu_assert(tmp_size <= (size_t)U8_MAX);
cmd.hdr.size = (u8)tmp_size;
cmd.cmd.acr.boot_falcons.cmd_type =
PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS;
cmd.cmd.acr.boot_falcons.flags = flags;
cmd.cmd.acr.boot_falcons.falconidmask =
falconidmask;
cmd.cmd.acr.boot_falcons.usevamask = 0;
cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0U;
cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0U;
nvgpu_pmu_dbg(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x",
falconidmask);
nvgpu_pmu_cmd_post(g, &cmd, NULL, PMU_COMMAND_QUEUE_HPQ,
pmu_handle_fecs_boot_acr_msg, pmu);
}
nvgpu_log_fn(g, "done");
return;
}
int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
{
u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
/* GM20B PMU supports loading FECS and GPCCS only */
if (falconidmask == 0U) {
return -EINVAL;
}
if ((falconidmask &
~(BIT32(FALCON_ID_FECS) |
BIT32(FALCON_ID_GPCCS))) != 0U) {
return -EINVAL;
}
g->pmu_lsf_loaded_falcon_id = 0;
/* check whether pmu is ready to bootstrap lsf if not wait for it */
if (!g->pmu_lsf_pmu_wpr_init_done) {
pmu_wait_message_cond(&g->pmu,
nvgpu_get_poll_timeout(g),
&g->pmu_lsf_pmu_wpr_init_done, 1);
/* check again if it still not ready indicate an error */
if (!g->pmu_lsf_pmu_wpr_init_done) {
nvgpu_err(g, "PMU not ready to load LSF");
return -ETIMEDOUT;
}
}
/* load falcon(s) */
gp10b_pmu_load_multiple_falcons(g, falconidmask, flags);
nvgpu_assert(falconidmask <= U8_MAX);
pmu_wait_message_cond(&g->pmu,
nvgpu_get_poll_timeout(g),
&g->pmu_lsf_loaded_falcon_id, (u8)falconidmask);
if (g->pmu_lsf_loaded_falcon_id != falconidmask) {
return -ETIMEDOUT;
}
return 0;
}
int gp10b_pmu_setup_elpg(struct gk20a *g)
{
int ret = 0;

View File

@@ -31,7 +31,6 @@ struct gk20a;
bool gp10b_is_pmu_supported(struct gk20a *g);
int gp10b_pmu_setup_elpg(struct gk20a *g);
int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr);
#endif /* NVGPU_PMU_GP10B_H */

View File

@@ -798,8 +798,6 @@ int vgpu_gp10b_init_hal(struct gk20a *g)
gops->get_litter_value = vgpu_gp10b_ops.get_litter_value;
gops->semaphore_wakeup = gk20a_channel_semaphore_wakeup;
g->pmu_lsf_pmu_wpr_init_done = 0;
if (priv->constants.can_set_clkrate) {
gops->clk.support_clk_freq_controller = true;
} else {

View File

@@ -1089,14 +1089,9 @@ int gm20b_init_hal(struct gk20a *g)
/* priv security dependent ops */
if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
/* Add in ops from gm20b acr */
gops->pmu.update_lspmu_cmdline_args =
gm20b_update_lspmu_cmdline_args;
gops->pmu.setup_apertures = gm20b_pmu_setup_apertures;
gops->pmu.secured_pmu_start = gm20b_secured_pmu_start;
gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
gops->pmu.load_lsfalcon_ucode = gm20b_load_falcon_ucode;
gops->gr.falcon.load_ctxsw_ucode =
nvgpu_gr_falcon_load_secure_ctxsw_ucode;
} else {
@@ -1104,15 +1099,11 @@ int gm20b_init_hal(struct gk20a *g)
gops->pmu.pmu_setup_hw_and_bootstrap =
gm20b_ns_pmu_setup_hw_and_bootstrap;
gops->pmu.pmu_nsbootstrap = pmu_bootstrap;
gops->pmu.load_lsfalcon_ucode = NULL;
gops->pmu.init_wpr_region = NULL;
}
nvgpu_set_enabled(g, NVGPU_SUPPORT_ZBC_STENCIL, false);
nvgpu_set_enabled(g, NVGPU_SUPPORT_PREEMPTION_GFXP, false);
nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
g->pmu_lsf_pmu_wpr_init_done = 0;
g->name = "gm20b";

View File

@@ -1175,32 +1175,21 @@ int gp10b_init_hal(struct gk20a *g)
/* priv security dependent ops */
if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
/* Add in ops from gm20b acr */
gops->pmu.update_lspmu_cmdline_args =
gm20b_update_lspmu_cmdline_args;
gops->pmu.setup_apertures = gm20b_pmu_setup_apertures;
gops->pmu.secured_pmu_start = gm20b_secured_pmu_start;
gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
gops->gr.falcon.load_ctxsw_ucode =
nvgpu_gr_falcon_load_secure_ctxsw_ucode;
} else {
/* Inherit from gk20a */
gops->pmu.pmu_setup_hw_and_bootstrap =
gm20b_ns_pmu_setup_hw_and_bootstrap;
gops->pmu.pmu_nsbootstrap = pmu_bootstrap,
gops->pmu.load_lsfalcon_ucode = NULL;
gops->pmu.init_wpr_region = NULL;
gops->pmu.pmu_nsbootstrap = pmu_bootstrap;
}
nvgpu_set_enabled(g, NVGPU_SUPPORT_ZBC_STENCIL, false);
nvgpu_set_enabled(g, NVGPU_SUPPORT_PREEMPTION_GFXP, true);
nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
g->pmu_lsf_pmu_wpr_init_done = false;
g->name = "gp10b";

View File

@@ -106,7 +106,6 @@
#include "common/pmu/pmu_gp10b.h"
#include "common/pmu/pmu_gp106.h"
#include "common/pmu/pmu_gv11b.h"
#include "common/pmu/pmu_gv100.h"
#include "common/pmu/pg/pg_sw_gp106.h"
#include "common/nvlink/init/device_reginit_gv100.h"
#include "common/nvlink/intr_and_err_handling_gv100.h"
@@ -1084,8 +1083,6 @@ static const struct gpu_ops gv100_ops = {
},
.pmu = {
.falcon_base_addr = gp106_pmu_falcon_base_addr,
.init_wpr_region = gv100_pmu_init_acr,
.load_lsfalcon_ucode = gv100_load_falcon_ucode,
.pmu_queue_tail = gk20a_pmu_queue_tail,
.pmu_get_queue_head = pwr_pmu_queue_head_r,
.pmu_mutex_release = gk20a_pmu_mutex_release,
@@ -1126,8 +1123,6 @@ static const struct gpu_ops gv100_ops = {
.get_irqdest = gk20a_pmu_get_irqdest,
.alloc_super_surface = nvgpu_pmu_super_surface_alloc,
.is_debug_mode_enabled = gm20b_pmu_is_debug_mode_en,
.update_lspmu_cmdline_args =
gp106_update_lspmu_cmdline_args,
.setup_apertures = gp106_pmu_setup_apertures,
.secured_pmu_start = gm20b_secured_pmu_start,
.create_ssmd_lookup_table = nvgpu_pmu_create_ssmd_lookup_table,
@@ -1486,7 +1481,6 @@ int gv100_init_hal(struct gk20a *g)
/* for now */
nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);
g->pmu_lsf_pmu_wpr_init_done = 0;
gops->clk.split_rail_support = false;
gops->clk.support_clk_freq_controller = false;
gops->clk.support_pmgr_domain = false;

View File

@@ -1092,9 +1092,6 @@ static const struct gpu_ops gv11b_ops = {
.dump_secure_fuses = pmu_dump_security_fuses_gm20b,
.pmu_dump_falcon_stats = gk20a_pmu_dump_falcon_stats,
/* PMU ucode */
.update_lspmu_cmdline_args = gm20b_update_lspmu_cmdline_args,
.init_wpr_region = gm20b_pmu_init_acr,
.load_lsfalcon_ucode = gp10b_load_falcon_ucode,
.save_zbc = gk20a_pmu_save_zbc,
.pmu_clear_bar0_host_err_status =
gm20b_clear_pmu_bar0_host_err_status,
@@ -1343,10 +1340,6 @@ int gv11b_init_hal(struct gk20a *g)
gops->pmu.pmu_nsbootstrap = gv11b_pmu_bootstrap;
gops->pmu.pmu_setup_hw_and_bootstrap =
gm20b_ns_pmu_setup_hw_and_bootstrap;
gops->pmu.load_lsfalcon_ucode = NULL;
gops->pmu.init_wpr_region = NULL;
}
nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);

View File

@@ -1415,8 +1415,6 @@ struct gpu_ops {
void (*pmu_dump_elpg_stats)(struct nvgpu_pmu *pmu);
void (*pmu_dump_falcon_stats)(struct nvgpu_pmu *pmu);
void (*pmu_enable_irq)(struct nvgpu_pmu *pmu, bool enable);
int (*init_wpr_region)(struct gk20a *g);
int (*load_lsfalcon_ucode)(struct gk20a *g, u32 falconidmask);
void (*write_dmatrfbase)(struct gk20a *g, u32 addr);
int (*pmu_elpg_statistics)(struct gk20a *g, u32 pg_engine_id,
struct pmu_pg_stats_data *pg_stat_data);
@@ -1436,7 +1434,6 @@ struct gpu_ops {
bool (*is_engine_in_reset)(struct gk20a *g);
void (*handle_ext_irq)(struct gk20a *g, u32 intr);
void (*set_irqmask)(struct gk20a *g);
void (*update_lspmu_cmdline_args)(struct gk20a *g);
void (*setup_apertures)(struct gk20a *g);
u32 (*get_irqdest)(struct gk20a *g);
int (*alloc_super_surface)(struct gk20a *g,
@@ -2076,8 +2073,6 @@ struct gk20a {
u32 mc_intr_mask_restore[4];
/*used for change of enum zbc update cmd id from ver 0 to ver1*/
u8 pmu_ver_cmd_id_zbc_table_update;
bool pmu_lsf_pmu_wpr_init_done;
u32 pmu_lsf_loaded_falcon_id;
/* Needed to keep track of deferred interrupts */
nvgpu_atomic_t hw_irq_stall_count;

View File

@@ -213,6 +213,8 @@ struct nvgpu_pmu {
struct nvgpu_mem ucode;
struct nvgpu_pmu_lsfm *lsfm;
/* TBD: remove this if ZBC seq is fixed */
struct nvgpu_mem seq_buf;
struct nvgpu_mem trace_buf;

View File

@@ -0,0 +1,51 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LSFM_H
#define LSFM_H
struct gk20a;
struct nvgpu_pmu_lsfm;
struct nvgpu_pmu_lsfm {
bool is_wpr_init_done;
u32 loaded_falcon_id;
int (*init_wpr_region)(struct gk20a *g, struct nvgpu_pmu *pmu);
int (*bootstrap_ls_falcon)(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm,
u32 falcon_id_mask);
int (*ls_pmu_cmdline_args_copy)(struct gk20a *g, struct nvgpu_pmu *pmu);
};
int nvgpu_pmu_lsfm_int_wpr_region(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm);
int nvgpu_pmu_lsfm_bootstrap_ls_falcon(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm, u32 falcon_id_mask);
int nvgpu_pmu_lsfm_ls_pmu_cmdline_args_copy(struct gk20a *g,
struct nvgpu_pmu *pmu, struct nvgpu_pmu_lsfm *lsfm);
void nvgpu_pmu_lsfm_rpc_handler(struct gk20a *g,
struct rpc_handler_payload *rpc_payload);
int nvgpu_pmu_lsfm_init(struct gk20a *g, struct nvgpu_pmu_lsfm **lsfm);
void nvgpu_pmu_lsfm_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
struct nvgpu_pmu_lsfm *lsfm);
#endif /*LSFM_H*/

View File

@@ -109,7 +109,6 @@
#include "common/pmu/pmu_gm20b.h"
#include "common/pmu/pmu_gp10b.h"
#include "common/pmu/pmu_gp106.h"
#include "common/pmu/pmu_gv100.h"
#include "common/pmu/pmu_gv11b.h"
#include "common/pmu/pmu_tu104.h"
#include "common/pmu/pg/pg_sw_gp106.h"
@@ -1121,8 +1120,6 @@ static const struct gpu_ops tu104_ops = {
},
.pmu = {
.falcon_base_addr = gp106_pmu_falcon_base_addr,
.init_wpr_region = NULL,
.load_lsfalcon_ucode = gv100_load_falcon_ucode,
.pmu_queue_tail = gk20a_pmu_queue_tail,
.pmu_get_queue_head = pwr_pmu_queue_head_r,
.pmu_mutex_release = gk20a_pmu_mutex_release,
@@ -1165,8 +1162,6 @@ static const struct gpu_ops tu104_ops = {
.alloc_super_surface = nvgpu_pmu_super_surface_alloc,
.handle_ext_irq = gv11b_pmu_handle_ext_irq,
.is_debug_mode_enabled = gm20b_pmu_is_debug_mode_en,
.update_lspmu_cmdline_args =
gp106_update_lspmu_cmdline_args,
.setup_apertures = gp106_pmu_setup_apertures,
.secured_pmu_start = gm20b_secured_pmu_start,
.create_ssmd_lookup_table = nvgpu_pmu_create_ssmd_lookup_table,
@@ -1564,8 +1559,6 @@ int tu104_init_hal(struct gk20a *g)
nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
}
g->pmu_lsf_pmu_wpr_init_done = 0;
g->name = "tu10x";
return 0;