linux-nvgpu/drivers/gpu/nvgpu/common/pmu/fw/fw.c
Mahantesh Kumbar ef524ee0d1 gpu: nvgpu: PG init sequence update
-Currently the PG task is created for both iGPU & dGPU as part of the
PMU init sequence path, but the task is not required for dGPU and can be
skipped if ELPG is not supported on the iGPU. Made changes to create the
PG task only if supported, else skip it, and made some functions private
as they are required by the PG unit only (see the sketch below).
-The PG instance is allocated & set to default properties as needed if
support is enabled, else it is skipped.
-Made changes in dependent files as required to reflect the above changes.

JIRA NVGPU-1972

Change-Id: I4efb7f1814a9ad48770acea2173e66f0a4c8a9c1
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2094840
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
2019-04-25 16:25:50 -07:00
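
A minimal sketch of the flow this commit describes (the helper names
is_pg_supported(), pg_instance_alloc() and pg_task_create() are
illustrative stand-ins, not actual nvgpu symbols):

/* PG init is now conditional: dGPU, or iGPU without ELPG, skips it. */
static int pmu_pg_early_init_sketch(struct gk20a *g, struct nvgpu_pmu *pmu)
{
	int err;

	if (!is_pg_supported(g)) {
		return 0;	/* no PG task, no PG instance */
	}

	err = pg_instance_alloc(pmu);	/* defaults applied on alloc */
	if (err != 0) {
		return err;
	}

	return pg_task_create(g, pmu);	/* previously unconditional */
}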


/*
 * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <nvgpu/pmu.h>
#include <nvgpu/dma.h>
#include <nvgpu/log.h>
#include <nvgpu/bug.h>
#include <nvgpu/firmware.h>
#include <nvgpu/enabled.h>
#include <nvgpu/utils.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/pmu/allocator.h>
#include <nvgpu/pmu/fw.h>

/* PMU NS UCODE IMG */
#define NVGPU_PMU_NS_UCODE_IMAGE "gpmu_ucode.bin"
/* PMU SECURE UCODE IMG */
#define NVGPU_PMU_UCODE_IMAGE "gpmu_ucode_image.bin"
#define NVGPU_PMU_UCODE_DESC "gpmu_ucode_desc.bin"
#define NVGPU_PMU_UCODE_SIG "pmu_sig.bin"

void nvgpu_pmu_fw_get_cmd_line_args_offset(struct gk20a *g,
	u32 *args_offset)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	u32 dmem_size = 0;
	int err = 0;

	err = nvgpu_falcon_get_mem_size(&pmu->flcn, MEM_DMEM, &dmem_size);
	if (err != 0) {
		nvgpu_err(g, "dmem size request failed");
		*args_offset = 0;
		return;
	}

	*args_offset = dmem_size - pmu->fw.ops.get_cmd_line_args_size(pmu);
}
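
/*
 * Publish a PMU FW state transition. The nvgpu_smp_wmb() here pairs with
 * the nvgpu_smp_rmb() in nvgpu_pmu_get_fw_state(): writes performed before
 * the state change are visible to any reader that observes the new state.
 */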
void nvgpu_pmu_fw_state_change(struct gk20a *g, struct nvgpu_pmu *pmu,
	u32 pmu_state, bool post_change_event)
{
	nvgpu_pmu_dbg(g, "pmu_state - %d", pmu_state);

	nvgpu_smp_wmb();
	pmu->fw.state = pmu_state;

	if (post_change_event) {
		if (g->can_elpg) {
			pmu->pg->pg_init.state_change = true;
			nvgpu_cond_signal(&pmu->pg->pg_init.wq);
		}
	}
}

u32 nvgpu_pmu_get_fw_state(struct gk20a *g, struct nvgpu_pmu *pmu)
{
	u32 state = pmu->fw.state;

	nvgpu_smp_rmb();

	return state;
}
void nvgpu_pmu_set_fw_ready(struct gk20a *g, struct nvgpu_pmu *pmu,
	bool status)
{
	nvgpu_smp_wmb();
	pmu->fw.ready = status;
}

bool nvgpu_pmu_get_fw_ready(struct gk20a *g, struct nvgpu_pmu *pmu)
{
	bool state = pmu->fw.ready;

	nvgpu_smp_rmb();

	return state;
}
int nvgpu_pmu_wait_fw_ack_status(struct gk20a *g, struct nvgpu_pmu *pmu,
	u32 timeout_ms, void *var, u8 val)
{
	struct nvgpu_timeout timeout;
	int err;
	unsigned int delay = POLL_DELAY_MIN_US;

	err = nvgpu_timeout_init(g, &timeout, timeout_ms,
			NVGPU_TIMER_CPU_TIMER);
	if (err != 0) {
		nvgpu_err(g, "PMU wait timeout init failed.");
		return err;
	}

	do {
		nvgpu_rmb();
		if (*(volatile u8 *)var == val) {
			return 0;
		}

		if (nvgpu_can_busy(g) == 0) {
			return 0;
		}

		if (g->ops.pmu.pmu_is_interrupted(pmu)) {
			g->ops.pmu.pmu_isr(g);
		}

		nvgpu_usleep_range(delay, delay * 2U);
		delay = min_t(u32, delay << 1, POLL_DELAY_MAX_US);
	} while (nvgpu_timeout_expired(&timeout) == 0);

	return -ETIMEDOUT;
}

int nvgpu_pmu_wait_fw_ready(struct gk20a *g, struct nvgpu_pmu *pmu)
{
	int status = 0;

	status = nvgpu_pmu_wait_fw_ack_status(g, pmu,
			nvgpu_get_poll_timeout(g),
			&pmu->fw.ready, (u8)true);
	if (status != 0) {
		nvgpu_err(g, "PMU is not ready yet");
	}

	return status;
}
void nvgpu_pmu_fw_release(struct gk20a *g, struct nvgpu_pmu *pmu)
{
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->pmu.vm;

	nvgpu_log_fn(g, " ");

	if (pmu->fw.fw_sig != NULL) {
		nvgpu_release_firmware(g, pmu->fw.fw_sig);
	}

	if (pmu->fw.fw_desc != NULL) {
		nvgpu_release_firmware(g, pmu->fw.fw_desc);
	}

	if (pmu->fw.fw_image != NULL) {
		nvgpu_release_firmware(g, pmu->fw.fw_image);
	}

	if (nvgpu_mem_is_valid(&pmu->fw.ucode)) {
		nvgpu_dma_unmap_free(vm, &pmu->fw.ucode);
	}
}

struct nvgpu_firmware *nvgpu_pmu_fw_sig_desc(struct gk20a *g,
	struct nvgpu_pmu *pmu)
{
	return pmu->fw.fw_sig;
}

struct nvgpu_firmware *nvgpu_pmu_fw_desc_desc(struct gk20a *g,
	struct nvgpu_pmu *pmu)
{
	return pmu->fw.fw_desc;
}

struct nvgpu_firmware *nvgpu_pmu_fw_image_desc(struct gk20a *g,
	struct nvgpu_pmu *pmu)
{
	return pmu->fw.fw_image;
}
int nvgpu_pmu_init_pmu_fw(struct gk20a *g, struct nvgpu_pmu *pmu)
{
	struct pmu_rtos_fw *rtos_fw = &pmu->fw;
	struct pmu_ucode_desc *desc;
	int err = 0;

	nvgpu_log_fn(g, " ");

	if (rtos_fw->fw_image != NULL) {
		goto exit;
	}

	if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
		/* non-secure PMU boot ucode */
		rtos_fw->fw_image = nvgpu_request_firmware(g,
				NVGPU_PMU_NS_UCODE_IMAGE, 0);
		if (rtos_fw->fw_image == NULL) {
			nvgpu_err(g,
				"failed to load non-secure pmu ucode!!");
			err = -ENOENT;
			goto exit;
		}

		desc = (struct pmu_ucode_desc *)
			(void *)rtos_fw->fw_image->data;
	} else {
		/* secure boot ucodes */
		nvgpu_pmu_dbg(g, "requesting PMU ucode image");
		rtos_fw->fw_image = nvgpu_request_firmware(g,
				NVGPU_PMU_UCODE_IMAGE, 0);
		if (rtos_fw->fw_image == NULL) {
			nvgpu_err(g, "failed to load pmu ucode!!");
			err = -ENOENT;
			goto exit;
		}

		nvgpu_pmu_dbg(g, "requesting PMU ucode desc");
		rtos_fw->fw_desc = nvgpu_request_firmware(g,
				NVGPU_PMU_UCODE_DESC, 0);
		if (rtos_fw->fw_desc == NULL) {
			nvgpu_err(g, "failed to load pmu ucode desc!!");
			err = -ENOENT;
			goto release_img_fw;
		}

		nvgpu_pmu_dbg(g, "requesting PMU ucode sign");
		rtos_fw->fw_sig = nvgpu_request_firmware(g,
				NVGPU_PMU_UCODE_SIG, 0);
		if (rtos_fw->fw_sig == NULL) {
			nvgpu_err(g, "failed to load pmu sig!!");
			err = -ENOENT;
			goto release_desc;
		}

		desc = (struct pmu_ucode_desc *)(void *)
			rtos_fw->fw_desc->data;
	}

	err = nvgpu_pmu_init_fw_ver_ops(g, pmu, desc->app_version);
	if (err != 0) {
		nvgpu_err(g, "failed to set function pointers");
		goto release_sig;
	}

	goto exit;

release_sig:
	nvgpu_release_firmware(g, rtos_fw->fw_sig);
release_desc:
	nvgpu_release_firmware(g, rtos_fw->fw_desc);
release_img_fw:
	nvgpu_release_firmware(g, rtos_fw->fw_image);
exit:
	return err;
}