mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 02:22:34 +03:00
gpu: nvgpu: Move chip specific PG code to PG unit
As part of the PG unit refactoring, move chip-specific PG code from the
common/pmu/pmu_<chip_name>.c/.h files to the common/pmu/pg folder.
Add new files, pg_sw_gp106.c/.h, pg_sw_gp10b.c/.h and pg_sw_gv11b.c/.h,
to hold the PG code.

NVGPU-1973

Change-Id: I97fa2395e388559edc26be5d64bfbc547d6a3e22
Signed-off-by: Divya Singhatwaria <dsinghatwari@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2077111
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by: mobile promotions
parent: 4777c81f82
commit: 08f9184f34
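In practical terms, chip HAL code now pulls these PG entry points from the pg unit's headers instead of the pmu_<chip> headers. A minimal sketch of how a HAL table might wire them up after this change; the gops field names below are illustrative assumptions, not taken from this diff:

    #include "common/pmu/pg/pg_sw_gp106.h"

    /* Hypothetical HAL hookup; the ops field names are assumptions. */
    void example_init_gp106_pg_hal(struct gk20a *g)
    {
        g->ops.pmu.pmu_pg_engines_list = gp106_pmu_pg_engines_list;
        g->ops.pmu.pmu_pg_param_init = gp106_pg_param_init;
        g->ops.pmu.pmu_elpg_statistics = gp106_pmu_elpg_statistics;
    }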
@@ -106,6 +106,9 @@ nvgpu-y += \
	common/pmu/pmu_gp10b.o \
	common/pmu/pmu_gp106.o \
	common/pmu/pmu_gv11b.o \
+	common/pmu/pg/pg_sw_gp10b.o \
+	common/pmu/pg/pg_sw_gp106.o \
+	common/pmu/pg/pg_sw_gv11b.o \
	common/pmu/pmu_gv100.o \
	common/pmu/pmu_tu104.o \
	common/acr/acr.o \
@@ -138,6 +138,9 @@ srcs += common/sim.c \
	common/pmu/pmu_gp10b.c \
	common/pmu/pmu_gp106.c \
	common/pmu/pmu_gv11b.c \
+	common/pmu/pg/pg_sw_gp10b.c \
+	common/pmu/pg/pg_sw_gp106.c \
+	common/pmu/pg/pg_sw_gv11b.c \
	common/pmu/pmu_gv100.c \
	common/pmu/pmu_tu104.c \
	common/acr/acr.c \
drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.c (new file, 128 lines)
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <nvgpu/pmu.h>
+#include <nvgpu/gk20a.h>
+#include <nvgpu/pmu/lpwr.h>
+#include <nvgpu/bug.h>
+
+#include "pg_sw_gp106.h"
+
+static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
+		void *param, u32 handle, u32 status)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (status != 0U) {
+		nvgpu_err(g, "PG PARAM cmd aborted");
+		return;
+	}
+
+	nvgpu_pmu_dbg(g, "PG PARAM is acknowledged from PMU %x",
+		msg->msg.pg.msg_type);
+}
+
+int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
+{
+	struct nvgpu_pmu *pmu = &g->pmu;
+	struct pmu_cmd cmd;
+	u32 seq;
+	int status;
+	u64 tmp_size;
+
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
+	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
+
+		status = init_rppg(g);
+		if (status != 0) {
+			nvgpu_err(g, "RPPG init Failed");
+			return -1;
+		}
+
+		cmd.hdr.unit_id = PMU_UNIT_PG;
+		tmp_size = PMU_CMD_HDR_SIZE +
+			sizeof(struct pmu_pg_cmd_gr_init_param);
+		nvgpu_assert(tmp_size <= U64(U8_MAX));
+		cmd.hdr.size = U8(tmp_size);
+		cmd.cmd.pg.gr_init_param.cmd_type =
+			PMU_PG_CMD_ID_PG_PARAM;
+		cmd.cmd.pg.gr_init_param.sub_cmd_id =
+			PMU_PG_PARAM_CMD_GR_INIT_PARAM;
+		cmd.cmd.pg.gr_init_param.featuremask =
+			NVGPU_PMU_GR_FEATURE_MASK_RPPG;
+
+		nvgpu_pmu_dbg(g, "cmd post GR PMU_PG_CMD_ID_PG_PARAM");
+		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
+			pmu_handle_param_msg, pmu, &seq);
+	} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
+		cmd.hdr.unit_id = PMU_UNIT_PG;
+		tmp_size = PMU_CMD_HDR_SIZE +
+			sizeof(struct pmu_pg_cmd_ms_init_param);
+		nvgpu_assert(tmp_size <= U64(U8_MAX));
+		cmd.hdr.size = U8(tmp_size);
+		cmd.cmd.pg.ms_init_param.cmd_type =
+			PMU_PG_CMD_ID_PG_PARAM;
+		cmd.cmd.pg.ms_init_param.cmd_id =
+			PMU_PG_PARAM_CMD_MS_INIT_PARAM;
+		cmd.cmd.pg.ms_init_param.support_mask =
+			NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING |
+			NVGPU_PMU_MS_FEATURE_MASK_SW_ASR |
+			NVGPU_PMU_MS_FEATURE_MASK_RPPG |
+			NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING;
+
+		nvgpu_pmu_dbg(g, "cmd post MS PMU_PG_CMD_ID_PG_PARAM");
+		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
+			pmu_handle_param_msg, pmu, &seq);
+	}
+
+	return 0;
+}
+
+int gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
+		struct pmu_pg_stats_data *pg_stat_data)
+{
+	struct nvgpu_pmu *pmu = &g->pmu;
+	struct pmu_pg_stats_v2 stats;
+	int err;
+
+	err = nvgpu_falcon_copy_from_dmem(&pmu->flcn,
+		pmu->pmu_pg.stat_dmem_offset[pg_engine_id],
+		(u8 *)&stats, (u32)sizeof(struct pmu_pg_stats_v2), 0);
+	if (err != 0) {
+		nvgpu_err(g, "PMU falcon DMEM copy failed");
+		return err;
+	}
+
+	pg_stat_data->ingating_time = stats.total_sleep_time_us;
+	pg_stat_data->ungating_time = stats.total_non_sleep_time_us;
+	pg_stat_data->gating_cnt = stats.entry_count;
+	pg_stat_data->avg_entry_latency_us = stats.entry_latency_avg_us;
+	pg_stat_data->avg_exit_latency_us = stats.exit_latency_avg_us;
+
+	return err;
+}
+
+u32 gp106_pmu_pg_engines_list(struct gk20a *g)
+{
+	return BIT32(PMU_PG_ELPG_ENGINE_ID_GRAPHICS) |
+		BIT32(PMU_PG_ELPG_ENGINE_ID_MS);
+}
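gp106_pmu_pg_engines_list() reports the PG-capable engines as a bitmask, while gp106_pg_param_init() takes one engine ID at a time, so a caller is presumably expected to walk the mask bit by bit. A minimal sketch under that assumption; the iteration bound and the ignored return value are illustrative only:

    void example_pg_param_init_all(struct gk20a *g)
    {
        u32 engines = gp106_pmu_pg_engines_list(g);
        u32 id;

        for (id = 0U; id < 32U; id++) {
            if ((engines & BIT32(id)) != 0U) {
                /* posts PMU_PG_CMD_ID_PG_PARAM for the GR or MS engine */
                (void) gp106_pg_param_init(g, id);
            }
        }
    }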
drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.h (new file, 36 lines)
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVGPU_PG_SW_GP106_H
+#define NVGPU_PG_SW_GP106_H
+
+#include <nvgpu/types.h>
+
+struct gk20a;
+struct pmu_pg_stats_data;
+
+int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id);
+int gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
+		struct pmu_pg_stats_data *pg_stat_data);
+u32 gp106_pmu_pg_engines_list(struct gk20a *g);
+
+#endif /* NVGPU_PG_SW_GP106_H */
drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.c (new file, 102 lines)
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <nvgpu/pmu.h>
+#include <nvgpu/log.h>
+#include <nvgpu/gk20a.h>
+#include <nvgpu/bug.h>
+
+#include "pg_sw_gp10b.h"
+
+static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg,
+		void *param, u32 handle, u32 status)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (status != 0U) {
+		nvgpu_err(g, "GR PARAM cmd aborted");
+		return;
+	}
+
+	nvgpu_pmu_dbg(g, "GR PARAM is acknowledged from PMU %x",
+		msg->msg.pg.msg_type);
+
+	return;
+}
+
+int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
+{
+	struct nvgpu_pmu *pmu = &g->pmu;
+	struct pmu_cmd cmd;
+	u32 seq;
+	size_t tmp_size;
+
+	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
+		(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
+		cmd.hdr.unit_id = PMU_UNIT_PG;
+		tmp_size = PMU_CMD_HDR_SIZE +
+			sizeof(struct pmu_pg_cmd_gr_init_param_v2);
+		nvgpu_assert(tmp_size <= (size_t)U8_MAX);
+		cmd.hdr.size = (u8)tmp_size;
+		cmd.cmd.pg.gr_init_param_v2.cmd_type =
+			PMU_PG_CMD_ID_PG_PARAM;
+		cmd.cmd.pg.gr_init_param_v2.sub_cmd_id =
+			PMU_PG_PARAM_CMD_GR_INIT_PARAM;
+		cmd.cmd.pg.gr_init_param_v2.featuremask =
+			NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING;
+		cmd.cmd.pg.gr_init_param_v2.ldiv_slowdown_factor =
+			g->ldiv_slowdown_factor;
+
+		nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_PG_PARAM ");
+		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
+			pmu_handle_gr_param_msg, pmu, &seq);
+
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
+		struct pmu_pg_stats_data *pg_stat_data)
+{
+	struct nvgpu_pmu *pmu = &g->pmu;
+	struct pmu_pg_stats_v1 stats;
+	int err;
+
+	err = nvgpu_falcon_copy_from_dmem(&pmu->flcn,
+		pmu->pmu_pg.stat_dmem_offset[pg_engine_id],
+		(u8 *)&stats, (u32)sizeof(struct pmu_pg_stats_v1), 0);
+	if (err != 0) {
+		nvgpu_err(g, "PMU falcon DMEM copy failed");
+		return err;
+	}
+
+	pg_stat_data->ingating_time = stats.total_sleep_timeus;
+	pg_stat_data->ungating_time = stats.total_nonsleep_timeus;
+	pg_stat_data->gating_cnt = stats.entry_count;
+	pg_stat_data->avg_entry_latency_us = stats.entrylatency_avgus;
+	pg_stat_data->avg_exit_latency_us = stats.exitlatency_avgus;
+
+	return err;
+}
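The statistics copied out of PMU DMEM above are raw counters; a typical consumer would derive an ELPG residency figure from the in-gating and un-gating times. A hedged sketch of that arithmetic, assuming the u64 time fields commonly used for struct pmu_pg_stats_data:

    u64 example_elpg_residency_permille(struct gk20a *g)
    {
        struct pmu_pg_stats_data stat = { 0 };
        u64 total;

        if (gp10b_pmu_elpg_statistics(g,
                PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &stat) != 0) {
            return 0;
        }

        total = stat.ingating_time + stat.ungating_time;
        if (total == 0ULL) {
            return 0;
        }

        /* gated time as parts per thousand of the sampled window */
        return (stat.ingating_time * 1000ULL) / total;
    }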
drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.h (new file, 35 lines)
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVGPU_PG_SW_GP10B_H
+#define NVGPU_PG_SW_GP10B_H
+
+#include <nvgpu/types.h>
+
+struct gk20a;
+struct pmu_pg_stats_data;
+
+int gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
+		struct pmu_pg_stats_data *pg_stat_data);
+int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id);
+
+#endif /* NVGPU_PG_SW_GP10B_H */
drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.c (new file, 128 lines)
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <nvgpu/pmu.h>
+#include <nvgpu/gk20a.h>
+#include <nvgpu/bug.h>
+
+#include "pg_sw_gv11b.h"
+
+static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg,
+		void *param, u32 handle, u32 status)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (status != 0U) {
+		nvgpu_err(g, "Sub-feature mask update cmd aborted");
+		return;
+	}
+
+	nvgpu_pmu_dbg(g, "sub-feature mask update is acknowledged from PMU %x",
+		msg->msg.pg.msg_type);
+}
+
+static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg,
+		void *param, u32 handle, u32 status)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (status != 0U) {
+		nvgpu_err(g, "GR PARAM cmd aborted");
+		return;
+	}
+
+	nvgpu_pmu_dbg(g, "GR PARAM is acknowledged from PMU %x",
+		msg->msg.pg.msg_type);
+}
+
+int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
+{
+	struct nvgpu_pmu *pmu = &g->pmu;
+	struct pmu_cmd cmd;
+	u32 seq;
+	size_t tmp_size;
+
+	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
+		(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
+		cmd.hdr.unit_id = PMU_UNIT_PG;
+		tmp_size = PMU_CMD_HDR_SIZE +
+			sizeof(struct pmu_pg_cmd_gr_init_param_v1);
+		nvgpu_assert(tmp_size <= (size_t)U8_MAX);
+		cmd.hdr.size = (u8)tmp_size;
+		cmd.cmd.pg.gr_init_param_v1.cmd_type =
+			PMU_PG_CMD_ID_PG_PARAM;
+		cmd.cmd.pg.gr_init_param_v1.sub_cmd_id =
+			PMU_PG_PARAM_CMD_GR_INIT_PARAM;
+		cmd.cmd.pg.gr_init_param_v1.featuremask =
+			NVGPU_PMU_GR_FEATURE_MASK_ALL;
+
+		nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_PG_PARAM_INIT");
+		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
+			pmu_handle_pg_param_msg, pmu, &seq);
+
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id)
+{
+	struct nvgpu_pmu *pmu = &g->pmu;
+	struct pmu_cmd cmd;
+	u32 seq;
+	size_t tmp_size;
+
+	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
+		(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
+		cmd.hdr.unit_id = PMU_UNIT_PG;
+		tmp_size = PMU_CMD_HDR_SIZE +
+			sizeof(struct pmu_pg_cmd_sub_feature_mask_update);
+		nvgpu_assert(tmp_size <= (size_t)U8_MAX);
+		cmd.hdr.size = (u8)tmp_size;
+		cmd.cmd.pg.sf_mask_update.cmd_type =
+			PMU_PG_CMD_ID_PG_PARAM;
+		cmd.cmd.pg.sf_mask_update.sub_cmd_id =
+			PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE;
+		cmd.cmd.pg.sf_mask_update.ctrl_id =
+			PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
+		cmd.cmd.pg.sf_mask_update.enabled_mask =
+			NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING |
+			NVGPU_PMU_GR_FEATURE_MASK_PRIV_RING |
+			NVGPU_PMU_GR_FEATURE_MASK_UNBIND |
+			NVGPU_PMU_GR_FEATURE_MASK_SAVE_GLOBAL_STATE |
+			NVGPU_PMU_GR_FEATURE_MASK_RESET_ENTRY |
+			NVGPU_PMU_GR_FEATURE_MASK_HW_SEQUENCE |
+			NVGPU_PMU_GR_FEATURE_MASK_ELPG_SRAM |
+			NVGPU_PMU_GR_FEATURE_MASK_ELPG_LOGIC |
+			NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG;
+
+		nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE");
+		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
+			pmu_handle_pg_sub_feature_msg, pmu, &seq);
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
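gv11b splits PG bring-up into two PMU_UNIT_PG commands: gr_init_param_v1 advertises NVGPU_PMU_GR_FEATURE_MASK_ALL, and sf_mask_update then narrows the set actually enabled at runtime. A sketch of the presumed call order, with error propagation simplified:

    int example_gv11b_pg_bringup(struct gk20a *g)
    {
        int err;

        /* advertise the full GR feature set to the PMU first */
        err = gv11b_pg_gr_init(g, PMU_PG_ELPG_ENGINE_ID_GRAPHICS);
        if (err != 0) {
            return err;
        }

        /* then restrict it to the enabled_mask listed above */
        return gv11b_pg_set_subfeature_mask(g,
                PMU_PG_ELPG_ENGINE_ID_GRAPHICS);
    }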
drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.h (new file, 33 lines)
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVGPU_PG_SW_GV11B_H
+#define NVGPU_PG_SW_GV11B_H
+
+#include <nvgpu/types.h>
+
+struct gk20a;
+
+int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id);
+int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id);
+
+#endif /* NVGPU_PG_SW_GV11B_H */
@@ -21,17 +21,14 @@
 */

#include <nvgpu/pmu.h>
#include <nvgpu/enabled.h>
#include <nvgpu/io.h>
#include <nvgpu/clk_arb.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/pmu/lpwr.h>

#include "pmu_gk20a.h"
#include "pmu_gm20b.h"
#include "pmu_gp10b.h"
#include "pmu_gp106.h"
#include <nvgpu/hw/gp106/hw_psec_gp106.h>
#include <nvgpu/hw/gp106/hw_pwr_gp106.h>

bool gp106_is_pmu_supported(struct gk20a *g)
@@ -85,106 +82,6 @@ u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
	return 0;
}

-u32 gp106_pmu_pg_engines_list(struct gk20a *g)
-{
-	return BIT32(PMU_PG_ELPG_ENGINE_ID_GRAPHICS) |
-		BIT32(PMU_PG_ELPG_ENGINE_ID_MS);
-}
-
-static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
-		void *param, u32 handle, u32 status)
-{
-	nvgpu_log_fn(g, " ");
-
-	if (status != 0U) {
-		nvgpu_err(g, "PG PARAM cmd aborted");
-		return;
-	}
-
-	gp106_dbg_pmu(g, "PG PARAM is acknowledged from PMU %x",
-		msg->msg.pg.msg_type);
-}
-
-int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	struct pmu_cmd cmd;
-	u32 seq;
-	int status;
-	u64 tmp_size;
-
-	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
-	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
-
-		status = init_rppg(g);
-		if (status != 0) {
-			nvgpu_err(g, "RPPG init Failed");
-			return -1;
-		}
-
-		cmd.hdr.unit_id = PMU_UNIT_PG;
-		tmp_size = PMU_CMD_HDR_SIZE +
-			sizeof(struct pmu_pg_cmd_gr_init_param);
-		nvgpu_assert(tmp_size <= U64(U8_MAX));
-		cmd.hdr.size = U8(tmp_size);
-		cmd.cmd.pg.gr_init_param.cmd_type =
-			PMU_PG_CMD_ID_PG_PARAM;
-		cmd.cmd.pg.gr_init_param.sub_cmd_id =
-			PMU_PG_PARAM_CMD_GR_INIT_PARAM;
-		cmd.cmd.pg.gr_init_param.featuremask =
-			NVGPU_PMU_GR_FEATURE_MASK_RPPG;
-
-		gp106_dbg_pmu(g, "cmd post GR PMU_PG_CMD_ID_PG_PARAM");
-		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
-			pmu_handle_param_msg, pmu, &seq);
-	} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
-		cmd.hdr.unit_id = PMU_UNIT_PG;
-		tmp_size = PMU_CMD_HDR_SIZE +
-			sizeof(struct pmu_pg_cmd_ms_init_param);
-		nvgpu_assert(tmp_size <= U64(U8_MAX));
-		cmd.hdr.size = U8(tmp_size);
-		cmd.cmd.pg.ms_init_param.cmd_type =
-			PMU_PG_CMD_ID_PG_PARAM;
-		cmd.cmd.pg.ms_init_param.cmd_id =
-			PMU_PG_PARAM_CMD_MS_INIT_PARAM;
-		cmd.cmd.pg.ms_init_param.support_mask =
-			NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING |
-			NVGPU_PMU_MS_FEATURE_MASK_SW_ASR |
-			NVGPU_PMU_MS_FEATURE_MASK_RPPG |
-			NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING;
-
-		gp106_dbg_pmu(g, "cmd post MS PMU_PG_CMD_ID_PG_PARAM");
-		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
-			pmu_handle_param_msg, pmu, &seq);
-	}
-
-	return 0;
-}
-
-int gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
-		struct pmu_pg_stats_data *pg_stat_data)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	struct pmu_pg_stats_v2 stats;
-	int err;
-
-	err = nvgpu_falcon_copy_from_dmem(&pmu->flcn,
-		pmu->pmu_pg.stat_dmem_offset[pg_engine_id],
-		(u8 *)&stats, (u32)sizeof(struct pmu_pg_stats_v2), 0);
-	if (err != 0) {
-		nvgpu_err(g, "PMU falcon DMEM copy failed");
-		return err;
-	}
-
-	pg_stat_data->ingating_time = stats.total_sleep_time_us;
-	pg_stat_data->ungating_time = stats.total_non_sleep_time_us;
-	pg_stat_data->gating_cnt = stats.entry_count;
-	pg_stat_data->avg_entry_latency_us = stats.entry_latency_avg_us;
-	pg_stat_data->avg_exit_latency_us = stats.exit_latency_avg_us;
-
-	return err;
-}
-
bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
{
	bool is_feature_supported = false;
@@ -215,7 +112,7 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,

	nvgpu_log_fn(g, " ");

-	gp106_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
+	nvgpu_pmu_dbg(g, "wprinit status = %x", g->pmu_lsf_pmu_wpr_init_done);
	if (g->pmu_lsf_pmu_wpr_init_done) {
		/* send message to load FECS falcon */
		(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -233,7 +130,7 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
	cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0;
	cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0;

-	gp106_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n",
+	nvgpu_pmu_dbg(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x",
		falconidmask);
	nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
		pmu_handle_fecs_boot_acr_msg, pmu, &seq);
@@ -23,20 +23,15 @@
#ifndef NVGPU_PMU_GP106_H
#define NVGPU_PMU_GP106_H

-#define gp106_dbg_pmu(g, fmt, arg...) \
-	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)
+#include <nvgpu/types.h>

struct gk20a;

bool gp106_is_pmu_supported(struct gk20a *g);
u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id);
-u32 gp106_pmu_pg_engines_list(struct gk20a *g);
-int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id);
bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id);
int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
-int gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
-	struct pmu_pg_stats_data *pg_stat_data);
bool gp106_pmu_is_engine_in_reset(struct gk20a *g);
int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset);
void gp106_update_lspmu_cmdline_args(struct gk20a *g);
@@ -24,21 +24,15 @@
#include <nvgpu/pmu.h>
#include <nvgpu/log.h>
#include <nvgpu/fuse.h>
#include <nvgpu/enabled.h>
#include <nvgpu/io.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/bug.h>

#include "common/pmu/pmu_gp10b.h"
#include "pmu_gk20a.h"
#include "pmu_gm20b.h"
#include "pmu_gp10b.h"

#include <nvgpu/hw/gp10b/hw_pwr_gp10b.h>

#define gp10b_dbg_pmu(g, fmt, arg...) \
	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

/* PROD settings for ELPG sequencing registers*/
static struct pg_init_sequence_list _pginitseq_gp10b[] = {
	{0x0010ab10U, 0x0000868BU} ,
@@ -149,7 +143,7 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,

	nvgpu_log_fn(g, " ");

-	gp10b_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
+	nvgpu_pmu_dbg(g, "wprinit status = %x", g->pmu_lsf_pmu_wpr_init_done);
	if (g->pmu_lsf_pmu_wpr_init_done) {
		/* send message to load FECS falcon */
		(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
@@ -166,7 +160,7 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
	cmd.cmd.acr.boot_falcons.usevamask = 0;
	cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0U;
	cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0U;
-	gp10b_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n",
+	nvgpu_pmu_dbg(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x",
		falconidmask);
	nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
		pmu_handle_fecs_boot_acr_msg, pmu, &seq);
@@ -213,81 +207,6 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
	return 0;
}

-static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg,
-		void *param, u32 handle, u32 status)
-{
-	nvgpu_log_fn(g, " ");
-
-	if (status != 0U) {
-		nvgpu_err(g, "GR PARAM cmd aborted");
-		/* TBD: disable ELPG */
-		return;
-	}
-
-	gp10b_dbg_pmu(g, "GR PARAM is acknowledged from PMU %x \n",
-		msg->msg.pg.msg_type);
-
-	return;
-}
-
-int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	struct pmu_cmd cmd;
-	u32 seq;
-	size_t tmp_size;
-
-	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
-		(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
-		cmd.hdr.unit_id = PMU_UNIT_PG;
-		tmp_size = PMU_CMD_HDR_SIZE +
-			sizeof(struct pmu_pg_cmd_gr_init_param_v2);
-		nvgpu_assert(tmp_size <= (size_t)U8_MAX);
-		cmd.hdr.size = (u8)tmp_size;
-		cmd.cmd.pg.gr_init_param_v2.cmd_type =
-			PMU_PG_CMD_ID_PG_PARAM;
-		cmd.cmd.pg.gr_init_param_v2.sub_cmd_id =
-			PMU_PG_PARAM_CMD_GR_INIT_PARAM;
-		cmd.cmd.pg.gr_init_param_v2.featuremask =
-			NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING;
-		cmd.cmd.pg.gr_init_param_v2.ldiv_slowdown_factor =
-			g->ldiv_slowdown_factor;
-
-		gp10b_dbg_pmu(g, "cmd post PMU_PG_CMD_ID_PG_PARAM ");
-		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
-			pmu_handle_gr_param_msg, pmu, &seq);
-
-	} else {
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-int gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
-		struct pmu_pg_stats_data *pg_stat_data)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	struct pmu_pg_stats_v1 stats;
-	int err;
-
-	err = nvgpu_falcon_copy_from_dmem(&pmu->flcn,
-		pmu->pmu_pg.stat_dmem_offset[pg_engine_id],
-		(u8 *)&stats, (u32)sizeof(struct pmu_pg_stats_v1), 0);
-	if (err != 0) {
-		nvgpu_err(g, "PMU falcon DMEM copy failed");
-		return err;
-	}
-
-	pg_stat_data->ingating_time = stats.total_sleep_timeus;
-	pg_stat_data->ungating_time = stats.total_nonsleep_timeus;
-	pg_stat_data->gating_cnt = stats.entry_count;
-	pg_stat_data->avg_entry_latency_us = stats.entrylatency_avgus;
-	pg_stat_data->avg_exit_latency_us = stats.exitlatency_avgus;
-
-	return err;
-}
-
int gp10b_pmu_setup_elpg(struct gk20a *g)
{
	int ret = 0;
@@ -25,14 +25,13 @@
#ifndef NVGPU_PMU_GP10B_H
#define NVGPU_PMU_GP10B_H

#include <nvgpu/types.h>

struct gk20a;

bool gp10b_is_pmu_supported(struct gk20a *g);
int gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
	struct pmu_pg_stats_data *pg_stat_data);
int gp10b_pmu_setup_elpg(struct gk20a *g);
int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask);
int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id);
void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr);

#endif /* NVGPU_PMU_GP10B_H */
@@ -24,8 +24,6 @@
#include <nvgpu/pmu.h>
#include <nvgpu/falcon.h>
#include <nvgpu/enabled.h>
#include <nvgpu/mm.h>
#include <nvgpu/io.h>
#include <nvgpu/utils.h>
#include <nvgpu/gk20a.h>
@@ -33,13 +31,9 @@
#include <nvgpu/firmware.h>
#include <nvgpu/bug.h>

#include "pmu_gp10b.h"
#include "pmu_gp106.h"
#include "pmu_gv11b.h"
#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>

-#define gv11b_dbg_pmu(g, fmt, arg...) \
-	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

#define ALIGN_4KB 12
@@ -174,7 +168,6 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
		pwr_pmu_new_instblk_target_sys_coh_f() :
		pwr_pmu_new_instblk_target_sys_ncoh_f()));

-	/* TBD: load all other surfaces */
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
		pmu, GK20A_PMU_TRACE_BUFSIZE);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
@@ -402,107 +395,6 @@ u32 gv11b_pmu_get_irqdest(struct gk20a *g)
	return intr_dest;
}

-static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg,
-		void *param, u32 handle, u32 status)
-{
-	nvgpu_log_fn(g, " ");
-
-	if (status != 0U) {
-		nvgpu_err(g, "Sub-feature mask update cmd aborted\n");
-		return;
-	}
-
-	gv11b_dbg_pmu(g, "sub-feature mask update is acknowledged from PMU %x\n",
-		msg->msg.pg.msg_type);
-}
-
-static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg,
-		void *param, u32 handle, u32 status)
-{
-	nvgpu_log_fn(g, " ");
-
-	if (status != 0U) {
-		nvgpu_err(g, "GR PARAM cmd aborted\n");
-		return;
-	}
-
-	gv11b_dbg_pmu(g, "GR PARAM is acknowledged from PMU %x\n",
-		msg->msg.pg.msg_type);
-}
-
-int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	struct pmu_cmd cmd;
-	u32 seq;
-	size_t tmp_size;
-
-	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
-		(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
-		cmd.hdr.unit_id = PMU_UNIT_PG;
-		tmp_size = PMU_CMD_HDR_SIZE +
-			sizeof(struct pmu_pg_cmd_gr_init_param_v1);
-		nvgpu_assert(tmp_size <= (size_t)U8_MAX);
-		cmd.hdr.size = (u8)tmp_size;
-		cmd.cmd.pg.gr_init_param_v1.cmd_type =
-			PMU_PG_CMD_ID_PG_PARAM;
-		cmd.cmd.pg.gr_init_param_v1.sub_cmd_id =
-			PMU_PG_PARAM_CMD_GR_INIT_PARAM;
-		cmd.cmd.pg.gr_init_param_v1.featuremask =
-			NVGPU_PMU_GR_FEATURE_MASK_ALL;
-
-		gv11b_dbg_pmu(g, "cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n");
-		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
-			pmu_handle_pg_param_msg, pmu, &seq);
-
-	} else {
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	struct pmu_cmd cmd;
-	u32 seq;
-	size_t tmp_size;
-
-	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
-		(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
-		cmd.hdr.unit_id = PMU_UNIT_PG;
-		tmp_size = PMU_CMD_HDR_SIZE +
-			sizeof(struct pmu_pg_cmd_sub_feature_mask_update);
-		nvgpu_assert(tmp_size <= (size_t)U8_MAX);
-		cmd.hdr.size = (u8)tmp_size;
-		cmd.cmd.pg.sf_mask_update.cmd_type =
-			PMU_PG_CMD_ID_PG_PARAM;
-		cmd.cmd.pg.sf_mask_update.sub_cmd_id =
-			PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE;
-		cmd.cmd.pg.sf_mask_update.ctrl_id =
-			PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
-		cmd.cmd.pg.sf_mask_update.enabled_mask =
-			NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING |
-			NVGPU_PMU_GR_FEATURE_MASK_PRIV_RING |
-			NVGPU_PMU_GR_FEATURE_MASK_UNBIND |
-			NVGPU_PMU_GR_FEATURE_MASK_SAVE_GLOBAL_STATE |
-			NVGPU_PMU_GR_FEATURE_MASK_RESET_ENTRY |
-			NVGPU_PMU_GR_FEATURE_MASK_HW_SEQUENCE |
-			NVGPU_PMU_GR_FEATURE_MASK_ELPG_SRAM |
-			NVGPU_PMU_GR_FEATURE_MASK_ELPG_LOGIC |
-			NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG;
-
-		gv11b_dbg_pmu(g, "cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n");
-		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
-			pmu_handle_pg_sub_feature_msg, pmu, &seq);
-	} else {
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
void gv11b_setup_apertures(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
@@ -512,24 +404,24 @@ void gv11b_setup_apertures(struct gk20a *g)

	/* setup apertures - virtual */
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
-			pwr_fbif_transcfg_mem_type_physical_f() |
-			nvgpu_aperture_mask(g, inst_block,
-				pwr_fbif_transcfg_target_noncoherent_sysmem_f(),
-				pwr_fbif_transcfg_target_coherent_sysmem_f(),
-				pwr_fbif_transcfg_target_local_fb_f()));
+		pwr_fbif_transcfg_mem_type_physical_f() |
+		nvgpu_aperture_mask(g, inst_block,
+			pwr_fbif_transcfg_target_noncoherent_sysmem_f(),
+			pwr_fbif_transcfg_target_coherent_sysmem_f(),
+			pwr_fbif_transcfg_target_local_fb_f()));
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_VIRT),
-			pwr_fbif_transcfg_mem_type_virtual_f());
+		pwr_fbif_transcfg_mem_type_virtual_f());
	/* setup apertures - physical */
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_VID),
-			pwr_fbif_transcfg_mem_type_physical_f() |
-			nvgpu_aperture_mask(g, inst_block,
-				pwr_fbif_transcfg_target_noncoherent_sysmem_f(),
-				pwr_fbif_transcfg_target_coherent_sysmem_f(),
-				pwr_fbif_transcfg_target_local_fb_f()));
+		pwr_fbif_transcfg_mem_type_physical_f() |
+		nvgpu_aperture_mask(g, inst_block,
+			pwr_fbif_transcfg_target_noncoherent_sysmem_f(),
+			pwr_fbif_transcfg_target_coherent_sysmem_f(),
+			pwr_fbif_transcfg_target_local_fb_f()));
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_COH),
-			pwr_fbif_transcfg_mem_type_physical_f() |
-			pwr_fbif_transcfg_target_coherent_sysmem_f());
+		pwr_fbif_transcfg_mem_type_physical_f() |
+		pwr_fbif_transcfg_target_coherent_sysmem_f());
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_NCOH),
-			pwr_fbif_transcfg_mem_type_physical_f() |
-			pwr_fbif_transcfg_target_noncoherent_sysmem_f());
+		pwr_fbif_transcfg_mem_type_physical_f() |
+		pwr_fbif_transcfg_target_noncoherent_sysmem_f());
}
@@ -25,15 +25,15 @@
#ifndef NVGPU_PMU_GV11B_H
#define NVGPU_PMU_GV11B_H

#include <nvgpu/types.h>

struct gk20a;

bool gv11b_is_pmu_supported(struct gk20a *g);
int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu);
-int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id);
-int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id);
int gv11b_pmu_setup_elpg(struct gk20a *g);

u32 gv11b_pmu_get_irqdest(struct gk20a *g);
void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0);
void gv11b_setup_apertures(struct gk20a *g);

#endif /* NVGPU_PMU_GV11B_H */
@@ -84,6 +84,7 @@
 #include "common/pmu/pmu_gk20a.h"
 #include "common/pmu/pmu_gm20b.h"
 #include "common/pmu/pmu_gp10b.h"
+#include "common/pmu/pg/pg_sw_gp10b.h"
 #include "common/top/top_gm20b.h"
 #include "common/top/top_gp10b.h"
 #include "common/sync/syncpt_cmdbuf_gk20a.h"
@@ -93,6 +93,7 @@
 #include "common/pmu/pmu_gp106.h"
 #include "common/pmu/pmu_gv11b.h"
 #include "common/pmu/pmu_gv100.h"
+#include "common/pmu/pg/pg_sw_gp106.h"
 #include "common/nvdec/nvdec_gp106.h"
 #include "common/nvlink/init/device_reginit_gv100.h"
 #include "common/nvlink/intr_and_err_handling_gv100.h"
@@ -86,6 +86,8 @@
 #include "common/pmu/pmu_gp10b.h"
 #include "common/pmu/pmu_gp106.h"
 #include "common/pmu/pmu_gv11b.h"
+#include "common/pmu/pg/pg_sw_gp106.h"
+#include "common/pmu/pg/pg_sw_gv11b.h"
 #include "common/top/top_gm20b.h"
 #include "common/top/top_gp10b.h"
 #include "common/sync/syncpt_cmdbuf_gv11b.h"
@@ -97,6 +97,7 @@
 #include "common/pmu/pmu_gv100.h"
 #include "common/pmu/pmu_gv11b.h"
 #include "common/pmu/pmu_tu104.h"
+#include "common/pmu/pg/pg_sw_gp106.h"
 #include "common/nvdec/nvdec_tu104.h"
 #include "common/top/top_gm20b.h"
 #include "common/top/top_gp10b.h"