From 08f9184f34eb5cda6225bb024c5b1b818c223a3c Mon Sep 17 00:00:00 2001
From: Divya Singhatwaria
Date: Wed, 20 Mar 2019 11:38:18 +0530
Subject: [PATCH] gpu: nvgpu: Move chip specific PG code to PG unit

As part of the PG unit refactoring, move chip-specific PG code from the
common/pmu/pmu_<chip>.c/.h files to the common/pmu/pg folder. Add new
files pg_sw_gp106.c/.h, pg_sw_gp10b.c/.h and pg_sw_gv11b.c/.h for the
PG code.

NVGPU-1973

Change-Id: I97fa2395e388559edc26be5d64bfbc547d6a3e22
Signed-off-by: Divya Singhatwaria
Reviewed-on: https://git-master.nvidia.com/r/2077111
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/Makefile                    |   3 +
 drivers/gpu/nvgpu/Makefile.sources            |   3 +
 drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.c | 128 ++++++++++++++++
 drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.h |  36 +++++
 drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.c | 102 +++++++++++++
 drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.h |  35 +++++
 drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.c | 128 ++++++++++++++++
 drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.h |  33 +++++
 drivers/gpu/nvgpu/common/pmu/pmu_gp106.c      | 109 +-------------
 drivers/gpu/nvgpu/common/pmu/pmu_gp106.h      |   7 +-
 drivers/gpu/nvgpu/common/pmu/pmu_gp10b.c      |  87 +----------
 drivers/gpu/nvgpu/common/pmu/pmu_gp10b.h      |   5 +-
 drivers/gpu/nvgpu/common/pmu/pmu_gv11b.c      | 140 ++----------------
 drivers/gpu/nvgpu/common/pmu/pmu_gv11b.h      |   6 +-
 drivers/gpu/nvgpu/gp10b/hal_gp10b.c           |   1 +
 drivers/gpu/nvgpu/gv100/hal_gv100.c           |   1 +
 drivers/gpu/nvgpu/gv11b/hal_gv11b.c           |   2 +
 drivers/gpu/nvgpu/tu104/hal_tu104.c           |   1 +
 18 files changed, 501 insertions(+), 326 deletions(-)
 create mode 100644 drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.c
 create mode 100644 drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.h
 create mode 100644 drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.c
 create mode 100644 drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.h
 create mode 100644 drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.c
 create mode 100644 drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.h

diff --git a/drivers/gpu/nvgpu/Makefile b/drivers/gpu/nvgpu/Makefile
index 53be9c337..371e5de61 100644
--- a/drivers/gpu/nvgpu/Makefile
+++ b/drivers/gpu/nvgpu/Makefile
@@ -106,6 +106,9 @@ nvgpu-y += \
 	common/pmu/pmu_gp10b.o \
 	common/pmu/pmu_gp106.o \
 	common/pmu/pmu_gv11b.o \
+	common/pmu/pg/pg_sw_gp10b.o \
+	common/pmu/pg/pg_sw_gp106.o \
+	common/pmu/pg/pg_sw_gv11b.o \
 	common/pmu/pmu_gv100.o \
 	common/pmu/pmu_tu104.o \
 	common/acr/acr.o \
diff --git a/drivers/gpu/nvgpu/Makefile.sources b/drivers/gpu/nvgpu/Makefile.sources
index b6bea3ff3..c11d2b784 100644
--- a/drivers/gpu/nvgpu/Makefile.sources
+++ b/drivers/gpu/nvgpu/Makefile.sources
@@ -138,6 +138,9 @@ srcs += common/sim.c \
 	common/pmu/pmu_gp10b.c \
 	common/pmu/pmu_gp106.c \
 	common/pmu/pmu_gv11b.c \
+	common/pmu/pg/pg_sw_gp10b.c \
+	common/pmu/pg/pg_sw_gp106.c \
+	common/pmu/pg/pg_sw_gv11b.c \
 	common/pmu/pmu_gv100.c \
 	common/pmu/pmu_tu104.c \
 	common/acr/acr.c \
diff --git a/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.c b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.c
new file mode 100644
index 000000000..76d99a68c
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include
+#include
+#include
+#include
+
+#include "pg_sw_gp106.h"
+
+static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
+		void *param, u32 handle, u32 status)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (status != 0U) {
+		nvgpu_err(g, "PG PARAM cmd aborted");
+		return;
+	}
+
+	nvgpu_pmu_dbg(g, "PG PARAM is acknowledged from PMU %x",
+		msg->msg.pg.msg_type);
+}
+
+int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
+{
+	struct nvgpu_pmu *pmu = &g->pmu;
+	struct pmu_cmd cmd;
+	u32 seq;
+	int status;
+	u64 tmp_size;
+
+	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
+	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
+
+		status = init_rppg(g);
+		if (status != 0) {
+			nvgpu_err(g, "RPPG init Failed");
+			return -1;
+		}
+
+		cmd.hdr.unit_id = PMU_UNIT_PG;
+		tmp_size = PMU_CMD_HDR_SIZE +
+			sizeof(struct pmu_pg_cmd_gr_init_param);
+		nvgpu_assert(tmp_size <= U64(U8_MAX));
+		cmd.hdr.size = U8(tmp_size);
+		cmd.cmd.pg.gr_init_param.cmd_type =
+			PMU_PG_CMD_ID_PG_PARAM;
+		cmd.cmd.pg.gr_init_param.sub_cmd_id =
+			PMU_PG_PARAM_CMD_GR_INIT_PARAM;
+		cmd.cmd.pg.gr_init_param.featuremask =
+			NVGPU_PMU_GR_FEATURE_MASK_RPPG;
+
+		nvgpu_pmu_dbg(g, "cmd post GR PMU_PG_CMD_ID_PG_PARAM");
+		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
+			pmu_handle_param_msg, pmu, &seq);
+	} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
+		cmd.hdr.unit_id = PMU_UNIT_PG;
+		tmp_size = PMU_CMD_HDR_SIZE +
+			sizeof(struct pmu_pg_cmd_ms_init_param);
+		nvgpu_assert(tmp_size <= U64(U8_MAX));
+		cmd.hdr.size = U8(tmp_size);
+		cmd.cmd.pg.ms_init_param.cmd_type =
+			PMU_PG_CMD_ID_PG_PARAM;
+		cmd.cmd.pg.ms_init_param.cmd_id =
+			PMU_PG_PARAM_CMD_MS_INIT_PARAM;
+		cmd.cmd.pg.ms_init_param.support_mask =
+			NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING |
+			NVGPU_PMU_MS_FEATURE_MASK_SW_ASR |
+			NVGPU_PMU_MS_FEATURE_MASK_RPPG |
+			NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING;
+
+		nvgpu_pmu_dbg(g, "cmd post MS PMU_PG_CMD_ID_PG_PARAM");
+		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
+			pmu_handle_param_msg, pmu, &seq);
+	}
+
+	return 0;
+}
+
+int gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
+	struct pmu_pg_stats_data *pg_stat_data)
+{
+	struct nvgpu_pmu *pmu = &g->pmu;
+	struct pmu_pg_stats_v2 stats;
+	int err;
+
+	err = nvgpu_falcon_copy_from_dmem(&pmu->flcn,
+		pmu->pmu_pg.stat_dmem_offset[pg_engine_id],
+		(u8 *)&stats, (u32)sizeof(struct pmu_pg_stats_v2), 0);
+	if (err != 0) {
+		nvgpu_err(g, "PMU falcon DMEM copy failed");
+		return err;
+	}
+
+	pg_stat_data->ingating_time = stats.total_sleep_time_us;
+	pg_stat_data->ungating_time = stats.total_non_sleep_time_us;
+	pg_stat_data->gating_cnt = stats.entry_count;
+	pg_stat_data->avg_entry_latency_us = stats.entry_latency_avg_us;
+	pg_stat_data->avg_exit_latency_us = stats.exit_latency_avg_us;
+
+	return err;
+}
+
+u32 gp106_pmu_pg_engines_list(struct gk20a *g)
+{
+	return BIT32(PMU_PG_ELPG_ENGINE_ID_GRAPHICS) |
+		BIT32(PMU_PG_ELPG_ENGINE_ID_MS);
+}
diff --git a/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.h b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.h
new file mode 100644
index 000000000..b8200ad76
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp106.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVGPU_PG_SW_GP106_H
+#define NVGPU_PG_SW_GP106_H
+
+#include
+
+struct gk20a;
+struct pmu_pg_stats_data;
+
+int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id);
+int gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
+	struct pmu_pg_stats_data *pg_stat_data);
+u32 gp106_pmu_pg_engines_list(struct gk20a *g);
+
+#endif /* NVGPU_PG_SW_GP106_H */
diff --git a/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.c b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.c
new file mode 100644
index 000000000..368f6cb0f
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include + +#include "pg_sw_gp10b.h" + +static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg, + void *param, u32 handle, u32 status) +{ + nvgpu_log_fn(g, " "); + + if (status != 0U) { + nvgpu_err(g, "GR PARAM cmd aborted"); + return; + } + + nvgpu_pmu_dbg(g, "GR PARAM is acknowledged from PMU %x", + msg->msg.pg.msg_type); + + return; +} + +int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) +{ + struct nvgpu_pmu *pmu = &g->pmu; + struct pmu_cmd cmd; + u32 seq; + size_t tmp_size; + + if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { + (void) memset(&cmd, 0, sizeof(struct pmu_cmd)); + cmd.hdr.unit_id = PMU_UNIT_PG; + tmp_size = PMU_CMD_HDR_SIZE + + sizeof(struct pmu_pg_cmd_gr_init_param_v2); + nvgpu_assert(tmp_size <= (size_t)U8_MAX); + cmd.hdr.size = (u8)tmp_size; + cmd.cmd.pg.gr_init_param_v2.cmd_type = + PMU_PG_CMD_ID_PG_PARAM; + cmd.cmd.pg.gr_init_param_v2.sub_cmd_id = + PMU_PG_PARAM_CMD_GR_INIT_PARAM; + cmd.cmd.pg.gr_init_param_v2.featuremask = + NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING; + cmd.cmd.pg.gr_init_param_v2.ldiv_slowdown_factor = + g->ldiv_slowdown_factor; + + nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_PG_PARAM "); + nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, + pmu_handle_gr_param_msg, pmu, &seq); + + } else { + return -EINVAL; + } + + return 0; +} + +int gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, + struct pmu_pg_stats_data *pg_stat_data) +{ + struct nvgpu_pmu *pmu = &g->pmu; + struct pmu_pg_stats_v1 stats; + int err; + + err = nvgpu_falcon_copy_from_dmem(&pmu->flcn, + pmu->pmu_pg.stat_dmem_offset[pg_engine_id], + (u8 *)&stats, (u32)sizeof(struct pmu_pg_stats_v1), 0); + if (err != 0) { + nvgpu_err(g, "PMU falcon DMEM copy failed"); + return err; + } + + pg_stat_data->ingating_time = stats.total_sleep_timeus; + pg_stat_data->ungating_time = stats.total_nonsleep_timeus; + pg_stat_data->gating_cnt = stats.entry_count; + pg_stat_data->avg_entry_latency_us = stats.entrylatency_avgus; + pg_stat_data->avg_exit_latency_us = stats.exitlatency_avgus; + + return err; +} diff --git a/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.h b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.h new file mode 100644 index 000000000..47ca21cac --- /dev/null +++ b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gp10b.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVGPU_PG_SW_GP10B_H +#define NVGPU_PG_SW_GP10B_H + +#include + +struct gk20a; +struct pmu_pg_stats_data; + +int gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, + struct pmu_pg_stats_data *pg_stat_data); +int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id); + +#endif /* NVGPU_PG_SW_GP10B_H */ diff --git a/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.c b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.c new file mode 100644 index 000000000..d0604bd77 --- /dev/null +++ b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.c @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include + +#include "pg_sw_gv11b.h" + +static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg, + void *param, u32 handle, u32 status) +{ + nvgpu_log_fn(g, " "); + + if (status != 0U) { + nvgpu_err(g, "Sub-feature mask update cmd aborted"); + return; + } + + nvgpu_pmu_dbg(g, "sub-feature mask update is acknowledged from PMU %x", + msg->msg.pg.msg_type); +} + +static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg, + void *param, u32 handle, u32 status) +{ + nvgpu_log_fn(g, " "); + + if (status != 0U) { + nvgpu_err(g, "GR PARAM cmd aborted"); + return; + } + + nvgpu_pmu_dbg(g, "GR PARAM is acknowledged from PMU %x", + msg->msg.pg.msg_type); +} + +int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) +{ + struct nvgpu_pmu *pmu = &g->pmu; + struct pmu_cmd cmd; + u32 seq; + size_t tmp_size; + + if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { + (void) memset(&cmd, 0, sizeof(struct pmu_cmd)); + cmd.hdr.unit_id = PMU_UNIT_PG; + tmp_size = PMU_CMD_HDR_SIZE + + sizeof(struct pmu_pg_cmd_gr_init_param_v1); + nvgpu_assert(tmp_size <= (size_t)U8_MAX); + cmd.hdr.size = (u8)tmp_size; + cmd.cmd.pg.gr_init_param_v1.cmd_type = + PMU_PG_CMD_ID_PG_PARAM; + cmd.cmd.pg.gr_init_param_v1.sub_cmd_id = + PMU_PG_PARAM_CMD_GR_INIT_PARAM; + cmd.cmd.pg.gr_init_param_v1.featuremask = + NVGPU_PMU_GR_FEATURE_MASK_ALL; + + nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_PG_PARAM_INIT"); + nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, + pmu_handle_pg_param_msg, pmu, &seq); + + } else { + return -EINVAL; + } + + return 0; +} + +int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id) +{ + struct nvgpu_pmu *pmu = &g->pmu; + struct pmu_cmd cmd; + u32 seq; + size_t tmp_size; + + if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { + (void) memset(&cmd, 0, sizeof(struct pmu_cmd)); + cmd.hdr.unit_id = PMU_UNIT_PG; + tmp_size = PMU_CMD_HDR_SIZE + + sizeof(struct pmu_pg_cmd_sub_feature_mask_update); + nvgpu_assert(tmp_size <= (size_t)U8_MAX); + cmd.hdr.size = (u8)tmp_size; + cmd.cmd.pg.sf_mask_update.cmd_type = + PMU_PG_CMD_ID_PG_PARAM; + cmd.cmd.pg.sf_mask_update.sub_cmd_id = + PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE; + cmd.cmd.pg.sf_mask_update.ctrl_id = + PMU_PG_ELPG_ENGINE_ID_GRAPHICS; + cmd.cmd.pg.sf_mask_update.enabled_mask = + NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING | + NVGPU_PMU_GR_FEATURE_MASK_PRIV_RING | + NVGPU_PMU_GR_FEATURE_MASK_UNBIND | + NVGPU_PMU_GR_FEATURE_MASK_SAVE_GLOBAL_STATE | + NVGPU_PMU_GR_FEATURE_MASK_RESET_ENTRY | + NVGPU_PMU_GR_FEATURE_MASK_HW_SEQUENCE | + NVGPU_PMU_GR_FEATURE_MASK_ELPG_SRAM | + NVGPU_PMU_GR_FEATURE_MASK_ELPG_LOGIC | + NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG; + + nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE"); + nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, + pmu_handle_pg_sub_feature_msg, pmu, &seq); + } else { + return -EINVAL; + } + + return 0; +} diff --git a/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.h b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.h new file mode 100644 index 000000000..80a2116a7 --- /dev/null +++ b/drivers/gpu/nvgpu/common/pmu/pg/pg_sw_gv11b.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVGPU_PG_SW_GV11B_H +#define NVGPU_PG_SW_GV11B_H + +#include + +struct gk20a; + +int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id); +int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id); + +#endif /* NVGPU_PG_SW_GV11B_H */ diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_gp106.c b/drivers/gpu/nvgpu/common/pmu/pmu_gp106.c index 5b567e21a..bb0e40093 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_gp106.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_gp106.c @@ -21,17 +21,14 @@ */ #include -#include #include #include #include #include #include "pmu_gk20a.h" -#include "pmu_gm20b.h" -#include "pmu_gp10b.h" #include "pmu_gp106.h" -#include + #include bool gp106_is_pmu_supported(struct gk20a *g) @@ -85,106 +82,6 @@ u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id) return 0; } -u32 gp106_pmu_pg_engines_list(struct gk20a *g) -{ - return BIT32(PMU_PG_ELPG_ENGINE_ID_GRAPHICS) | - BIT32(PMU_PG_ELPG_ENGINE_ID_MS); -} - -static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg, - void *param, u32 handle, u32 status) -{ - nvgpu_log_fn(g, " "); - - if (status != 0U) { - nvgpu_err(g, "PG PARAM cmd aborted"); - return; - } - - gp106_dbg_pmu(g, "PG PARAM is acknowledged from PMU %x", - msg->msg.pg.msg_type); -} - -int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id) -{ - struct nvgpu_pmu *pmu = &g->pmu; - struct pmu_cmd cmd; - u32 seq; - int status; - u64 tmp_size; - - (void) memset(&cmd, 0, sizeof(struct pmu_cmd)); - if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { - - status = init_rppg(g); - if (status != 0) { - nvgpu_err(g, "RPPG init Failed"); - return -1; - } - - cmd.hdr.unit_id = PMU_UNIT_PG; - tmp_size = PMU_CMD_HDR_SIZE + - sizeof(struct pmu_pg_cmd_gr_init_param); - nvgpu_assert(tmp_size <= U64(U8_MAX)); - cmd.hdr.size = U8(tmp_size); - cmd.cmd.pg.gr_init_param.cmd_type = - PMU_PG_CMD_ID_PG_PARAM; - cmd.cmd.pg.gr_init_param.sub_cmd_id = - PMU_PG_PARAM_CMD_GR_INIT_PARAM; - cmd.cmd.pg.gr_init_param.featuremask = - NVGPU_PMU_GR_FEATURE_MASK_RPPG; - - gp106_dbg_pmu(g, "cmd post GR PMU_PG_CMD_ID_PG_PARAM"); - nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, - pmu_handle_param_msg, pmu, &seq); - } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) { - cmd.hdr.unit_id = PMU_UNIT_PG; - tmp_size = PMU_CMD_HDR_SIZE + - sizeof(struct pmu_pg_cmd_ms_init_param); - nvgpu_assert(tmp_size <= U64(U8_MAX)); - cmd.hdr.size = U8(tmp_size); - 
cmd.cmd.pg.ms_init_param.cmd_type = - PMU_PG_CMD_ID_PG_PARAM; - cmd.cmd.pg.ms_init_param.cmd_id = - PMU_PG_PARAM_CMD_MS_INIT_PARAM; - cmd.cmd.pg.ms_init_param.support_mask = - NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING | - NVGPU_PMU_MS_FEATURE_MASK_SW_ASR | - NVGPU_PMU_MS_FEATURE_MASK_RPPG | - NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING; - - gp106_dbg_pmu(g, "cmd post MS PMU_PG_CMD_ID_PG_PARAM"); - nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, - pmu_handle_param_msg, pmu, &seq); - } - - return 0; -} - -int gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, - struct pmu_pg_stats_data *pg_stat_data) -{ - struct nvgpu_pmu *pmu = &g->pmu; - struct pmu_pg_stats_v2 stats; - int err; - - err = nvgpu_falcon_copy_from_dmem(&pmu->flcn, - pmu->pmu_pg.stat_dmem_offset[pg_engine_id], - (u8 *)&stats, (u32)sizeof(struct pmu_pg_stats_v2), 0); - if (err != 0) { - nvgpu_err(g, "PMU falcon DMEM copy failed"); - return err; - } - - pg_stat_data->ingating_time = stats.total_sleep_time_us; - pg_stat_data->ungating_time = stats.total_non_sleep_time_us; - pg_stat_data->gating_cnt = stats.entry_count; - pg_stat_data->avg_entry_latency_us = stats.entry_latency_avg_us; - pg_stat_data->avg_exit_latency_us = stats.exit_latency_avg_us; - - return err; -} - bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id) { bool is_feature_supported = false; @@ -215,7 +112,7 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, nvgpu_log_fn(g, " "); - gp106_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); + nvgpu_pmu_dbg(g, "wprinit status = %x", g->pmu_lsf_pmu_wpr_init_done); if (g->pmu_lsf_pmu_wpr_init_done) { /* send message to load FECS falcon */ (void) memset(&cmd, 0, sizeof(struct pmu_cmd)); @@ -233,7 +130,7 @@ static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0; cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0; - gp106_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", + nvgpu_pmu_dbg(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x", falconidmask); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_fecs_boot_acr_msg, pmu, &seq); diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_gp106.h b/drivers/gpu/nvgpu/common/pmu/pmu_gp106.h index fd684a7ec..2966aa8e4 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_gp106.h +++ b/drivers/gpu/nvgpu/common/pmu/pmu_gp106.h @@ -23,20 +23,15 @@ #ifndef NVGPU_PMU_GP106_H #define NVGPU_PMU_GP106_H -#define gp106_dbg_pmu(g, fmt, arg...) 
\ - nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) +#include struct gk20a; bool gp106_is_pmu_supported(struct gk20a *g); u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id); -u32 gp106_pmu_pg_engines_list(struct gk20a *g); -int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id); bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id); int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask); -int gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, - struct pmu_pg_stats_data *pg_stat_data); bool gp106_pmu_is_engine_in_reset(struct gk20a *g); int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset); void gp106_update_lspmu_cmdline_args(struct gk20a *g); diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_gp10b.c b/drivers/gpu/nvgpu/common/pmu/pmu_gp10b.c index cd27e2897..5db27b1c0 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_gp10b.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_gp10b.c @@ -24,21 +24,15 @@ #include #include -#include -#include #include #include #include +#include "common/pmu/pmu_gp10b.h" #include "pmu_gk20a.h" -#include "pmu_gm20b.h" -#include "pmu_gp10b.h" #include -#define gp10b_dbg_pmu(g, fmt, arg...) \ - nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) - /* PROD settings for ELPG sequencing registers*/ static struct pg_init_sequence_list _pginitseq_gp10b[] = { {0x0010ab10U, 0x0000868BU} , @@ -149,7 +143,7 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, nvgpu_log_fn(g, " "); - gp10b_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); + nvgpu_pmu_dbg(g, "wprinit status = %x", g->pmu_lsf_pmu_wpr_init_done); if (g->pmu_lsf_pmu_wpr_init_done) { /* send message to load FECS falcon */ (void) memset(&cmd, 0, sizeof(struct pmu_cmd)); @@ -166,7 +160,7 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, cmd.cmd.acr.boot_falcons.usevamask = 0; cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0U; cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0U; - gp10b_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", + nvgpu_pmu_dbg(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x", falconidmask); nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, pmu_handle_fecs_boot_acr_msg, pmu, &seq); @@ -213,81 +207,6 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask) return 0; } -static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg, - void *param, u32 handle, u32 status) -{ - nvgpu_log_fn(g, " "); - - if (status != 0U) { - nvgpu_err(g, "GR PARAM cmd aborted"); - /* TBD: disable ELPG */ - return; - } - - gp10b_dbg_pmu(g, "GR PARAM is acknowledged from PMU %x \n", - msg->msg.pg.msg_type); - - return; -} - -int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) -{ - struct nvgpu_pmu *pmu = &g->pmu; - struct pmu_cmd cmd; - u32 seq; - size_t tmp_size; - - if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { - (void) memset(&cmd, 0, sizeof(struct pmu_cmd)); - cmd.hdr.unit_id = PMU_UNIT_PG; - tmp_size = PMU_CMD_HDR_SIZE + - sizeof(struct pmu_pg_cmd_gr_init_param_v2); - nvgpu_assert(tmp_size <= (size_t)U8_MAX); - cmd.hdr.size = (u8)tmp_size; - cmd.cmd.pg.gr_init_param_v2.cmd_type = - PMU_PG_CMD_ID_PG_PARAM; - cmd.cmd.pg.gr_init_param_v2.sub_cmd_id = - PMU_PG_PARAM_CMD_GR_INIT_PARAM; - cmd.cmd.pg.gr_init_param_v2.featuremask = - NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING; - cmd.cmd.pg.gr_init_param_v2.ldiv_slowdown_factor = - g->ldiv_slowdown_factor; - - gp10b_dbg_pmu(g, "cmd post PMU_PG_CMD_ID_PG_PARAM "); - nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, 
PMU_COMMAND_QUEUE_HPQ, - pmu_handle_gr_param_msg, pmu, &seq); - - } else { - return -EINVAL; - } - - return 0; -} - -int gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, - struct pmu_pg_stats_data *pg_stat_data) -{ - struct nvgpu_pmu *pmu = &g->pmu; - struct pmu_pg_stats_v1 stats; - int err; - - err = nvgpu_falcon_copy_from_dmem(&pmu->flcn, - pmu->pmu_pg.stat_dmem_offset[pg_engine_id], - (u8 *)&stats, (u32)sizeof(struct pmu_pg_stats_v1), 0); - if (err != 0) { - nvgpu_err(g, "PMU falcon DMEM copy failed"); - return err; - } - - pg_stat_data->ingating_time = stats.total_sleep_timeus; - pg_stat_data->ungating_time = stats.total_nonsleep_timeus; - pg_stat_data->gating_cnt = stats.entry_count; - pg_stat_data->avg_entry_latency_us = stats.entrylatency_avgus; - pg_stat_data->avg_exit_latency_us = stats.exitlatency_avgus; - - return err; -} - int gp10b_pmu_setup_elpg(struct gk20a *g) { int ret = 0; diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_gp10b.h b/drivers/gpu/nvgpu/common/pmu/pmu_gp10b.h index a19b6cd6b..82dae5109 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_gp10b.h +++ b/drivers/gpu/nvgpu/common/pmu/pmu_gp10b.h @@ -25,14 +25,13 @@ #ifndef NVGPU_PMU_GP10B_H #define NVGPU_PMU_GP10B_H +#include + struct gk20a; bool gp10b_is_pmu_supported(struct gk20a *g); -int gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, - struct pmu_pg_stats_data *pg_stat_data); int gp10b_pmu_setup_elpg(struct gk20a *g); int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask); -int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id); void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr); #endif /* NVGPU_PMU_GP10B_H */ diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_gv11b.c b/drivers/gpu/nvgpu/common/pmu/pmu_gv11b.c index 71777d1cb..a34561fd7 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_gv11b.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_gv11b.c @@ -24,8 +24,6 @@ #include #include -#include -#include #include #include #include @@ -33,13 +31,9 @@ #include #include -#include "pmu_gp10b.h" -#include "pmu_gp106.h" #include "pmu_gv11b.h" -#include -#define gv11b_dbg_pmu(g, fmt, arg...) 
\ - nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) +#include #define ALIGN_4KB 12 @@ -174,7 +168,6 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu) pwr_pmu_new_instblk_target_sys_coh_f() : pwr_pmu_new_instblk_target_sys_ncoh_f())); - /* TBD: load all other surfaces */ g->ops.pmu_ver.set_pmu_cmdline_args_trace_size( pmu, GK20A_PMU_TRACE_BUFSIZE); g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu); @@ -402,107 +395,6 @@ u32 gv11b_pmu_get_irqdest(struct gk20a *g) return intr_dest; } -static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg, - void *param, u32 handle, u32 status) -{ - nvgpu_log_fn(g, " "); - - if (status != 0U) { - nvgpu_err(g, "Sub-feature mask update cmd aborted\n"); - return; - } - - gv11b_dbg_pmu(g, "sub-feature mask update is acknowledged from PMU %x\n", - msg->msg.pg.msg_type); -} - -static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg, - void *param, u32 handle, u32 status) -{ - nvgpu_log_fn(g, " "); - - if (status != 0U) { - nvgpu_err(g, "GR PARAM cmd aborted\n"); - return; - } - - gv11b_dbg_pmu(g, "GR PARAM is acknowledged from PMU %x\n", - msg->msg.pg.msg_type); -} - -int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) -{ - struct nvgpu_pmu *pmu = &g->pmu; - struct pmu_cmd cmd; - u32 seq; - size_t tmp_size; - - if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { - (void) memset(&cmd, 0, sizeof(struct pmu_cmd)); - cmd.hdr.unit_id = PMU_UNIT_PG; - tmp_size = PMU_CMD_HDR_SIZE + - sizeof(struct pmu_pg_cmd_gr_init_param_v1); - nvgpu_assert(tmp_size <= (size_t)U8_MAX); - cmd.hdr.size = (u8)tmp_size; - cmd.cmd.pg.gr_init_param_v1.cmd_type = - PMU_PG_CMD_ID_PG_PARAM; - cmd.cmd.pg.gr_init_param_v1.sub_cmd_id = - PMU_PG_PARAM_CMD_GR_INIT_PARAM; - cmd.cmd.pg.gr_init_param_v1.featuremask = - NVGPU_PMU_GR_FEATURE_MASK_ALL; - - gv11b_dbg_pmu(g, "cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n"); - nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, - pmu_handle_pg_param_msg, pmu, &seq); - - } else { - return -EINVAL; - } - - return 0; -} - -int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id) -{ - struct nvgpu_pmu *pmu = &g->pmu; - struct pmu_cmd cmd; - u32 seq; - size_t tmp_size; - - if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { - (void) memset(&cmd, 0, sizeof(struct pmu_cmd)); - cmd.hdr.unit_id = PMU_UNIT_PG; - tmp_size = PMU_CMD_HDR_SIZE + - sizeof(struct pmu_pg_cmd_sub_feature_mask_update); - nvgpu_assert(tmp_size <= (size_t)U8_MAX); - cmd.hdr.size = (u8)tmp_size; - cmd.cmd.pg.sf_mask_update.cmd_type = - PMU_PG_CMD_ID_PG_PARAM; - cmd.cmd.pg.sf_mask_update.sub_cmd_id = - PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE; - cmd.cmd.pg.sf_mask_update.ctrl_id = - PMU_PG_ELPG_ENGINE_ID_GRAPHICS; - cmd.cmd.pg.sf_mask_update.enabled_mask = - NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING | - NVGPU_PMU_GR_FEATURE_MASK_PRIV_RING | - NVGPU_PMU_GR_FEATURE_MASK_UNBIND | - NVGPU_PMU_GR_FEATURE_MASK_SAVE_GLOBAL_STATE | - NVGPU_PMU_GR_FEATURE_MASK_RESET_ENTRY | - NVGPU_PMU_GR_FEATURE_MASK_HW_SEQUENCE | - NVGPU_PMU_GR_FEATURE_MASK_ELPG_SRAM | - NVGPU_PMU_GR_FEATURE_MASK_ELPG_LOGIC | - NVGPU_PMU_GR_FEATURE_MASK_ELPG_L2RPPG; - - gv11b_dbg_pmu(g, "cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n"); - nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, - pmu_handle_pg_sub_feature_msg, pmu, &seq); - } else { - return -EINVAL; - } - - return 0; -} - void gv11b_setup_apertures(struct gk20a *g) { struct mm_gk20a *mm = &g->mm; @@ -512,24 +404,24 @@ void gv11b_setup_apertures(struct gk20a *g) /* setup apertures - virtual */ gk20a_writel(g, 
pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE), - pwr_fbif_transcfg_mem_type_physical_f() | - nvgpu_aperture_mask(g, inst_block, - pwr_fbif_transcfg_target_noncoherent_sysmem_f(), - pwr_fbif_transcfg_target_coherent_sysmem_f(), - pwr_fbif_transcfg_target_local_fb_f())); + pwr_fbif_transcfg_mem_type_physical_f() | + nvgpu_aperture_mask(g, inst_block, + pwr_fbif_transcfg_target_noncoherent_sysmem_f(), + pwr_fbif_transcfg_target_coherent_sysmem_f(), + pwr_fbif_transcfg_target_local_fb_f())); gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_VIRT), - pwr_fbif_transcfg_mem_type_virtual_f()); + pwr_fbif_transcfg_mem_type_virtual_f()); /* setup apertures - physical */ gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_VID), - pwr_fbif_transcfg_mem_type_physical_f() | - nvgpu_aperture_mask(g, inst_block, - pwr_fbif_transcfg_target_noncoherent_sysmem_f(), - pwr_fbif_transcfg_target_coherent_sysmem_f(), - pwr_fbif_transcfg_target_local_fb_f())); + pwr_fbif_transcfg_mem_type_physical_f() | + nvgpu_aperture_mask(g, inst_block, + pwr_fbif_transcfg_target_noncoherent_sysmem_f(), + pwr_fbif_transcfg_target_coherent_sysmem_f(), + pwr_fbif_transcfg_target_local_fb_f())); gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_COH), - pwr_fbif_transcfg_mem_type_physical_f() | - pwr_fbif_transcfg_target_coherent_sysmem_f()); + pwr_fbif_transcfg_mem_type_physical_f() | + pwr_fbif_transcfg_target_coherent_sysmem_f()); gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_NCOH), - pwr_fbif_transcfg_mem_type_physical_f() | - pwr_fbif_transcfg_target_noncoherent_sysmem_f()); + pwr_fbif_transcfg_mem_type_physical_f() | + pwr_fbif_transcfg_target_noncoherent_sysmem_f()); } diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_gv11b.h b/drivers/gpu/nvgpu/common/pmu/pmu_gv11b.h index d941c7670..8a889cdd0 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_gv11b.h +++ b/drivers/gpu/nvgpu/common/pmu/pmu_gv11b.h @@ -25,15 +25,15 @@ #ifndef NVGPU_PMU_GV11B_H #define NVGPU_PMU_GV11B_H +#include + struct gk20a; bool gv11b_is_pmu_supported(struct gk20a *g); int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu); -int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id); -int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id); int gv11b_pmu_setup_elpg(struct gk20a *g); - u32 gv11b_pmu_get_irqdest(struct gk20a *g); void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0); void gv11b_setup_apertures(struct gk20a *g); + #endif /* NVGPU_PMU_GV11B_H */ diff --git a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c index ca9966d29..77163b636 100644 --- a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c @@ -84,6 +84,7 @@ #include "common/pmu/pmu_gk20a.h" #include "common/pmu/pmu_gm20b.h" #include "common/pmu/pmu_gp10b.h" +#include "common/pmu/pg/pg_sw_gp10b.h" #include "common/top/top_gm20b.h" #include "common/top/top_gp10b.h" #include "common/sync/syncpt_cmdbuf_gk20a.h" diff --git a/drivers/gpu/nvgpu/gv100/hal_gv100.c b/drivers/gpu/nvgpu/gv100/hal_gv100.c index 14e8e0df0..0548065a5 100644 --- a/drivers/gpu/nvgpu/gv100/hal_gv100.c +++ b/drivers/gpu/nvgpu/gv100/hal_gv100.c @@ -93,6 +93,7 @@ #include "common/pmu/pmu_gp106.h" #include "common/pmu/pmu_gv11b.h" #include "common/pmu/pmu_gv100.h" +#include "common/pmu/pg/pg_sw_gp106.h" #include "common/nvdec/nvdec_gp106.h" #include "common/nvlink/init/device_reginit_gv100.h" #include "common/nvlink/intr_and_err_handling_gv100.h" diff --git a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c index 
f5ed0c085..4a88f667e 100644 --- a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c @@ -86,6 +86,8 @@ #include "common/pmu/pmu_gp10b.h" #include "common/pmu/pmu_gp106.h" #include "common/pmu/pmu_gv11b.h" +#include "common/pmu/pg/pg_sw_gp106.h" +#include "common/pmu/pg/pg_sw_gv11b.h" #include "common/top/top_gm20b.h" #include "common/top/top_gp10b.h" #include "common/sync/syncpt_cmdbuf_gv11b.h" diff --git a/drivers/gpu/nvgpu/tu104/hal_tu104.c b/drivers/gpu/nvgpu/tu104/hal_tu104.c index ed3b0c909..643508e14 100644 --- a/drivers/gpu/nvgpu/tu104/hal_tu104.c +++ b/drivers/gpu/nvgpu/tu104/hal_tu104.c @@ -97,6 +97,7 @@ #include "common/pmu/pmu_gv100.h" #include "common/pmu/pmu_gv11b.h" #include "common/pmu/pmu_tu104.h" +#include "common/pmu/pg/pg_sw_gp106.h" #include "common/nvdec/nvdec_tu104.h" #include "common/top/top_gm20b.h" #include "common/top/top_gp10b.h"
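
For reference, the relocated functions keep their names and signatures, so the
chip HAL files above only gain the new pg_sw_*.h includes. A minimal sketch of
how a HAL could wire these hooks up follows; the gpu_ops field names are
illustrative assumptions and are not defined by this patch, though the pairing
of shared gp106 helpers with gv11b-specific ones mirrors the includes added to
hal_gv11b.c:

/* Hypothetical HAL excerpt, assuming gpu_ops exposes PG hooks like these. */
#include "common/pmu/pg/pg_sw_gp106.h"
#include "common/pmu/pg/pg_sw_gv11b.h"

static const struct gpu_ops gv11b_ops = {
	.pmu = {
		/* ELPG statistics and engine list shared from pg_sw_gp106.c */
		.pmu_elpg_statistics = gp106_pmu_elpg_statistics,
		.pmu_pg_supported_engines_list = gp106_pmu_pg_engines_list,
		/* gv11b-specific PG parameter init and sub-feature mask */
		.pmu_pg_init_param = gv11b_pg_gr_init,
		.pmu_pg_set_subfeature_mask = gv11b_pg_set_subfeature_mask,
	},
};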