diff --git a/drivers/gpu/nvgpu/common/clk_arb/clk_arb.c b/drivers/gpu/nvgpu/common/clk_arb/clk_arb.c index 287a1da88..c37e33e52 100644 --- a/drivers/gpu/nvgpu/common/clk_arb/clk_arb.c +++ b/drivers/gpu/nvgpu/common/clk_arb/clk_arb.c @@ -37,6 +37,7 @@ #include #include #include +#include #include int nvgpu_clk_notification_queue_alloc(struct gk20a *g, diff --git a/drivers/gpu/nvgpu/common/clk_arb/clk_arb_gv100.c b/drivers/gpu/nvgpu/common/clk_arb/clk_arb_gv100.c index f119e5a4b..a2d0981b8 100644 --- a/drivers/gpu/nvgpu/common/clk_arb/clk_arb_gv100.c +++ b/drivers/gpu/nvgpu/common/clk_arb/clk_arb_gv100.c @@ -24,10 +24,11 @@ #include #include #include +#include +#include #include #include "clk_arb_gv100.h" -#include "common/pmu/clk/clk.h" bool gv100_check_clk_arb_support(struct gk20a *g) { @@ -58,7 +59,7 @@ int gv100_get_arbiter_clk_range(struct gk20a *g, u32 api_domain, { u32 clkwhich; struct clk_set_info *p0_info; - struct nvgpu_avfsfllobjs *pfllobjs = &(g->clk_pmu->avfs_fllobjs); + struct nvgpu_avfsfllobjs *pfllobjs = g->clk_pmu->avfs_fllobjs; u16 limit_min_mhz; bool error_status = false; diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk.c b/drivers/gpu/nvgpu/common/pmu/clk/clk.c index 7f170acff..03a4ff146 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk.c @@ -29,30 +29,21 @@ #include #include #include +#include #include -#include - -#include "clk.h" #include -#define BOOT_MCLK_MHZ 3003U - -struct clkrpc_pmucmdhandler_params { - struct nv_pmu_clk_rpc *prpccall; - u32 success; -}; - -static void clkrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, - void *param, u32 handle, u32 status) +void nvgpu_clkrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, + void *param, u32 handle, u32 status) { - struct clkrpc_pmucmdhandler_params *phandlerparams = - (struct clkrpc_pmucmdhandler_params *)param; + struct nvgpu_clkrpc_pmucmdhandler_params *phandlerparams = + (struct nvgpu_clkrpc_pmucmdhandler_params *)param; nvgpu_log_info(g, " "); if (msg->msg.clk.msg_type != NV_PMU_CLK_MSG_ID_RPC) { - nvgpu_err(g, "unsupported msg for VFE LOAD RPC %x", - msg->msg.clk.msg_type); + nvgpu_err(g, "unsupported msg for CLK LOAD RPC %x", + msg->msg.clk.msg_type); return; } @@ -61,408 +52,31 @@ static void clkrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, } } - -int clk_pmu_freq_effective_avg_load(struct gk20a *g, bool bload) +int nvgpu_clk_domain_freq_to_volt(struct gk20a *g, u8 clkdomain_idx, + u32 *pclkmhz, u32 *pvoltuv, u8 railidx) { - struct pmu_cmd cmd; - struct pmu_payload payload; - int status; - u32 seqdesc; - struct nv_pmu_clk_rpc rpccall; - struct clkrpc_pmucmdhandler_params handler; - struct nv_pmu_clk_load *clkload; + struct nv_pmu_rpc_clk_domain_35_prog_freq_to_volt rpc; + struct nvgpu_pmu *pmu = &g->pmu; + int status = -EINVAL; - (void) memset(&payload, 0, sizeof(struct pmu_payload)); - (void) memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc)); - (void) memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params)); - (void) memset(&cmd, 0, sizeof(struct pmu_cmd)); - - rpccall.function = NV_PMU_CLK_RPC_ID_LOAD; - clkload = &rpccall.params.clk_load; - clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_FREQ_EFFECTIVE_AVG; - clkload->action_mask = bload ? 
- NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_EFFECTIVE_AVG_CALLBACK_YES : - NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_EFFECTIVE_AVG_CALLBACK_NO; - - cmd.hdr.unit_id = PMU_UNIT_CLK; - cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) + - (u32)sizeof(struct pmu_hdr); - - cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC; - - payload.in.buf = (u8 *)&rpccall; - payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc); - payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; - nvgpu_assert(NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET < U64(U32_MAX)); - payload.in.offset = (u32)NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET; - - payload.out.buf = (u8 *)&rpccall; - payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc); - payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; - payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET; - - handler.prpccall = &rpccall; - handler.success = 0; - - status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, - PMU_COMMAND_QUEUE_LPQ, - clkrpc_pmucmdhandler, (void *)&handler, - &seqdesc); + (void)memset(&rpc, 0, + sizeof(struct nv_pmu_rpc_clk_domain_35_prog_freq_to_volt)); + rpc.volt_rail_idx = + nvgpu_volt_rail_volt_domain_convert_to_idx(g, railidx); + rpc.clk_domain_idx = clkdomain_idx; + rpc.voltage_type = CTRL_VOLT_DOMAIN_LOGIC; + rpc.input.value = *pclkmhz; + PMU_RPC_EXECUTE_CPB(status, pmu, CLK, + CLK_DOMAIN_35_PROG_FREQ_TO_VOLT, &rpc, 0); if (status != 0) { - nvgpu_err(g, "unable to post clk RPC cmd %x", - cmd.cmd.clk.cmd_type); - goto done; + nvgpu_err(g, "Failed to execute Freq to Volt RPC status=0x%x", + status); } - - pmu_wait_message_cond(&g->pmu, - nvgpu_get_poll_timeout(g), - &handler.success, 1); - if (handler.success == 0U) { - nvgpu_err(g, "rpc call to load Effective avg clk domain freq failed"); - status = -EINVAL; - } - -done: + *pvoltuv = rpc.output.value; return status; } -int clk_freq_effective_avg(struct gk20a *g, u32 *freqkHz, u32 clkDomainMask) { - - struct pmu_cmd cmd; - struct pmu_payload payload; - int status = 0; - u32 seqdesc; - struct nv_pmu_clk_rpc rpccall; - struct clkrpc_pmucmdhandler_params handler; - struct nv_pmu_clk_freq_effective_avg *clk_freq_effective_avg; - - (void) memset(&payload, 0, sizeof(struct pmu_payload)); - (void) memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc)); - (void) memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params)); - (void) memset(&cmd, 0, sizeof(struct pmu_cmd)); - - rpccall.function = NV_PMU_CLK_RPC_ID_CLK_FREQ_EFF_AVG; - clk_freq_effective_avg = &rpccall.params.clk_freq_effective_avg; - clk_freq_effective_avg->clkDomainMask = clkDomainMask; - - cmd.hdr.unit_id = PMU_UNIT_CLK; - cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) + - (u32)sizeof(struct pmu_hdr); - - cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC; - - payload.in.buf = (u8 *)&rpccall; - payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc); - payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; - nvgpu_assert(NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET < U64(U32_MAX)); - payload.in.offset = (u32)NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET; - - payload.out.buf = (u8 *)&rpccall; - payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc); - payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; - payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET; - - handler.prpccall = &rpccall; - handler.success = 0; - - status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, - PMU_COMMAND_QUEUE_LPQ, - clkrpc_pmucmdhandler, (void *)&handler, - &seqdesc); - if (status != 0) { - nvgpu_err(g, "unable to post clk RPC cmd %x", - cmd.cmd.clk.cmd_type); - goto done; - } - - 
pmu_wait_message_cond(&g->pmu, - nvgpu_get_poll_timeout(g), - &handler.success, 1); - if (handler.success == 0U) { - nvgpu_err(g, "rpc call to get clk frequency average failed"); - status = -EINVAL; - goto done; - } - - *freqkHz = rpccall.params.clk_freq_effective_avg.freqkHz[clkDomainMask]; - -done: - return status; -} - -int clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx) -{ - struct pmu_cmd cmd; - struct pmu_payload payload; - int status; - u32 seqdesc; - struct nv_pmu_clk_rpc rpccall; - struct clkrpc_pmucmdhandler_params handler; - struct nv_pmu_clk_load *clkload; - struct nvgpu_clk_freq_controllers *pclk_freq_controllers; - struct ctrl_boardobjgrp_mask_e32 *load_mask; - struct boardobjgrpmask_e32 isolate_cfc_mask; - - (void) memset(&payload, 0, sizeof(struct pmu_payload)); - (void) memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc)); - (void) memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params)); - - pclk_freq_controllers = &g->clk_pmu->clk_freq_controllers; - rpccall.function = NV_PMU_CLK_RPC_ID_LOAD; - clkload = &rpccall.params.clk_load; - clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_FREQ_CONTROLLER; - clkload->action_mask = bload ? - NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_CONTROLLER_CALLBACK_YES : - NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_CONTROLLER_CALLBACK_NO; - - load_mask = &rpccall.params.clk_load.payload.freq_controllers.load_mask; - - status = boardobjgrpmask_e32_init(&isolate_cfc_mask, NULL); - - if (bit_idx == CTRL_CLK_CLK_FREQ_CONTROLLER_ID_ALL) { - status = boardobjgrpmask_export( - &pclk_freq_controllers-> - freq_ctrl_load_mask.super, - pclk_freq_controllers-> - freq_ctrl_load_mask.super.bitcount, - &load_mask->super); - - - } else { - status = boardobjgrpmask_bitset(&isolate_cfc_mask.super, - bit_idx); - status = boardobjgrpmask_export(&isolate_cfc_mask.super, - isolate_cfc_mask.super.bitcount, - &load_mask->super); - if (bload) { - status = boardobjgrpmask_bitset( - &pclk_freq_controllers-> - freq_ctrl_load_mask.super, - bit_idx); - } else { - status = boardobjgrpmask_bitclr( - &pclk_freq_controllers-> - freq_ctrl_load_mask.super, - bit_idx); - } - } - - if (status != 0) { - nvgpu_err(g, "Error in generating mask used to select CFC"); - goto done; - } - - cmd.hdr.unit_id = PMU_UNIT_CLK; - cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) + - (u32)sizeof(struct pmu_hdr); - - cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC; - - payload.in.buf = (u8 *)&rpccall; - payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc); - payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; - nvgpu_assert(NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET < U64(U32_MAX)); - payload.in.offset = (u32)NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET; - - payload.out.buf = (u8 *)&rpccall; - payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc); - payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; - payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET; - - handler.prpccall = &rpccall; - handler.success = 0; - status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, - PMU_COMMAND_QUEUE_LPQ, - clkrpc_pmucmdhandler, (void *)&handler, - &seqdesc); - - if (status != 0) { - nvgpu_err(g, "unable to post clk RPC cmd %x", - cmd.cmd.clk.cmd_type); - goto done; - } - - pmu_wait_message_cond(&g->pmu, - nvgpu_get_poll_timeout(g), - &handler.success, 1); - - if (handler.success == 0U) { - nvgpu_err(g, "rpc call to load freq cntlr cal failed"); - status = -EINVAL; - } - -done: - return status; -} - -int nvgpu_clk_pmu_vin_load(struct gk20a *g) -{ - struct pmu_cmd cmd; - struct pmu_payload payload; - 
int status; - u32 seqdesc; - struct nv_pmu_clk_rpc rpccall; - struct clkrpc_pmucmdhandler_params handler; - struct nv_pmu_clk_load *clkload; - - (void) memset(&payload, 0, sizeof(struct pmu_payload)); - (void) memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc)); - (void) memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params)); - - rpccall.function = NV_PMU_CLK_RPC_ID_LOAD; - clkload = &rpccall.params.clk_load; - clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_VIN; - clkload->action_mask = NV_NV_PMU_CLK_LOAD_ACTION_MASK_VIN_HW_CAL_PROGRAM_YES << 4; - - cmd.hdr.unit_id = PMU_UNIT_CLK; - cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) + - (u32)sizeof(struct pmu_hdr); - - cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC; - cmd.cmd.clk.generic.b_perf_daemon_cmd =false; - - payload.in.buf = (u8 *)&rpccall; - payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc); - payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; - nvgpu_assert(NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET < U64(U32_MAX)); - payload.in.offset = (u32)NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET; - - payload.out.buf = (u8 *)&rpccall; - payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc); - payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; - payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET; - - handler.prpccall = &rpccall; - handler.success = 0; - status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, - PMU_COMMAND_QUEUE_LPQ, - clkrpc_pmucmdhandler, (void *)&handler, - &seqdesc); - - if (status != 0) { - nvgpu_err(g, "unable to post clk RPC cmd %x", - cmd.cmd.clk.cmd_type); - goto done; - } - - pmu_wait_message_cond(&g->pmu, - nvgpu_get_poll_timeout(g), - &handler.success, 1); - - if (handler.success == 0U) { - nvgpu_err(g, "rpc call to load vin cal failed"); - status = -EINVAL; - } - -done: - return status; -} - -int nvgpu_clk_pmu_clk_domains_load(struct gk20a *g) -{ - struct pmu_cmd cmd; - struct pmu_payload payload; - struct nv_pmu_clk_rpc rpccall; - struct clkrpc_pmucmdhandler_params handler; - struct nv_pmu_clk_load *clkload; - int status; - u32 seqdesc; - - (void) memset(&payload, 0, sizeof(struct pmu_payload)); - (void) memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc)); - (void) memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params)); - - rpccall.function = NV_PMU_CLK_RPC_ID_LOAD; - clkload = &rpccall.params.clk_load; - clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_CLK_DOMAIN; - - cmd.hdr.unit_id = PMU_UNIT_CLK; - cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) + - (u32)sizeof(struct pmu_hdr); - - cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC; - cmd.cmd.clk.generic.b_perf_daemon_cmd = false; - - payload.in.buf = (u8 *)&rpccall; - payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc); - payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; - nvgpu_assert(NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET < U64(U32_MAX)); - payload.in.offset = (u32)NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET; - - payload.out.buf = (u8 *)&rpccall; - payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc); - payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; - payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET; - - handler.prpccall = &rpccall; - handler.success = 0; - status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, - PMU_COMMAND_QUEUE_LPQ, - clkrpc_pmucmdhandler, (void *)&handler, - &seqdesc); - - if (status != 0) { - nvgpu_err(g, "unable to post clk RPC cmd %x", - cmd.cmd.clk.cmd_type); - goto done; - } - - (void) pmu_wait_message_cond(&g->pmu, - nvgpu_get_poll_timeout(g), - &handler.success, 1); - - if 
(handler.success == 0U) { - nvgpu_err(g, "rpc call to load clk_domains cal failed"); - status = -EINVAL; - } - -done: - return status; -} - -u32 nvgpu_clk_vf_change_inject_data_fill_gp10x(struct gk20a *g, - struct nv_pmu_clk_rpc *rpccall, - struct nvgpu_set_fll_clk *setfllclk) -{ - struct nv_pmu_clk_vf_change_inject *vfchange; - - vfchange = &rpccall->params.clk_vf_change_inject; - vfchange->flags = 0; - vfchange->clk_list.num_domains = 3; - vfchange->clk_list.clk_domains[0].clk_domain = CTRL_CLK_DOMAIN_GPCCLK; - vfchange->clk_list.clk_domains[0].clk_freq_khz = - (u32)setfllclk->gpc2clkmhz * 1000U; - vfchange->clk_list.clk_domains[0].clk_flags = 0; - vfchange->clk_list.clk_domains[0].current_regime_id = - setfllclk->current_regime_id_gpc; - vfchange->clk_list.clk_domains[0].target_regime_id = - setfllclk->target_regime_id_gpc; - vfchange->clk_list.clk_domains[1].clk_domain = CTRL_CLK_DOMAIN_XBARCLK; - vfchange->clk_list.clk_domains[1].clk_freq_khz = - (u32)setfllclk->xbar2clkmhz * 1000U; - vfchange->clk_list.clk_domains[1].clk_flags = 0; - vfchange->clk_list.clk_domains[1].current_regime_id = - setfllclk->current_regime_id_xbar; - vfchange->clk_list.clk_domains[1].target_regime_id = - setfllclk->target_regime_id_xbar; - vfchange->clk_list.clk_domains[2].clk_domain = CTRL_CLK_DOMAIN_SYSCLK; - vfchange->clk_list.clk_domains[2].clk_freq_khz = - (u32)setfllclk->sys2clkmhz * 1000U; - vfchange->clk_list.clk_domains[2].clk_flags = 0; - vfchange->clk_list.clk_domains[2].current_regime_id = - setfllclk->current_regime_id_sys; - vfchange->clk_list.clk_domains[2].target_regime_id = - setfllclk->target_regime_id_sys; - vfchange->volt_list.num_rails = 1; - vfchange->volt_list.rails[0].volt_domain = CTRL_VOLT_DOMAIN_LOGIC; - vfchange->volt_list.rails[0].voltage_uv = setfllclk->voltuv; - vfchange->volt_list.rails[0].voltage_min_noise_unaware_uv = - setfllclk->voltuv; - - return 0; -} - -u32 nvgpu_clk_vf_change_inject_data_fill_gv10x(struct gk20a *g, +static u32 nvgpu_clk_vf_change_inject_data_fill(struct gk20a *g, struct nv_pmu_clk_rpc *rpccall, struct nvgpu_set_fll_clk *setfllclk) { @@ -473,15 +87,15 @@ u32 nvgpu_clk_vf_change_inject_data_fill_gv10x(struct gk20a *g, vfchange->clk_list.num_domains = 4; vfchange->clk_list.clk_domains[0].clk_domain = CTRL_CLK_DOMAIN_GPCCLK; vfchange->clk_list.clk_domains[0].clk_freq_khz = - (u32)setfllclk->gpc2clkmhz * 1000U; + (u32)setfllclk->gpc2clkmhz * 1000U; vfchange->clk_list.clk_domains[1].clk_domain = CTRL_CLK_DOMAIN_XBARCLK; vfchange->clk_list.clk_domains[1].clk_freq_khz = - (u32)setfllclk->xbar2clkmhz * 1000U; + (u32)setfllclk->xbar2clkmhz * 1000U; vfchange->clk_list.clk_domains[2].clk_domain = CTRL_CLK_DOMAIN_SYSCLK; vfchange->clk_list.clk_domains[2].clk_freq_khz = - (u32)setfllclk->sys2clkmhz * 1000U; + (u32)setfllclk->sys2clkmhz * 1000U; vfchange->clk_list.clk_domains[3].clk_domain = CTRL_CLK_DOMAIN_NVDCLK; vfchange->clk_list.clk_domains[3].clk_freq_khz = 855 * 1000; @@ -490,7 +104,7 @@ u32 nvgpu_clk_vf_change_inject_data_fill_gv10x(struct gk20a *g, vfchange->volt_list.rails[0].rail_idx = 0; vfchange->volt_list.rails[0].voltage_uv = setfllclk->voltuv; vfchange->volt_list.rails[0].voltage_min_noise_unaware_uv = - setfllclk->voltuv; + setfllclk->voltuv; return 0; } @@ -503,11 +117,12 @@ static int clk_pmu_vf_inject(struct gk20a *g, int status; u32 seqdesc; struct nv_pmu_clk_rpc rpccall; - struct clkrpc_pmucmdhandler_params handler; + struct nvgpu_clkrpc_pmucmdhandler_params handler; (void) memset(&payload, 0, sizeof(struct pmu_payload)); (void) memset(&rpccall, 
0, sizeof(struct nv_pmu_clk_rpc)); - (void) memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params)); + (void) memset(&handler, 0, + sizeof(struct nvgpu_clkrpc_pmucmdhandler_params)); (void) memset(&cmd, 0, sizeof(struct pmu_cmd)); if ((setfllclk->gpc2clkmhz == 0U) || (setfllclk->xbar2clkmhz == 0U) || @@ -523,12 +138,11 @@ static int clk_pmu_vf_inject(struct gk20a *g, rpccall.function = NV_PMU_CLK_RPC_ID_CLK_VF_CHANGE_INJECT; - g->ops.pmu_ver.clk.clk_vf_change_inject_data_fill(g, - &rpccall, setfllclk); + nvgpu_clk_vf_change_inject_data_fill(g, &rpccall, setfllclk); cmd.hdr.unit_id = PMU_UNIT_CLK; cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) + - (u32)sizeof(struct pmu_hdr); + (u32)sizeof(struct pmu_hdr); cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC; @@ -547,13 +161,12 @@ static int clk_pmu_vf_inject(struct gk20a *g, handler.success = 0; status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, - PMU_COMMAND_QUEUE_LPQ, - clkrpc_pmucmdhandler, (void *)&handler, - &seqdesc); + PMU_COMMAND_QUEUE_LPQ, nvgpu_clkrpc_pmucmdhandler, + (void *)&handler, &seqdesc); if (status != 0) { nvgpu_err(g, "unable to post clk RPC cmd %x", - cmd.cmd.clk.cmd_type); + cmd.cmd.clk.cmd_type); goto done; } @@ -569,88 +182,37 @@ done: return status; } -static u8 find_regime_id(struct gk20a *g, u32 domain, u16 clkmhz) -{ - struct fll_device *pflldev; - u8 j; - struct nvgpu_clk_pmupstate *pclk = g->clk_pmu; - - BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super), - struct fll_device *, pflldev, j) { - if (pflldev->clk_domain == domain) { - if (pflldev->regime_desc.fixed_freq_regime_limit_mhz >= - clkmhz) { - return CTRL_CLK_FLL_REGIME_ID_FFR; - } else { - return CTRL_CLK_FLL_REGIME_ID_FR; - } - } - } - return CTRL_CLK_FLL_REGIME_ID_INVALID; -} - -static int set_regime_id(struct gk20a *g, u32 domain, u8 regimeid) -{ - struct fll_device *pflldev; - u8 j; - struct nvgpu_clk_pmupstate *pclk = g->clk_pmu; - - BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super), - struct fll_device *, pflldev, j) { - if (pflldev->clk_domain == domain) { - pflldev->regime_desc.regime_id = regimeid; - return 0; - } - } - return -EINVAL; -} - -static int get_regime_id(struct gk20a *g, u32 domain, u8 *regimeid) -{ - struct fll_device *pflldev; - u8 j; - struct nvgpu_clk_pmupstate *pclk = g->clk_pmu; - - BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super), - struct fll_device *, pflldev, j) { - if (pflldev->clk_domain == domain) { - *regimeid = pflldev->regime_desc.regime_id; - return 0; - } - } - return -EINVAL; -} - -int clk_set_fll_clks(struct gk20a *g, struct nvgpu_set_fll_clk *setfllclk) +int nvgpu_clk_set_fll_clks(struct gk20a *g, + struct nvgpu_set_fll_clk *setfllclk) { int status = -EINVAL; /*set regime ids */ - status = get_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK, + status = g->clk_pmu->get_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK, &setfllclk->current_regime_id_gpc); if (status != 0) { goto done; } - setfllclk->target_regime_id_gpc = find_regime_id(g, + setfllclk->target_regime_id_gpc = g->clk_pmu->find_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK, setfllclk->gpc2clkmhz); - status = get_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK, + status = g->clk_pmu->get_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK, &setfllclk->current_regime_id_sys); if (status != 0) { goto done; } - setfllclk->target_regime_id_sys = find_regime_id(g, + setfllclk->target_regime_id_sys = g->clk_pmu->find_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK, setfllclk->sys2clkmhz); - status = get_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK, + status = g->clk_pmu->get_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK, 
&setfllclk->current_regime_id_xbar); if (status != 0) { goto done; } - setfllclk->target_regime_id_xbar = find_regime_id(g, + setfllclk->target_regime_id_xbar = g->clk_pmu->find_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK, setfllclk->xbar2clkmhz); status = clk_pmu_vf_inject(g, setfllclk); @@ -660,19 +222,19 @@ int clk_set_fll_clks(struct gk20a *g, struct nvgpu_set_fll_clk *setfllclk) } /* save regime ids */ - status = set_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK, + status = g->clk_pmu->set_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK, setfllclk->target_regime_id_xbar); if (status != 0) { goto done; } - status = set_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK, + status = g->clk_pmu->set_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK, setfllclk->target_regime_id_gpc); if (status != 0) { goto done; } - status = set_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK, + status = g->clk_pmu->set_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK, setfllclk->target_regime_id_sys); if (status != 0) { goto done; @@ -681,457 +243,20 @@ done: return status; } -int nvgpu_clk_get_fll_clks(struct gk20a *g, struct nvgpu_set_fll_clk *setfllclk) +int nvgpu_clk_get_fll_clks(struct gk20a *g, + struct nvgpu_set_fll_clk *setfllclk) { int status = -EINVAL; - struct nvgpu_clk_domain *pdomain; - u8 i; - struct nvgpu_clk_pmupstate *pclk = g->clk_pmu; - unsigned long bit; - u16 clkmhz = 0; - struct clk_domain_35_master *p35master; - struct clk_domain_35_slave *p35slave; - unsigned long slaveidxmask; + status = g->clk_pmu->get_fll(g, setfllclk); - if (setfllclk->gpc2clkmhz == 0U) { - return -EINVAL; - } - - BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super), - struct nvgpu_clk_domain *, pdomain, i) { - - if (pdomain->api_domain == CTRL_CLK_DOMAIN_GPCCLK) { - if (!pdomain->super.implements(g, &pdomain->super, - CTRL_CLK_CLK_DOMAIN_TYPE_35_MASTER)) { - status = -EINVAL; - goto done; - } - p35master = (struct clk_domain_35_master *)pdomain; - slaveidxmask = p35master->master.slave_idxs_mask; - for_each_set_bit(bit, &slaveidxmask, 32U) { - i = (u8)bit; - p35slave = (struct clk_domain_35_slave *) - CLK_CLK_DOMAIN_GET(pclk, i); - - clkmhz = 0; - status = p35slave->slave.clkdomainclkgetslaveclk(g, - pclk, (struct nvgpu_clk_domain *)(void *)p35slave, - &clkmhz, setfllclk->gpc2clkmhz); - if (status != 0) { - status = -EINVAL; - goto done; - } - if (p35slave->super.super.super.super.api_domain == - CTRL_CLK_DOMAIN_XBARCLK) { - setfllclk->xbar2clkmhz = clkmhz; - } - if (p35slave->super.super.super.super.api_domain == - CTRL_CLK_DOMAIN_SYSCLK) { - setfllclk->sys2clkmhz = clkmhz; - } - if (p35slave->super.super.super.super.api_domain == - CTRL_CLK_DOMAIN_NVDCLK) { - setfllclk->nvdclkmhz = clkmhz; - } - if (p35slave->super.super.super.super.api_domain == - CTRL_CLK_DOMAIN_HOSTCLK) { - setfllclk->hostclkmhz = clkmhz; - } - } - } - } -done: - return status; -} - -int clk_domain_print_vf_table(struct gk20a *g, u32 clkapidomain) -{ - int status = -EINVAL; - struct nvgpu_clk_domain *pdomain; - u8 i; - struct nvgpu_clk_pmupstate *pclk = g->clk_pmu; - u16 clkmhz = 0; - u32 volt = 0; - - BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super), - struct nvgpu_clk_domain *, pdomain, i) { - if (pdomain->api_domain == clkapidomain) { - status = pdomain->clkdomainclkvfsearch(g, pclk, - pdomain, &clkmhz, &volt, - CLK_PROG_VFE_ENTRY_LOGIC); - status = pdomain->clkdomainclkvfsearch(g, pclk, - pdomain, &clkmhz, &volt, - CLK_PROG_VFE_ENTRY_SRAM); - } - } - return status; -} - -static int clk_program_fllclks(struct gk20a *g, struct change_fll_clk *fllclk) -{ - int status = -EINVAL; - struct nvgpu_clk_domain 
*pdomain; - u8 i; - struct nvgpu_clk_pmupstate *pclk = g->clk_pmu; - unsigned long bit; - u16 clkmhz = 0; - struct clk_domain_3x_master *p3xmaster; - struct clk_domain_3x_slave *p3xslave; - unsigned long slaveidxmask; - struct nvgpu_set_fll_clk setfllclk; - - if (fllclk->api_clk_domain != CTRL_CLK_DOMAIN_GPCCLK) { - return -EINVAL; - } - if (fllclk->voltuv == 0U) { - return -EINVAL; - } - if (fllclk->clkmhz == 0U) { - return -EINVAL; - } - - setfllclk.voltuv = fllclk->voltuv; - setfllclk.gpc2clkmhz = fllclk->clkmhz; - - BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super), - struct nvgpu_clk_domain *, pdomain, i) { - - if (pdomain->api_domain == fllclk->api_clk_domain) { - - if (!pdomain->super.implements(g, &pdomain->super, - CTRL_CLK_CLK_DOMAIN_TYPE_3X_MASTER)) { - status = -EINVAL; - goto done; - } - p3xmaster = (struct clk_domain_3x_master *)pdomain; - slaveidxmask = p3xmaster->slave_idxs_mask; - for_each_set_bit(bit, &slaveidxmask, 32U) { - i = (u8)bit; - p3xslave = (struct clk_domain_3x_slave *) - CLK_CLK_DOMAIN_GET(pclk, i); - if ((p3xslave->super.super.super.api_domain != - CTRL_CLK_DOMAIN_XBARCLK) && - (p3xslave->super.super.super.api_domain != - CTRL_CLK_DOMAIN_SYSCLK)) { - continue; - } - clkmhz = 0; - status = p3xslave->clkdomainclkgetslaveclk(g, - pclk, - (struct nvgpu_clk_domain *)p3xslave, - &clkmhz, - fllclk->clkmhz); - if (status != 0) { - status = -EINVAL; - goto done; - } - if (p3xslave->super.super.super.api_domain == - CTRL_CLK_DOMAIN_XBARCLK) { - setfllclk.xbar2clkmhz = clkmhz; - } - if (p3xslave->super.super.super.api_domain == - CTRL_CLK_DOMAIN_SYSCLK) { - setfllclk.sys2clkmhz = clkmhz; - } - } - } - } - /*set regime ids */ - status = get_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK, - &setfllclk.current_regime_id_gpc); - if (status != 0) { - goto done; - } - - setfllclk.target_regime_id_gpc = find_regime_id(g, - CTRL_CLK_DOMAIN_GPCCLK, setfllclk.gpc2clkmhz); - - status = get_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK, - &setfllclk.current_regime_id_sys); - if (status != 0) { - goto done; - } - - setfllclk.target_regime_id_sys = find_regime_id(g, - CTRL_CLK_DOMAIN_SYSCLK, setfllclk.sys2clkmhz); - - status = get_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK, - &setfllclk.current_regime_id_xbar); - if (status != 0) { - goto done; - } - - setfllclk.target_regime_id_xbar = find_regime_id(g, - CTRL_CLK_DOMAIN_XBARCLK, setfllclk.xbar2clkmhz); - - status = clk_pmu_vf_inject(g, &setfllclk); - - if (status != 0) { - nvgpu_err(g, - "vf inject to change clk failed"); - } - - /* save regime ids */ - status = set_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK, - setfllclk.target_regime_id_xbar); - if (status != 0) { - goto done; - } - - status = set_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK, - setfllclk.target_regime_id_gpc); - if (status != 0) { - goto done; - } - - status = set_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK, - setfllclk.target_regime_id_sys); - if (status != 0) { - goto done; - } -done: - return status; -} - -int nvgpu_clk_set_boot_fll_clk_gv10x(struct gk20a *g) -{ - int status; - struct change_fll_clk bootfllclk; - u16 gpcclk_clkmhz = BOOT_GPCCLK_MHZ; - u32 gpcclk_voltuv = 0; - u32 voltuv = 0; - - status = nvgpu_clk_vf_point_cache(g); - if (status != 0) { - nvgpu_err(g,"caching failed"); - return status; - } - - status = clk_domain_get_f_or_v(g, CTRL_CLK_DOMAIN_GPCCLK, - &gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC); - if (status != 0) { - return status; - } - - voltuv = gpcclk_voltuv; - status = nvgpu_volt_set_voltage(g, voltuv, 0); - if (status != 0) { - nvgpu_err(g, - "attempt to set boot 
voltage failed %d", - voltuv); - } - bootfllclk.api_clk_domain = CTRL_CLK_DOMAIN_GPCCLK; - bootfllclk.clkmhz = gpcclk_clkmhz; - bootfllclk.voltuv = voltuv; - status = clk_program_fllclks(g, &bootfllclk); - if (status != 0) { - nvgpu_err(g, "attempt to set boot gpcclk failed"); - } - status = clk_pmu_freq_effective_avg_load(g, true); - /* - * Read clocks after some delay with below method - * & extract clock data from buffer - * u32 freqkHz; - * status = clk_freq_effective_avg(g, &freqkHz, CTRL_CLK_DOMAIN_GPCCLK | - * CTRL_CLK_DOMAIN_XBARCLK | - * CTRL_CLK_DOMAIN_SYSCLK | - * CTRL_CLK_DOMAIN_NVDCLK) - * */ - - return status; -} - -int nvgpu_clk_set_fll_clk_gv10x(struct gk20a *g) -{ - int status; - struct change_fll_clk bootfllclk; - u16 gpcclk_clkmhz = BOOT_GPCCLK_MHZ; - u32 gpcclk_voltuv = 0U; - u32 voltuv = 0U; - - status = nvgpu_clk_vf_point_cache(g); - if (status != 0) { - nvgpu_err(g, "caching failed"); - return status; - } - - status = clk_domain_get_f_or_v(g, CTRL_CLK_DOMAIN_GPCCLK, - &gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC); - if (status != 0) { - return status; - } - - voltuv = gpcclk_voltuv; - - status = nvgpu_volt_set_voltage(g, voltuv, 0U); - if (status != 0) { - nvgpu_err(g, "attempt to set max voltage failed %d", voltuv); - } - - bootfllclk.api_clk_domain = CTRL_CLK_DOMAIN_GPCCLK; - bootfllclk.clkmhz = gpcclk_clkmhz; - bootfllclk.voltuv = voltuv; - status = clk_program_fllclks(g, &bootfllclk); - if (status != 0) { - nvgpu_err(g, "attempt to set max gpcclk failed"); - } return status; } int nvgpu_clk_set_boot_fll_clk_tu10x(struct gk20a *g) { - struct nvgpu_pmu *pmu = &g->pmu; - struct nv_pmu_rpc_perf_change_seq_queue_change rpc; - struct ctrl_perf_change_seq_change_input change_input; - struct clk_set_info *p0_clk_set_info; - struct nvgpu_clk_domain *pclk_domain; - int status = 0; - u8 i = 0, gpcclk_domain=0; - u32 gpcclk_clkmhz=0, gpcclk_voltuv=0; - u32 vmin_uv = 0; - - (void) memset(&change_input, 0, - sizeof(struct ctrl_perf_change_seq_change_input)); - - BOARDOBJGRP_FOR_EACH(&(g->clk_pmu->clk_domainobjs.super.super), - struct nvgpu_clk_domain *, pclk_domain, i) { - - p0_clk_set_info = pstate_get_clk_set_info(g, CTRL_PERF_PSTATE_P0, - pclk_domain->domain); - - switch (pclk_domain->api_domain) { - case CTRL_CLK_DOMAIN_GPCCLK: - gpcclk_domain = i; - gpcclk_clkmhz = p0_clk_set_info->max_mhz; - change_input.clk[i].clk_freq_khz = - p0_clk_set_info->max_mhz * 1000U; - change_input.clk_domains_mask.super.data[0] |= (u32) BIT(i); - break; - case CTRL_CLK_DOMAIN_XBARCLK: - case CTRL_CLK_DOMAIN_SYSCLK: - case CTRL_CLK_DOMAIN_NVDCLK: - case CTRL_CLK_DOMAIN_HOSTCLK: - change_input.clk[i].clk_freq_khz = - p0_clk_set_info->max_mhz * 1000U; - change_input.clk_domains_mask.super.data[0] |= (u32) BIT(i); - break; - default: - nvgpu_pmu_dbg(g, "Fixed clock domain"); - break; - } - } - - change_input.pstate_index = 0U; - change_input.flags = (u32)CTRL_PERF_CHANGE_SEQ_CHANGE_FORCE; - change_input.vf_points_cache_counter = 0xFFFFFFFFU; - - status = clk_domain_freq_to_volt(g, gpcclk_domain, - &gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC); - - status = nvgpu_volt_get_vmin_ps35(g, &vmin_uv); - if(status != 0) - { - nvgpu_pmu_dbg(g, "Get vmin failed, proceeding with freq_to_volt value"); - } - if((status == 0) && (vmin_uv > gpcclk_voltuv)) { - gpcclk_voltuv = vmin_uv; - nvgpu_pmu_dbg(g, "Vmin is higher than evaluated Volt"); - } - - change_input.volt[0].voltage_uv = gpcclk_voltuv; - change_input.volt[0].voltage_min_noise_unaware_uv = gpcclk_voltuv; - 
change_input.volt_rails_mask.super.data[0] = 1U; - - /* RPC to PMU to queue to execute change sequence request*/ - (void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_perf_change_seq_queue_change )); - rpc.change = change_input; - rpc.change.pstate_index = 0; - PMU_RPC_EXECUTE_CPB(status, pmu, PERF, CHANGE_SEQ_QUEUE_CHANGE, &rpc, 0); - if (status != 0) { - nvgpu_err(g, "Failed to execute Change Seq RPC status=0x%x", - status); - } - - /* Wait for sync change to complete. */ - if ((rpc.change.flags & CTRL_PERF_CHANGE_SEQ_CHANGE_ASYNC) == 0U) { - nvgpu_msleep(20); - } - - return status; -} - -int clk_domain_volt_to_freq(struct gk20a *g, u8 clkdomain_idx, - u32 *pclkmhz, u32 *pvoltuv, u8 railidx) -{ - struct nv_pmu_rpc_clk_domain_35_prog_freq_to_volt rpc; - struct nvgpu_pmu *pmu = &g->pmu; int status = -EINVAL; + status = g->clk_pmu->set_boot_fll(g); - (void)memset(&rpc, 0, sizeof(struct nv_pmu_rpc_clk_domain_35_prog_freq_to_volt )); - rpc.volt_rail_idx = nvgpu_volt_rail_volt_domain_convert_to_idx(g, railidx); - rpc.clk_domain_idx = clkdomain_idx; - rpc.voltage_type = CTRL_VOLT_DOMAIN_LOGIC; - rpc.input.value = *pvoltuv; - PMU_RPC_EXECUTE_CPB(status, pmu, CLK, CLK_DOMAIN_35_PROG_VOLT_TO_FREQ, &rpc, 0); - if (status != 0) { - nvgpu_err(g, "Failed to execute Freq to Volt RPC status=0x%x", - status); - } - *pclkmhz = rpc.output.value; - return status; -} - -int clk_domain_freq_to_volt(struct gk20a *g, u8 clkdomain_idx, - u32 *pclkmhz, u32 *pvoltuv, u8 railidx) -{ - struct nv_pmu_rpc_clk_domain_35_prog_freq_to_volt rpc; - struct nvgpu_pmu *pmu = &g->pmu; - int status = -EINVAL; - - (void)memset(&rpc, 0, sizeof(struct nv_pmu_rpc_clk_domain_35_prog_freq_to_volt )); - rpc.volt_rail_idx = nvgpu_volt_rail_volt_domain_convert_to_idx(g, railidx); - rpc.clk_domain_idx = clkdomain_idx; - rpc.voltage_type = CTRL_VOLT_DOMAIN_LOGIC; - rpc.input.value = *pclkmhz; - PMU_RPC_EXECUTE_CPB(status, pmu, CLK, CLK_DOMAIN_35_PROG_FREQ_TO_VOLT, &rpc, 0); - if (status != 0) { - nvgpu_err(g, "Failed to execute Freq to Volt RPC status=0x%x", - status); - } - *pvoltuv = rpc.output.value; - return status; -} - - -int clk_domain_get_f_or_v(struct gk20a *g, u32 clkapidomain, - u16 *pclkmhz, u32 *pvoltuv, u8 railidx) -{ - int status = -EINVAL; - struct nvgpu_clk_domain *pdomain; - u8 i; - struct nvgpu_clk_pmupstate *pclk = g->clk_pmu; - u8 rail; - - if ((pclkmhz == NULL) || (pvoltuv == NULL)) { - return -EINVAL; - } - - if (railidx == CTRL_VOLT_DOMAIN_LOGIC) { - rail = CLK_PROG_VFE_ENTRY_LOGIC; - } else if (railidx == CTRL_VOLT_DOMAIN_SRAM) { - rail = CLK_PROG_VFE_ENTRY_SRAM; - } else { - return -EINVAL; - } - - BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super), - struct nvgpu_clk_domain *, pdomain, i) { - if (pdomain->api_domain == clkapidomain) { - status = pdomain->clkdomainclkvfsearch(g, pclk, - pdomain, pclkmhz, pvoltuv, rail); - return status; - } - } return status; } @@ -1155,3 +280,74 @@ void nvgpu_clk_free_pmupstate(struct gk20a *g) nvgpu_kfree(g, g->clk_pmu); g->clk_pmu = NULL; } + +int nvgpu_clk_set_req_fll_clk_ps35(struct gk20a *g, + struct nvgpu_clk_slave_freq *vf_point) +{ + struct nvgpu_pmu *pmu = &g->pmu; + struct nv_pmu_rpc_perf_change_seq_queue_change rpc; + struct ctrl_perf_change_seq_change_input change_input; + int status = 0; + u8 gpcclk_domain = 0U; + u32 gpcclk_voltuv = 0U, gpcclk_clkmhz = 0U; + u32 vmin_uv = 0, vmargin_uv = 0U, fmargin_mhz = 0U; + + (void) memset(&change_input, 0, + sizeof(struct ctrl_perf_change_seq_change_input)); + + g->clk_pmu->set_p0_clks(g, &gpcclk_domain, &gpcclk_clkmhz, + 
vf_point, &change_input); + + change_input.pstate_index = 0U; + change_input.flags = (u32)CTRL_PERF_CHANGE_SEQ_CHANGE_FORCE; + change_input.vf_points_cache_counter = 0xFFFFFFFFU; + + status = nvgpu_vfe_get_freq_margin_limit(g, &fmargin_mhz); + if (status != 0) { + nvgpu_err(g, "Failed to fetch Fmargin status=0x%x", status); + return status; + } + + gpcclk_clkmhz += fmargin_mhz; + status = nvgpu_clk_domain_freq_to_volt(g, gpcclk_domain, + &gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC); + + status = nvgpu_vfe_get_volt_margin_limit(g, &vmargin_uv); + if (status != 0) { + nvgpu_err(g, "Failed to fetch Vmargin status=0x%x", status); + return status; + } + + status = nvgpu_volt_get_vmin_ps35(g, &vmin_uv); + if (status != 0) { + nvgpu_pmu_dbg(g, + "Get vmin failed, proceeding with freq_to_volt value"); + } + if ((status == 0) && (vmin_uv > gpcclk_voltuv)) { + gpcclk_voltuv = vmin_uv; + nvgpu_log_fn(g, "Vmin is higher than evaluated Volt"); + } + + change_input.volt[0].voltage_uv = gpcclk_voltuv; + change_input.volt[0].voltage_min_noise_unaware_uv = gpcclk_voltuv; + change_input.volt_rails_mask.super.data[0] = 1U; + + /* RPC to PMU to queue to execute change sequence request*/ + (void) memset(&rpc, 0, + sizeof(struct nv_pmu_rpc_perf_change_seq_queue_change)); + rpc.change = change_input; + rpc.change.pstate_index = 0; + PMU_RPC_EXECUTE_CPB(status, pmu, PERF, + CHANGE_SEQ_QUEUE_CHANGE, &rpc, 0); + if (status != 0) { + nvgpu_err(g, "Failed to execute Change Seq RPC status=0x%x", + status); + } + + /* Wait for sync change to complete. */ + if ((rpc.change.flags & CTRL_PERF_CHANGE_SEQ_CHANGE_ASYNC) == 0U) { + nvgpu_msleep(20); + } + return status; +} + diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk.h b/drivers/gpu/nvgpu/common/pmu/clk/clk.h deleted file mode 100644 index db35f4362..000000000 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk.h +++ /dev/null @@ -1,111 +0,0 @@ -/* - * general clock structures & definitions - * - * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ -#ifndef NVGPU_CLK_CLK_H -#define NVGPU_CLK_CLK_H - -#include - -#include "clk_vin.h" -#include "clk_fll.h" -#include "clk_domain.h" -#include "clk_prog.h" -#include "clk_mclk.h" -#include "clk_freq_controller.h" -#include "clk_freq_domain.h" - -#define NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_SKIP 0x10U -#define NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_MASK 0x1FU -#define NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_SHIFT 0U -#define BOOT_GPCCLK_MHZ 952U - -struct gk20a; - -int clk_set_boot_fll_clk(struct gk20a *g); - -struct clockentry { - u8 vbios_clk_domain; - u8 clk_which; - u8 perf_index; - u32 api_clk_domain; -}; - -struct change_fll_clk { - u32 api_clk_domain; - u16 clkmhz; - u32 voltuv; -}; - -#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_MAX_NUMCLKS 9U - -struct vbios_clock_domain { - u8 clock_type; - u8 num_domains; - struct clockentry clock_entry[NV_PERF_HEADER_4X_CLOCKS_DOMAINS_MAX_NUMCLKS]; -}; - -struct vbios_clocks_table_1x_hal_clock_entry { - u32 domain; - bool b_noise_aware_capable; - u8 clk_vf_curve_count; -}; - -#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_GPC2CLK 0U -#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_XBAR2CLK 1U -#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_DRAMCLK 2U -#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_SYS2CLK 3U -#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_HUB2CLK 4U -#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_MSDCLK 5U -#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_PWRCLK 6U -#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_DISPCLK 7U -#define NV_PERF_HEADER_4X_CLOCKS_DOMAINS_4_NUMCLKS 8U - -#define PERF_CLK_MCLK 0U -#define PERF_CLK_DISPCLK 1U -#define PERF_CLK_GPC2CLK 2U -#define PERF_CLK_HOSTCLK 3U -#define PERF_CLK_LTC2CLK 4U -#define PERF_CLK_SYS2CLK 5U -#define PERF_CLK_HUB2CLK 6U -#define PERF_CLK_LEGCLK 7U -#define PERF_CLK_MSDCLK 8U -#define PERF_CLK_XCLK 9U -#define PERF_CLK_PWRCLK 10U -#define PERF_CLK_XBAR2CLK 11U -#define PERF_CLK_PCIEGENCLK 12U -#define PERF_CLK_NUM 13U - -struct nvgpu_set_fll_clk; - -int clk_domain_print_vf_table(struct gk20a *g, u32 clkapidomain); -int clk_domain_get_f_or_v(struct gk20a *g, u32 clkapidomain, - u16 *pclkmhz, u32 *pvoltuv, u8 railidx); -int clk_domain_freq_to_volt(struct gk20a *g, u8 clkdomain_idx, - u32 *pclkmhz, u32 *pvoltuv, u8 railidx); -int clk_domain_volt_to_freq( struct gk20a *g, u8 clkdomain_idx, - u32 *pclkmhz, u32 *pvoltuv, u8 railidx); -int clk_set_fll_clks(struct gk20a *g, struct nvgpu_set_fll_clk *setfllclk); -int clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx); -int clk_pmu_freq_effective_avg_load(struct gk20a *g, bool bload); -int clk_freq_effective_avg(struct gk20a *g, u32 *freqkHz, u32 clkDomainMask); -#endif /* NVGPU_CLK_CLK_H */ diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_domain.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_domain.c index e2b83a986..8bab93b18 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_domain.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_domain.c @@ -21,16 +21,46 @@ */ #include +#include #include #include #include -#include +#include +#include #include +#include +#include +#include #include +#include +#include -#include "clk.h" -#include "clk_fll.h" #include "clk_domain.h" +#include "clk_prog.h" + +struct nvgpu_clk_domain_rpc_pmucmdhandler_params { + struct nv_pmu_clk_rpc *prpccall; + u32 success; +}; + +static void nvgpu_clk_domain_rpc_pmucmdhandler(struct gk20a *g, + struct pmu_msg *msg, void *param, u32 handle, u32 status) +{ + struct nvgpu_clk_domain_rpc_pmucmdhandler_params *phandlerparams = + (struct nvgpu_clk_domain_rpc_pmucmdhandler_params *)param; + + nvgpu_log_info(g, " "); + 
+ if (msg->msg.clk.msg_type != NV_PMU_CLK_MSG_ID_RPC) { + nvgpu_err(g, "unsupported msg for CLK LOAD RPC %x", + msg->msg.clk.msg_type); + return; + } + + if (phandlerparams->prpccall->b_supported) { + phandlerparams->success = 1; + } +} static struct nvgpu_clk_domain *construct_clk_domain(struct gk20a *g, void *pargs); @@ -41,6 +71,12 @@ static int devinit_get_clocks_table(struct gk20a *g, static int clk_domain_pmudatainit_super(struct gk20a *g, struct boardobj *board_obj_ptr, struct nv_pmu_boardobj *ppmudata); +struct vbios_clocks_table_1x_hal_clock_entry { + u32 domain; + bool b_noise_aware_capable; + u8 clk_vf_curve_count; +}; + static struct vbios_clocks_table_1x_hal_clock_entry vbiosclktbl1xhalentry_gp[] = { { CLKWHICH_GPC2CLK, true, 1, }, @@ -122,6 +158,13 @@ static u32 clktranslatehalmumsettoapinumset(u32 clkhaldomains) return clkapidomains; } +static struct nvgpu_clk_domain *clk_get_clk_domain_from_index( + struct nvgpu_clk_pmupstate *pclk, u8 idx) +{ + return (struct nvgpu_clk_domain *)(void *)BOARDOBJGRP_OBJ_GET_BY_IDX( + &(pclk->clk_domainobjs->super.super), idx); +} + static int _clk_domains_pmudatainit_3x(struct gk20a *g, struct boardobjgrp *pboardobjgrp, struct nv_pmu_boardobjgrp_super *pboardobjgrppmu) @@ -206,7 +249,8 @@ int nvgpu_clk_domain_sw_setup(struct gk20a *g) nvgpu_log_info(g, " "); - status = boardobjgrpconstruct_e32(g, &g->clk_pmu->clk_domainobjs.super); + status = boardobjgrpconstruct_e32(g, + &g->clk_pmu->clk_domainobjs->super); if (status != 0) { nvgpu_err(g, "error creating boardobjgrp for clk domain, status - 0x%x", @@ -214,8 +258,8 @@ int nvgpu_clk_domain_sw_setup(struct gk20a *g) goto done; } - pboardobjgrp = &g->clk_pmu->clk_domainobjs.super.super; - pclkdomainobjs = &(g->clk_pmu->clk_domainobjs); + pboardobjgrp = &g->clk_pmu->clk_domainobjs->super.super; + pclkdomainobjs = g->clk_pmu->clk_domainobjs; BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, CLK_DOMAIN); @@ -291,37 +335,39 @@ int nvgpu_clk_domain_sw_setup(struct gk20a *g) } pdomain_master_35 = (struct clk_domain_35_master *)pdomain; - status = boardobjgrpmask_bitset( - &pdomain_master_35->master_slave_domains_grp_mask.super, i); + status = boardobjgrpmask_bitset(&pdomain_master_35-> + master_slave_domains_grp_mask.super, i); if (status != 0) { goto done; } } if (pdomain->super.implements(g, &pdomain->super, - CTRL_CLK_CLK_DOMAIN_TYPE_3X_SLAVE)) { - pdomain_slave = - (struct clk_domain_3x_slave *)pdomain; - pdomain_master = - (struct clk_domain_3x_master *) - (CLK_CLK_DOMAIN_GET((g->clk_pmu), - pdomain_slave->master_idx)); + CTRL_CLK_CLK_DOMAIN_TYPE_3X_SLAVE)) { + pdomain_slave = + (struct clk_domain_3x_slave *)pdomain; + pdomain_master = (struct clk_domain_3x_master *) + (g->clk_pmu->clk_get_clk_domain((g->clk_pmu), + pdomain_slave->master_idx)); pdomain_master->slave_idxs_mask |= BIT32(i); } if (pdomain->super.implements(g, &pdomain->super, - CTRL_CLK_CLK_DOMAIN_TYPE_35_SLAVE)) { - pdomain_slave_35 = - (struct clk_domain_35_slave *)pdomain; - pdomain_master_35 = - (struct clk_domain_35_master *) - (CLK_CLK_DOMAIN_GET((g->clk_pmu), - pdomain_slave_35->slave.master_idx)); + CTRL_CLK_CLK_DOMAIN_TYPE_35_SLAVE)) { + pdomain_slave_35 = + (struct clk_domain_35_slave *)pdomain; + pdomain_master_35 = + (struct clk_domain_35_master *) + (g->clk_pmu->clk_get_clk_domain((g->clk_pmu), + pdomain_slave_35->slave.master_idx)); pdomain_master_35->master.slave_idxs_mask |= BIT32(i); - pdomain_slave_35->super.clk_pos = boardobjgrpmask_bitsetcount( - &pdomain_master_35->master_slave_domains_grp_mask.super); + 
pdomain_slave_35->super.clk_pos = + boardobjgrpmask_bitsetcount( + &pdomain_master_35-> + master_slave_domains_grp_mask.super); status = boardobjgrpmask_bitset( - &pdomain_master_35->master_slave_domains_grp_mask.super, i); + &pdomain_master_35-> + master_slave_domains_grp_mask.super, i); if (status != 0) { goto done; } @@ -341,7 +387,7 @@ int nvgpu_clk_domain_pmu_setup(struct gk20a *g) nvgpu_log_info(g, " "); - pboardobjgrp = &g->clk_pmu->clk_domainobjs.super.super; + pboardobjgrp = &g->clk_pmu->clk_domainobjs->super.super; if (!pboardobjgrp->bconstructed) { return -EINVAL; @@ -983,7 +1029,7 @@ static int clkdomaingetslaveclk(struct gk20a *g, if(ver == NVGPU_GPUID_GV100) { slaveidx = BOARDOBJ_GET_IDX(pdomain); p3xmaster = (struct clk_domain_3x_master *) - CLK_CLK_DOMAIN_GET(pclk, + g->clk_pmu->clk_get_clk_domain(pclk, ((struct clk_domain_3x_slave *) pdomain)->master_idx); pprog = CLK_CLK_PROG_GET(pclk, p3xmaster->super.clk_prog_idx_first); @@ -993,8 +1039,8 @@ static int clkdomaingetslaveclk(struct gk20a *g, slaveidx, pclkmhz, masterclkmhz); } else { slaveidx = BOARDOBJ_GET_IDX(pdomain); - p35master = (struct clk_domain_35_master *) - CLK_CLK_DOMAIN_GET(pclk, + p35master = (struct clk_domain_35_master *)(void *) + g->clk_pmu->clk_get_clk_domain(pclk, ((struct clk_domain_35_slave *) pdomain)->slave.master_idx); @@ -1044,8 +1090,8 @@ static int clkdomainvfsearch(struct gk20a *g, CTRL_CLK_CLK_DOMAIN_TYPE_3X_SLAVE)) { slaveidx = BOARDOBJ_GET_IDX(pdomain); pslaveidx = &slaveidx; - p3xmaster = (struct clk_domain_3x_master *) - CLK_CLK_DOMAIN_GET(pclk, + p3xmaster = (struct clk_domain_3x_master *)(void *) + g->clk_pmu->clk_get_clk_domain(pclk, ((struct clk_domain_3x_slave *) pdomain)->master_idx); } @@ -1171,7 +1217,7 @@ static int clk_domain_pmudatainit_35_prog(struct gk20a *g, struct clk_domain_35_prog *pclk_domain_35_prog; struct clk_domain_3x_prog *pclk_domain_3x_prog; struct nv_pmu_clk_clk_domain_35_prog_boardobj_set *pset; - struct nvgpu_clk_domains *pdomains = &(g->clk_pmu->clk_domainobjs); + struct nvgpu_clk_domains *pdomains = g->clk_pmu->clk_domainobjs; nvgpu_log_info(g, " "); @@ -1210,7 +1256,7 @@ static int _clk_domain_pmudatainit_3x_prog(struct gk20a *g, int status = 0; struct clk_domain_3x_prog *pclk_domain_3x_prog; struct nv_pmu_clk_clk_domain_30_prog_boardobj_set *pset; - struct nvgpu_clk_domains *pdomains = &(g->clk_pmu->clk_domainobjs); + struct nvgpu_clk_domains *pdomains = g->clk_pmu->clk_domainobjs; nvgpu_log_info(g, " "); @@ -1756,14 +1802,15 @@ static int clk_domain_pmudatainit_super(struct gk20a *g, return status; } -int clk_domain_clk_prog_link(struct gk20a *g, struct nvgpu_clk_pmupstate *pclk) +static int clk_domain_clk_prog_link(struct gk20a *g, + struct nvgpu_clk_pmupstate *pclk) { int status = 0; struct nvgpu_clk_domain *pdomain; u8 i; /* Iterate over all CLK_DOMAINs and flatten their VF curves.*/ - BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super), + BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs->super.super), struct nvgpu_clk_domain *, pdomain, i) { status = pdomain->clkdomainclkproglink(g, pclk, pdomain); if (status != 0) { @@ -1777,3 +1824,400 @@ int clk_domain_clk_prog_link(struct gk20a *g, struct nvgpu_clk_pmupstate *pclk) done: return status; } + +int nvgpu_clk_pmu_clk_domains_load(struct gk20a *g) +{ + struct pmu_cmd cmd; + struct pmu_payload payload; + struct nv_pmu_clk_rpc rpccall; + struct nvgpu_clk_domain_rpc_pmucmdhandler_params handler; + struct nv_pmu_clk_load *clkload; + int status; + u32 seqdesc; + + (void) memset(&payload, 0, sizeof(struct 
pmu_payload)); + (void) memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc)); + (void) memset(&handler, 0, sizeof( + struct nvgpu_clk_domain_rpc_pmucmdhandler_params)); + + rpccall.function = NV_PMU_CLK_RPC_ID_LOAD; + clkload = &rpccall.params.clk_load; + clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_CLK_DOMAIN; + + cmd.hdr.unit_id = PMU_UNIT_CLK; + cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) + + (u32)sizeof(struct pmu_hdr); + + cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC; + cmd.cmd.clk.generic.b_perf_daemon_cmd = false; + + payload.in.buf = (u8 *)&rpccall; + payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc); + payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; + nvgpu_assert(NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET < U64(U32_MAX)); + payload.in.offset = (u32)NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET; + + payload.out.buf = (u8 *)&rpccall; + payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc); + payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; + nvgpu_assert(NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET < U64(U32_MAX)); + payload.out.offset = (u32)NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET; + + handler.prpccall = &rpccall; + handler.success = 0; + status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, + PMU_COMMAND_QUEUE_LPQ, + nvgpu_clk_domain_rpc_pmucmdhandler, (void *)&handler, + &seqdesc); + + if (status != 0) { + nvgpu_err(g, "unable to post clk RPC cmd %x", + cmd.cmd.clk.cmd_type); + goto done; + } + + (void) pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g), + &handler.success, 1); + + if (handler.success == 0U) { + nvgpu_err(g, "rpc call to load clk_domains cal failed"); + status = -EINVAL; + } + +done: + return status; +} + +static int clk_get_fll_clks_per_clk_domain(struct gk20a *g, + struct nvgpu_set_fll_clk *setfllclk) +{ + int status = -EINVAL; + struct nvgpu_clk_domain *pdomain; + u8 i; + struct nvgpu_clk_pmupstate *pclk = g->clk_pmu; + unsigned long bit; + u16 clkmhz = 0; + struct clk_domain_35_master *p35master; + struct clk_domain_35_slave *p35slave; + unsigned long slaveidxmask; + + if (setfllclk->gpc2clkmhz == 0U) { + return -EINVAL; + } + + BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs->super.super), + struct nvgpu_clk_domain *, pdomain, i) { + + if (pdomain->api_domain == CTRL_CLK_DOMAIN_GPCCLK) { + if (!pdomain->super.implements(g, &pdomain->super, + CTRL_CLK_CLK_DOMAIN_TYPE_35_MASTER)) { + status = -EINVAL; + goto done; + } + p35master = (struct clk_domain_35_master *) + (void *)pdomain; + slaveidxmask = p35master->master.slave_idxs_mask; + for_each_set_bit(bit, &slaveidxmask, 32U) { + i = (u8)bit; + p35slave = (struct clk_domain_35_slave *) + (void *) + g->clk_pmu->clk_get_clk_domain(pclk, i); + + clkmhz = 0; + status = p35slave-> + slave.clkdomainclkgetslaveclk(g, + pclk, (struct nvgpu_clk_domain *) + (void *)p35slave, + &clkmhz, setfllclk->gpc2clkmhz); + if (status != 0) { + status = -EINVAL; + goto done; + } + if (p35slave->super.super.super.super. + api_domain == CTRL_CLK_DOMAIN_XBARCLK) { + setfllclk->xbar2clkmhz = clkmhz; + } + if (p35slave->super.super.super.super. + api_domain == CTRL_CLK_DOMAIN_SYSCLK) { + setfllclk->sys2clkmhz = clkmhz; + } + if (p35slave->super.super.super.super. + api_domain == CTRL_CLK_DOMAIN_NVDCLK) { + setfllclk->nvdclkmhz = clkmhz; + } + if (p35slave->super.super.super.super. 
+ api_domain == CTRL_CLK_DOMAIN_HOSTCLK) { + setfllclk->hostclkmhz = clkmhz; + } + } + } + } +done: + return status; +} + +static int clk_set_boot_fll_clks_per_clk_domain(struct gk20a *g) +{ + struct nvgpu_pmu *pmu = &g->pmu; + struct nv_pmu_rpc_perf_change_seq_queue_change rpc; + struct ctrl_perf_change_seq_change_input change_input; + struct clk_set_info *p0_clk_set_info; + struct nvgpu_clk_domain *pclk_domain; + int status = 0; + u8 i = 0, gpcclk_domain = 0; + u32 gpcclk_clkmhz = 0, gpcclk_voltuv = 0; + u32 vmin_uv = 0; + + (void) memset(&change_input, 0, + sizeof(struct ctrl_perf_change_seq_change_input)); + + BOARDOBJGRP_FOR_EACH(&(g->clk_pmu->clk_domainobjs->super.super), + struct nvgpu_clk_domain *, pclk_domain, i) { + + p0_clk_set_info = pstate_get_clk_set_info(g, + CTRL_PERF_PSTATE_P0, pclk_domain->domain); + + switch (pclk_domain->api_domain) { + case CTRL_CLK_DOMAIN_GPCCLK: + gpcclk_domain = i; + gpcclk_clkmhz = p0_clk_set_info->max_mhz; + change_input.clk[i].clk_freq_khz = + (u32)p0_clk_set_info->max_mhz * 1000U; + change_input.clk_domains_mask.super.data[0] |= + (u32) BIT(i); + break; + case CTRL_CLK_DOMAIN_XBARCLK: + case CTRL_CLK_DOMAIN_SYSCLK: + case CTRL_CLK_DOMAIN_NVDCLK: + case CTRL_CLK_DOMAIN_HOSTCLK: + change_input.clk[i].clk_freq_khz = + (u32)p0_clk_set_info->max_mhz * 1000U; + change_input.clk_domains_mask.super.data[0] |= + (u32) BIT(i); + break; + default: + nvgpu_pmu_dbg(g, "Fixed clock domain"); + break; + } + } + + change_input.pstate_index = 0U; + change_input.flags = (u32)CTRL_PERF_CHANGE_SEQ_CHANGE_FORCE; + change_input.vf_points_cache_counter = 0xFFFFFFFFU; + + status = nvgpu_clk_domain_freq_to_volt(g, gpcclk_domain, + &gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC); + + status = nvgpu_volt_get_vmin_ps35(g, &vmin_uv); + if (status != 0) { + nvgpu_pmu_dbg(g, "Get vmin failed, proceeding with freq_to_volt value"); + } + if ((status == 0) && (vmin_uv > gpcclk_voltuv)) { + gpcclk_voltuv = vmin_uv; + nvgpu_pmu_dbg(g, "Vmin is higher than evaluated Volt"); + } + + change_input.volt[0].voltage_uv = gpcclk_voltuv; + change_input.volt[0].voltage_min_noise_unaware_uv = gpcclk_voltuv; + change_input.volt_rails_mask.super.data[0] = 1U; + + /* RPC to PMU to queue to execute change sequence request*/ + (void) memset(&rpc, 0, sizeof( + struct nv_pmu_rpc_perf_change_seq_queue_change)); + rpc.change = change_input; + rpc.change.pstate_index = 0; + PMU_RPC_EXECUTE_CPB(status, pmu, PERF, + CHANGE_SEQ_QUEUE_CHANGE, &rpc, 0); + if (status != 0) { + nvgpu_err(g, "Failed to execute Change Seq RPC status=0x%x", + status); + } + + /* Wait for sync change to complete. 
*/ + if ((rpc.change.flags & CTRL_PERF_CHANGE_SEQ_CHANGE_ASYNC) == 0U) { + nvgpu_msleep(20); + } + + return status; +} + +static void clk_set_p0_clk_per_domain(struct gk20a *g, u8 *gpcclk_domain, + u32 *gpcclk_clkmhz, + struct nvgpu_clk_slave_freq *vf_point, + struct ctrl_perf_change_seq_change_input *change_input) +{ + struct nvgpu_clk_domain *pclk_domain; + struct clk_set_info *p0_info; + u32 max_clkmhz; + u16 max_ratio; + u8 i = 0; + + BOARDOBJGRP_FOR_EACH(&(g->clk_pmu->clk_domainobjs->super.super), + struct nvgpu_clk_domain *, pclk_domain, i) { + + switch (pclk_domain->api_domain) { + case CTRL_CLK_DOMAIN_GPCCLK: + *gpcclk_domain = i; + *gpcclk_clkmhz = vf_point->gpc_mhz; + + p0_info = pstate_get_clk_set_info(g, + CTRL_PERF_PSTATE_P0, CLKWHICH_GPCCLK); + if (p0_info == NULL) { + nvgpu_err(g, "failed to get GPCCLK P0 info"); + break; + } + if (vf_point->gpc_mhz < p0_info->min_mhz) { + vf_point->gpc_mhz = p0_info->min_mhz; + } + if (vf_point->gpc_mhz > p0_info->max_mhz) { + vf_point->gpc_mhz = p0_info->max_mhz; + } + change_input->clk[i].clk_freq_khz = + (u32)vf_point->gpc_mhz * 1000U; + change_input->clk_domains_mask.super.data[0] |= + (u32) BIT(i); + break; + case CTRL_CLK_DOMAIN_XBARCLK: + p0_info = pstate_get_clk_set_info(g, + CTRL_PERF_PSTATE_P0, CLKWHICH_XBARCLK); + if (p0_info == NULL) { + nvgpu_err(g, "failed to get XBARCLK P0 info"); + break; + } + max_ratio = (vf_point->xbar_mhz*100U)/vf_point->gpc_mhz; + if (vf_point->xbar_mhz < p0_info->min_mhz) { + vf_point->xbar_mhz = p0_info->min_mhz; + } + if (vf_point->xbar_mhz > p0_info->max_mhz) { + vf_point->xbar_mhz = p0_info->max_mhz; + } + change_input->clk[i].clk_freq_khz = + (u32)vf_point->xbar_mhz * 1000U; + change_input->clk_domains_mask.super.data[0] |= + (u32) BIT(i); + if (vf_point->gpc_mhz < vf_point->xbar_mhz) { + max_clkmhz = (((u32)vf_point->xbar_mhz * 100U) / + (u32)max_ratio); + if (*gpcclk_clkmhz < max_clkmhz) { + *gpcclk_clkmhz = max_clkmhz; + } + } + break; + case CTRL_CLK_DOMAIN_SYSCLK: + p0_info = pstate_get_clk_set_info(g, + CTRL_PERF_PSTATE_P0, CLKWHICH_SYSCLK); + if (p0_info == NULL) { + nvgpu_err(g, "failed to get SYSCLK P0 info"); + break; + } + max_ratio = (vf_point->sys_mhz*100U)/vf_point->gpc_mhz; + if (vf_point->sys_mhz < p0_info->min_mhz) { + vf_point->sys_mhz = p0_info->min_mhz; + } + if (vf_point->sys_mhz > p0_info->max_mhz) { + vf_point->sys_mhz = p0_info->max_mhz; + } + change_input->clk[i].clk_freq_khz = + (u32)vf_point->sys_mhz * 1000U; + change_input->clk_domains_mask.super.data[0] |= + (u32) BIT(i); + if (vf_point->gpc_mhz < vf_point->sys_mhz) { + max_clkmhz = (((u32)vf_point->sys_mhz * 100U) / + (u32)max_ratio); + if (*gpcclk_clkmhz < max_clkmhz) { + *gpcclk_clkmhz = max_clkmhz; + } + } + break; + case CTRL_CLK_DOMAIN_NVDCLK: + p0_info = pstate_get_clk_set_info(g, + CTRL_PERF_PSTATE_P0, CLKWHICH_NVDCLK); + if (p0_info == NULL) { + nvgpu_err(g, "failed to get NVDCLK P0 info"); + break; + } + max_ratio = (vf_point->nvd_mhz*100U)/vf_point->gpc_mhz; + if (vf_point->nvd_mhz < p0_info->min_mhz) { + vf_point->nvd_mhz = p0_info->min_mhz; + } + if (vf_point->nvd_mhz > p0_info->max_mhz) { + vf_point->nvd_mhz = p0_info->max_mhz; + } + change_input->clk[i].clk_freq_khz = + (u32)vf_point->nvd_mhz * 1000U; + change_input->clk_domains_mask.super.data[0] |= + (u32) BIT(i); + if (vf_point->gpc_mhz < vf_point->nvd_mhz) { + max_clkmhz = (((u32)vf_point->nvd_mhz * 100U) / + (u32)max_ratio); + if (*gpcclk_clkmhz < max_clkmhz) { + *gpcclk_clkmhz = max_clkmhz; + } + } + break; + case CTRL_CLK_DOMAIN_HOSTCLK: + p0_info 
= pstate_get_clk_set_info(g, + CTRL_PERF_PSTATE_P0, CLKWHICH_HOSTCLK); + if (p0_info == NULL) { + nvgpu_err(g, "failed to get HOSTCLK P0 info"); + break; + } + max_ratio = (vf_point->host_mhz*100U)/vf_point->gpc_mhz; + if (vf_point->host_mhz < p0_info->min_mhz) { + vf_point->host_mhz = p0_info->min_mhz; + } + if (vf_point->host_mhz > p0_info->max_mhz) { + vf_point->host_mhz = p0_info->max_mhz; + } + change_input->clk[i].clk_freq_khz = + (u32)vf_point->host_mhz * 1000U; + change_input->clk_domains_mask.super.data[0] |= + (u32) BIT(i); + if (vf_point->gpc_mhz < vf_point->host_mhz) { + max_clkmhz = (((u32)vf_point->host_mhz * 100U) / + (u32)max_ratio); + if (*gpcclk_clkmhz < max_clkmhz) { + *gpcclk_clkmhz = max_clkmhz; + } + } + break; + default: + nvgpu_pmu_dbg(g, "Fixed clock domain"); + break; + } + } +} + +int nvgpu_clk_domain_init_pmupstate(struct gk20a *g) +{ + /* If already allocated, do not re-allocate */ + if (g->clk_pmu->clk_domainobjs != NULL) { + return 0; + } + + g->clk_pmu->clk_domainobjs = nvgpu_kzalloc(g, + sizeof(*g->clk_pmu->clk_domainobjs)); + if (g->clk_pmu->clk_domainobjs == NULL) { + return -ENOMEM; + } + + g->clk_pmu->get_fll = + clk_get_fll_clks_per_clk_domain; + g->clk_pmu->set_boot_fll = + clk_set_boot_fll_clks_per_clk_domain; + g->clk_pmu->set_p0_clks = + clk_set_p0_clk_per_domain; + g->clk_pmu->clk_get_clk_domain = + clk_get_clk_domain_from_index; + g->clk_pmu->clk_domain_clk_prog_link = + clk_domain_clk_prog_link; + + return 0; +} + +void nvgpu_clk_domain_free_pmupstate(struct gk20a *g) +{ + nvgpu_kfree(g, g->clk_pmu->clk_domainobjs); + g->clk_pmu->clk_domainobjs = NULL; +} + diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_domain.h b/drivers/gpu/nvgpu/common/pmu/clk/clk_domain.h index 7cb09dc47..49dc9a890 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_domain.h +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_domain.h @@ -1,5 +1,5 @@ /* -* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. +* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,12 +23,8 @@ #ifndef NVGPU_CLK_DOMAIN_H #define NVGPU_CLK_DOMAIN_H -#include #include #include -#include -#include -#include #include #define CLK_DOMAIN_BOARDOBJGRP_VERSION 0x30 @@ -103,10 +99,4 @@ struct clk_domain_35_slave { struct clk_domain_30_slave slave; }; -int clk_domain_clk_prog_link(struct gk20a *g, struct nvgpu_clk_pmupstate *pclk); - -#define CLK_CLK_DOMAIN_GET(pclk, idx) \ - ((struct nvgpu_clk_domain *)BOARDOBJGRP_OBJ_GET_BY_IDX( \ - &pclk->clk_domainobjs.super.super, (u8)(idx))) - #endif /* NVGPU_CLK_DOMAIN_H */ diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_fll.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_fll.c index 7828f559c..f2cb99458 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_fll.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_fll.c @@ -24,13 +24,17 @@ #include #include #include +#include #include #include #include +#include +#include -#include "clk.h" -#include "clk_fll.h" -#include "clk_domain.h" +#include "clk_vin.h" + +#define NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_SKIP 0x10U +#define NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_MASK 0x1FU static int devinit_get_fll_device_table(struct gk20a *g, struct nvgpu_avfsfllobjs *pfllobjs); @@ -40,6 +44,21 @@ static int fll_device_init_pmudata_super(struct gk20a *g, struct boardobj *board_obj_ptr, struct nv_pmu_boardobj *ppmudata); +static u8 clk_get_fll_lut_vf_num_entries(struct nvgpu_clk_pmupstate *pclk) +{ + return ((pclk)->avfs_fllobjs->lut_num_entries); +} + +static u32 clk_get_fll_lut_min_volt(struct nvgpu_clk_pmupstate *pclk) +{ + return ((pclk)->avfs_fllobjs->lut_min_voltage_uv); +} + +static u32 clk_get_fll_lut_step_size(struct nvgpu_clk_pmupstate *pclk) +{ + return ((pclk)->avfs_fllobjs->lut_step_size_uv); +} + static int _clk_fll_devgrp_pmudatainit_super(struct gk20a *g, struct boardobjgrp *pboardobjgrp, struct nv_pmu_boardobjgrp_super *pboardobjgrppmu) @@ -128,14 +147,14 @@ int nvgpu_clk_fll_sw_setup(struct gk20a *g) nvgpu_log_info(g, " "); - status = boardobjgrpconstruct_e32(g, &g->clk_pmu->avfs_fllobjs.super); + status = boardobjgrpconstruct_e32(g, &g->clk_pmu->avfs_fllobjs->super); if (status != 0) { nvgpu_err(g, "error creating boardobjgrp for fll, status - 0x%x", status); goto done; } - pfllobjs = &(g->clk_pmu->avfs_fllobjs); - pboardobjgrp = &(g->clk_pmu->avfs_fllobjs.super.super); + pfllobjs = g->clk_pmu->avfs_fllobjs; + pboardobjgrp = &(g->clk_pmu->avfs_fllobjs->super.super); BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, FLL_DEVICE); @@ -165,7 +184,7 @@ int nvgpu_clk_fll_sw_setup(struct gk20a *g) } status = BOARDOBJGRP_PMU_CMD_GRP_GET_STATUS_CONSTRUCT(g, - &g->clk_pmu->avfs_fllobjs.super.super, + &g->clk_pmu->avfs_fllobjs->super.super, clk, CLK, clk_fll_device, CLK_FLL_DEVICE); if (status != 0) { nvgpu_err(g, @@ -217,7 +236,7 @@ int nvgpu_clk_fll_pmu_setup(struct gk20a *g) nvgpu_log_info(g, " "); - pboardobjgrp = &g->clk_pmu->avfs_fllobjs.super.super; + pboardobjgrp = &g->clk_pmu->avfs_fllobjs->super.super; if (!pboardobjgrp->bconstructed) { return -EINVAL; @@ -241,10 +260,10 @@ static int devinit_get_fll_device_table(struct gk20a *g, u32 index = 0; struct fll_device fll_dev_data; struct fll_device *pfll_dev; - struct vin_device *pvin_dev; + struct nvgpu_vin_device *pvin_dev; u32 desctablesize; u32 vbios_domain = NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_SKIP; - struct nvgpu_avfsvinobjs *pvinobjs = &g->clk_pmu->avfs_vinobjs; + struct nvgpu_avfsvinobjs *pvinobjs = 
g->clk_pmu->avfs_vinobjs; nvgpu_log_info(g, " "); @@ -294,7 +313,7 @@ static int devinit_get_fll_device_table(struct gk20a *g, fll_id = fll_desc_table_entry.fll_device_id; if ( (u8)fll_desc_table_entry.vin_idx_logic != CTRL_CLK_VIN_ID_UNDEFINED) { - pvin_dev = CLK_GET_VIN_DEVICE(pvinobjs, + pvin_dev = g->clk_pmu->clk_get_vin(pvinobjs, (u8)fll_desc_table_entry.vin_idx_logic); if (pvin_dev == NULL) { return -EINVAL; @@ -312,7 +331,7 @@ static int devinit_get_fll_device_table(struct gk20a *g, NV_FLL_DESC_LUT_PARAMS_VSELECT); if ( (u8)fll_desc_table_entry.vin_idx_sram != CTRL_CLK_VIN_ID_UNDEFINED) { - pvin_dev = CLK_GET_VIN_DEVICE(pvinobjs, + pvin_dev = g->clk_pmu->clk_get_vin(pvinobjs, (u8)fll_desc_table_entry.vin_idx_sram); if (pvin_dev == NULL) { return -EINVAL; @@ -341,7 +360,7 @@ static int devinit_get_fll_device_table(struct gk20a *g, vbios_domain = U32(fll_desc_table_entry.clk_domain) & U32(NV_PERF_DOMAIN_4X_CLOCK_DOMAIN_MASK); fll_dev_data.clk_domain = - g->ops.pmu_ver.clk.get_vbios_clk_domain(vbios_domain); + nvgpu_clk_get_vbios_clk_domain(vbios_domain); fll_dev_data.rail_idx_for_lut = 0; fll_dev_data.vin_idx_logic = @@ -380,7 +399,7 @@ done: return status; } -u32 nvgpu_clk_get_vbios_clk_domain_gv10x( u32 vbios_domain) +u32 nvgpu_clk_get_vbios_clk_domain(u32 vbios_domain) { if (vbios_domain == 0U) { return CTRL_CLK_DOMAIN_GPCCLK; @@ -392,20 +411,9 @@ u32 nvgpu_clk_get_vbios_clk_domain_gv10x( u32 vbios_domain) return CTRL_CLK_DOMAIN_NVDCLK; } else if (vbios_domain == 9U) { return CTRL_CLK_DOMAIN_HOSTCLK; + } else { + return 0; } - return 0; -} - -u32 nvgpu_clk_get_vbios_clk_domain_gp10x( u32 vbios_domain) -{ - if (vbios_domain == 0U) { - return CTRL_CLK_DOMAIN_GPC2CLK; - } else if (vbios_domain == 1U) { - return CTRL_CLK_DOMAIN_XBAR2CLK; - } else if (vbios_domain == 3U) { - return CTRL_CLK_DOMAIN_SYS2CLK; - } - return 0; } static int lutbroadcastslaveregister(struct gk20a *g, @@ -516,3 +524,87 @@ static int fll_device_init_pmudata_super(struct gk20a *g, return status; } + +static u8 find_regime_id(struct gk20a *g, u32 domain, u16 clkmhz) +{ + struct fll_device *pflldev; + u8 j; + struct nvgpu_clk_pmupstate *pclk = g->clk_pmu; + + BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs->super.super), + struct fll_device *, pflldev, j) { + if (pflldev->clk_domain == domain) { + if (pflldev->regime_desc.fixed_freq_regime_limit_mhz >= + clkmhz) { + return CTRL_CLK_FLL_REGIME_ID_FFR; + } else { + return CTRL_CLK_FLL_REGIME_ID_FR; + } + } + } + return CTRL_CLK_FLL_REGIME_ID_INVALID; +} + +static int set_regime_id(struct gk20a *g, u32 domain, u8 regimeid) +{ + struct fll_device *pflldev; + u8 j; + struct nvgpu_clk_pmupstate *pclk = g->clk_pmu; + + BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs->super.super), + struct fll_device *, pflldev, j) { + if (pflldev->clk_domain == domain) { + pflldev->regime_desc.regime_id = regimeid; + return 0; + } + } + return -EINVAL; +} + +static int get_regime_id(struct gk20a *g, u32 domain, u8 *regimeid) +{ + struct fll_device *pflldev; + u8 j; + struct nvgpu_clk_pmupstate *pclk = g->clk_pmu; + + BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs->super.super), + struct fll_device *, pflldev, j) { + if (pflldev->clk_domain == domain) { + *regimeid = pflldev->regime_desc.regime_id; + return 0; + } + } + return -EINVAL; +} + +int nvgpu_clk_fll_init_pmupstate(struct gk20a *g) +{ + /* If already allocated, do not re-allocate */ + if (g->clk_pmu->avfs_fllobjs != NULL) { + return 0; + } + + g->clk_pmu->avfs_fllobjs = nvgpu_kzalloc(g, + sizeof(*g->clk_pmu->avfs_fllobjs)); + if 
(g->clk_pmu->avfs_fllobjs == NULL) { + return -ENOMEM; + } + + g->clk_pmu->find_regime_id = find_regime_id; + g->clk_pmu->get_regime_id = get_regime_id; + g->clk_pmu->set_regime_id = set_regime_id; + g->clk_pmu->get_fll_lut_vf_num_entries = + clk_get_fll_lut_vf_num_entries; + g->clk_pmu->get_fll_lut_min_volt = + clk_get_fll_lut_min_volt; + g->clk_pmu->get_fll_lut_step_size = + clk_get_fll_lut_step_size; + + return 0; +} + +void nvgpu_clk_fll_free_pmupstate(struct gk20a *g) +{ + nvgpu_kfree(g, g->clk_pmu->avfs_fllobjs); + g->clk_pmu->avfs_fllobjs = NULL; +} diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_fll.h b/drivers/gpu/nvgpu/common/pmu/clk/clk_fll.h deleted file mode 100644 index 1d4e35561..000000000 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_fll.h +++ /dev/null @@ -1,66 +0,0 @@ -/* -* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. -* - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
-*/ - -#ifndef NVGPU_CLK_FLL_H -#define NVGPU_CLK_FLL_H - -#include -#include -#include - -struct fll_device; -struct nvgpu_avfsfllobjs; - -typedef int fll_lut_broadcast_slave_register(struct gk20a *g, - struct nvgpu_avfsfllobjs *pfllobjs, - struct fll_device *pfll, - struct fll_device *pfll_slave); - -struct fll_device { - struct boardobj super; - u8 id; - u8 mdiv; - u16 input_freq_mhz; - u32 clk_domain; - u8 vin_idx_logic; - u8 vin_idx_sram; - u8 rail_idx_for_lut; - struct nv_pmu_clk_lut_device_desc lut_device; - struct nv_pmu_clk_regime_desc regime_desc; - u8 min_freq_vfe_idx; - u8 freq_ctrl_idx; - u8 target_regime_id_override; - bool b_skip_pldiv_below_dvco_min; - bool b_dvco_1x; - struct boardobjgrpmask_e32 lut_prog_broadcast_slave_mask; - fll_lut_broadcast_slave_register *lut_broadcast_slave_register; -}; - -#define CLK_FLL_LUT_VF_NUM_ENTRIES(pclk) \ - ((pclk)->avfs_fllobjs.lut_num_entries) - -#define CLK_FLL_LUT_MIN_VOLTAGE_UV(pclk) \ - ((pclk)->avfs_fllobjs.lut_min_voltage_uv) -#define CLK_FLL_LUT_STEP_SIZE_UV(pclk) \ - ((pclk)->avfs_fllobjs.lut_step_size_uv) - -#endif /* NVGPU_CLK_FLL_H */ diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_controller.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_controller.c index 0a10f2713..429b4f21c 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_controller.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_controller.c @@ -21,18 +21,46 @@ */ #include +#include #include #include #include +#include +#include #include #include #include +#include +#include +#include +#include -#include "clk.h" -#include "clk_fll.h" -#include "clk_domain.h" #include "clk_freq_controller.h" +struct nvgpu_clk_freq_ctlr_rpc_pmucmdhandler_params { + struct nv_pmu_clk_rpc *prpccall; + u32 success; +}; + +void nvgpu_clk_freq_ctlr_rpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, + void *param, u32 handle, u32 status) +{ + struct nvgpu_clk_freq_ctlr_rpc_pmucmdhandler_params *phandlerparams = + (struct nvgpu_clk_freq_ctlr_rpc_pmucmdhandler_params *)param; + + nvgpu_log_info(g, " "); + + if (msg->msg.clk.msg_type != NV_PMU_CLK_MSG_ID_RPC) { + nvgpu_err(g, "unsupported msg for CLK LOAD RPC %x", + msg->msg.clk.msg_type); + return; + } + + if (phandlerparams->prpccall->b_supported) { + phandlerparams->success = 1; + } +} + static int clk_freq_controller_pmudatainit_super(struct gk20a *g, struct boardobj *board_obj_ptr, struct nv_pmu_boardobj *ppmudata) @@ -240,7 +268,7 @@ static int clk_get_freq_controller_table(struct gk20a *g, BIOS_GET_FIELD(u8, entry.param0, NV_VBIOS_FCT_1X_ENTRY_PARAM0_ID); - pclk_domain = CLK_CLK_DOMAIN_GET((g->clk_pmu), + pclk_domain = g->clk_pmu->clk_get_clk_domain((g->clk_pmu), (u32)entry.clk_domain_idx); freq_controller_data.freq_controller.clk_domain = pclk_domain->api_domain; @@ -332,7 +360,7 @@ int nvgpu_clk_freq_controller_pmu_setup(struct gk20a *g) nvgpu_log_info(g, " "); - pboardobjgrp = &g->clk_pmu->clk_freq_controllers.super.super; + pboardobjgrp = &g->clk_pmu->clk_freq_controllers->super.super; if (!pboardobjgrp->bconstructed) { return -EINVAL; @@ -397,7 +425,7 @@ int nvgpu_clk_freq_controller_sw_setup(struct gk20a *g) int status = 0; struct boardobjgrp *pboardobjgrp = NULL; struct nvgpu_clk_freq_controllers *pclk_freq_controllers; - struct nvgpu_avfsfllobjs *pfllobjs = &(g->clk_pmu->avfs_fllobjs); + struct nvgpu_avfsfllobjs *pfllobjs = g->clk_pmu->avfs_fllobjs; struct fll_device *pfll; struct clk_freq_controller *pclkfreqctrl; u8 i; @@ -405,7 +433,7 @@ int nvgpu_clk_freq_controller_sw_setup(struct gk20a *g) nvgpu_log_info(g, " 
"); - pclk_freq_controllers = &g->clk_pmu->clk_freq_controllers; + pclk_freq_controllers = g->clk_pmu->clk_freq_controllers; status = boardobjgrpconstruct_e32(g, &pclk_freq_controllers->super); if (status != 0) { nvgpu_err(g, @@ -414,7 +442,7 @@ int nvgpu_clk_freq_controller_sw_setup(struct gk20a *g) goto done; } - pboardobjgrp = &g->clk_pmu->clk_freq_controllers.super.super; + pboardobjgrp = &g->clk_pmu->clk_freq_controllers->super.super; pboardobjgrp->pmudatainit = _clk_freq_controllers_pmudatainit; pboardobjgrp->pmudatainstget = @@ -461,3 +489,131 @@ done: nvgpu_log_info(g, " done status %x", status); return status; } + +int nvgpu_clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx) +{ + struct pmu_cmd cmd; + struct pmu_payload payload; + int status; + u32 seqdesc; + struct nv_pmu_clk_rpc rpccall; + struct nvgpu_clk_freq_ctlr_rpc_pmucmdhandler_params handler; + struct nv_pmu_clk_load *clkload; + struct nvgpu_clk_freq_controllers *pclk_freq_controllers; + struct ctrl_boardobjgrp_mask_e32 *load_mask; + struct boardobjgrpmask_e32 isolate_cfc_mask; + + (void) memset(&payload, 0, sizeof(struct pmu_payload)); + (void) memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc)); + (void) memset(&handler, 0, sizeof( + struct nvgpu_clk_freq_ctlr_rpc_pmucmdhandler_params)); + + pclk_freq_controllers = g->clk_pmu->clk_freq_controllers; + rpccall.function = NV_PMU_CLK_RPC_ID_LOAD; + clkload = &rpccall.params.clk_load; + clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_FREQ_CONTROLLER; + clkload->action_mask = bload ? + NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_CONTROLLER_CALLBACK_YES : + NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_CONTROLLER_CALLBACK_NO; + + load_mask = &rpccall.params.clk_load.payload.freq_controllers.load_mask; + + status = boardobjgrpmask_e32_init(&isolate_cfc_mask, NULL); + + if (bit_idx == CTRL_CLK_CLK_FREQ_CONTROLLER_ID_ALL) { + status = boardobjgrpmask_export( + &pclk_freq_controllers-> + freq_ctrl_load_mask.super, + pclk_freq_controllers-> + freq_ctrl_load_mask.super.bitcount, + &load_mask->super); + + + } else { + status = boardobjgrpmask_bitset(&isolate_cfc_mask.super, + bit_idx); + status = boardobjgrpmask_export(&isolate_cfc_mask.super, + isolate_cfc_mask.super.bitcount, + &load_mask->super); + if (bload) { + status = boardobjgrpmask_bitset( + &pclk_freq_controllers-> + freq_ctrl_load_mask.super, + bit_idx); + } else { + status = boardobjgrpmask_bitclr( + &pclk_freq_controllers-> + freq_ctrl_load_mask.super, + bit_idx); + } + } + + if (status != 0) { + nvgpu_err(g, "Error in generating mask used to select CFC"); + goto done; + } + + cmd.hdr.unit_id = PMU_UNIT_CLK; + cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) + + (u32)sizeof(struct pmu_hdr); + + cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC; + + payload.in.buf = (u8 *)&rpccall; + payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc); + payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; + nvgpu_assert(NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET < U64(U32_MAX)); + payload.in.offset = (u32)NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET; + + payload.out.buf = (u8 *)&rpccall; + payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc); + payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; + nvgpu_assert(NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET < U64(U32_MAX)); + payload.out.offset = (u32)NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET; + + handler.prpccall = &rpccall; + handler.success = 0; + status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, + PMU_COMMAND_QUEUE_LPQ, + nvgpu_clk_freq_ctlr_rpc_pmucmdhandler, (void *)&handler, + &seqdesc); + + if 
(status != 0) { + nvgpu_err(g, "unable to post clk RPC cmd %x", + cmd.cmd.clk.cmd_type); + goto done; + } + + pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g), + &handler.success, 1); + + if (handler.success == 0U) { + nvgpu_err(g, "rpc call to load freq cntlr cal failed"); + status = -EINVAL; + } + +done: + return status; +} + +int nvgpu_clk_freq_controller_init_pmupstate(struct gk20a *g) +{ + /* If already allocated, do not re-allocate */ + if (g->clk_pmu->clk_freq_controllers != NULL) { + return 0; + } + + g->clk_pmu->clk_freq_controllers = nvgpu_kzalloc(g, + sizeof(*g->clk_pmu->clk_freq_controllers)); + if (g->clk_pmu->clk_freq_controllers == NULL) { + return -ENOMEM; + } + + return 0; +} + +void nvgpu_clk_freq_controller_free_pmupstate(struct gk20a *g) +{ + nvgpu_kfree(g, g->clk_pmu->clk_freq_controllers); + g->clk_pmu->clk_freq_controllers = NULL; +} diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_controller.h b/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_controller.h index 1ad538a8b..8a8a5d755 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_controller.h +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_controller.h @@ -1,5 +1,5 @@ /* -* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. +* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,6 +23,8 @@ #ifndef NVGPU_CLK_FREQ_CONTROLLER_H #define NVGPU_CLK_FREQ_CONTROLLER_H +struct boardobj; + #define CTRL_CLK_CLK_FREQ_CONTROLLER_ID_ALL 0xFFU #define CTRL_CLK_CLK_FREQ_CONTROLLER_ID_SYS 0x00U #define CTRL_CLK_CLK_FREQ_CONTROLLER_ID_LTC 0x01U @@ -70,4 +72,8 @@ struct clk_freq_controller_pi { bool bpoison; }; +int nvgpu_clk_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx); +void nvgpu_clk_freq_ctlr_rpc_pmucmdhandler(struct gk20a *g, + struct pmu_msg *msg, void *param, u32 handle, u32 status); + #endif /* NVGPU_CLK_FREQ_CONTROLLER_H */ diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_domain.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_domain.c index fde643530..abbf81fc9 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_domain.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_domain.c @@ -26,10 +26,10 @@ #include #include #include - -#include "clk.h" -#include "clk_freq_domain.h" -#include "clk_fll.h" +#include +#include +#include +#include struct domain_type { u8 type; @@ -154,11 +154,11 @@ int nvgpu_clk_freq_domain_sw_setup(struct gk20a *g) nvgpu_assert(tmp_num_of_domains <= U8_MAX); num_of_domains = (u8)tmp_num_of_domains; - pboardobjgrp = &g->clk_pmu->freq_domain_grp_objs.super.super; - pfreq_domain_grp = &g->clk_pmu->freq_domain_grp_objs; + pboardobjgrp = &g->clk_pmu->freq_domain_grp_objs->super.super; + pfreq_domain_grp = g->clk_pmu->freq_domain_grp_objs; status = boardobjgrpconstruct_e32(g, - &g->clk_pmu->freq_domain_grp_objs.super); + &g->clk_pmu->freq_domain_grp_objs->super); if (status != 0) { nvgpu_err(g, "error creating boardobjgrp for clk freq domain, status - 0x%x", @@ -225,7 +225,7 @@ int nvgpu_clk_freq_domain_pmu_setup(struct gk20a *g) nvgpu_log_fn(g, " "); - pboardobjgrp = &g->clk_pmu->freq_domain_grp_objs.super.super; + pboardobjgrp = &g->clk_pmu->freq_domain_grp_objs->super.super; if (!pboardobjgrp->bconstructed) { return -EINVAL; @@ -236,3 +236,25 @@ int nvgpu_clk_freq_domain_pmu_setup(struct gk20a *g) return status; } + +int nvgpu_clk_freq_domain_init_pmupstate(struct gk20a *g) +{ + /* If 
already allocated, do not re-allocate */ + if (g->clk_pmu->freq_domain_grp_objs != NULL) { + return 0; + } + + g->clk_pmu->freq_domain_grp_objs = nvgpu_kzalloc(g, + sizeof(*g->clk_pmu->freq_domain_grp_objs)); + if (g->clk_pmu->freq_domain_grp_objs == NULL) { + return -ENOMEM; + } + + return 0; +} + +void nvgpu_clk_freq_domain_free_pmupstate(struct gk20a *g) +{ + nvgpu_kfree(g, g->clk_pmu->freq_domain_grp_objs); + g->clk_pmu->freq_domain_grp_objs = NULL; +} diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_domain.h b/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_domain.h deleted file mode 100644 index cc8bfd6fb..000000000 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_freq_domain.h +++ /dev/null @@ -1,36 +0,0 @@ -/* -* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -* - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. -*/ - -#ifndef NVGPU_CLK_FREQ_DOMAIN_H -#define NVGPU_CLK_FREQ_DOMAIN_H - -#include -#include -#include -#include - -struct nvgpu_clk_freq_domain { - struct boardobj super; - u32 clk_domain; -}; - -#endif /* NVGPU_CLK_FREQ_DOMAIN_H */ diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_mclk.h b/drivers/gpu/nvgpu/common/pmu/clk/clk_mclk.h deleted file mode 100644 index 90992c9ed..000000000 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_mclk.h +++ /dev/null @@ -1,39 +0,0 @@ -/* -* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. -* - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
-*/ - -#ifndef NVGPU_CLK_MCLK_H -#define NVGPU_CLK_MCLK_H - -#include - -#define GP106_MCLK_LOW_SPEED 0U -#define GP106_MCLK_MID_SPEED 1U -#define GP106_MCLK_HIGH_SPEED 2U -#define GP106_MCLK_NUM_SPEED 3U - -enum gk20a_mclk_speed { - gk20a_mclk_low_speed, - gk20a_mclk_mid_speed, - gk20a_mclk_high_speed, -}; - -#endif /* NVGPU_CLK_MCLK_H */ diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_prog.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_prog.c index e840f803d..266446c04 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_prog.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_prog.c @@ -25,11 +25,13 @@ #include #include #include +#include #include #include #include +#include +#include -#include "clk.h" #include "clk_prog.h" static struct clk_prog *construct_clk_prog(struct gk20a *g, void *pargs); @@ -113,7 +115,7 @@ int nvgpu_clk_prog_sw_setup(struct gk20a *g) nvgpu_log_info(g, " "); - status = boardobjgrpconstruct_e255(g, &g->clk_pmu->clk_progobjs.super); + status = boardobjgrpconstruct_e255(g, &g->clk_pmu->clk_progobjs->super); if (status != 0) { nvgpu_err(g, "error creating boardobjgrp for clk prog, status - 0x%x", @@ -121,8 +123,8 @@ int nvgpu_clk_prog_sw_setup(struct gk20a *g) goto done; } - pboardobjgrp = &g->clk_pmu->clk_progobjs.super.super; - pclkprogobjs = &(g->clk_pmu->clk_progobjs); + pboardobjgrp = &g->clk_pmu->clk_progobjs->super.super; + pclkprogobjs = g->clk_pmu->clk_progobjs; BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, CLK_PROG); @@ -144,7 +146,7 @@ int nvgpu_clk_prog_sw_setup(struct gk20a *g) goto done; } - status = clk_domain_clk_prog_link(g, g->clk_pmu); + status = g->clk_pmu->clk_domain_clk_prog_link(g, g->clk_pmu); if (status != 0) { nvgpu_err(g, "error constructing VF point board objects"); goto done; @@ -162,7 +164,7 @@ int nvgpu_clk_prog_pmu_setup(struct gk20a *g) nvgpu_log_info(g, " "); - pboardobjgrp = &g->clk_pmu->clk_progobjs.super.super; + pboardobjgrp = &g->clk_pmu->clk_progobjs->super.super; if (!pboardobjgrp->bconstructed) { return -EINVAL; @@ -741,7 +743,7 @@ static int clk_prog_pmudatainit_1x_master(struct gk20a *g, struct clk_prog_1x_master *pclk_prog_1x_master; struct nv_pmu_clk_clk_prog_1x_master_boardobj_set *pset; size_t vfsize = sizeof(struct ctrl_clk_clk_prog_1x_master_vf_entry) * - g->clk_pmu->clk_progobjs.vf_entry_count; + g->clk_pmu->clk_progobjs->vf_entry_count; nvgpu_log_info(g, " "); @@ -774,7 +776,7 @@ static int clk_prog_pmudatainit_35_master(struct gk20a *g, struct nv_pmu_clk_clk_prog_35_master_boardobj_set *pset; size_t voltrail_sec_vfsize = sizeof(struct ctrl_clk_clk_prog_35_master_sec_vf_entry_voltrail) - * g->clk_pmu->clk_progobjs.vf_sec_entry_count; + * g->clk_pmu->clk_progobjs->vf_sec_entry_count; nvgpu_log_info(g, " "); @@ -801,7 +803,7 @@ static int clk_prog_pmudatainit_1x_master_ratio(struct gk20a *g, struct clk_prog_1x_master_ratio *pclk_prog_1x_master_ratio; struct nv_pmu_clk_clk_prog_1x_master_ratio_boardobj_set *pset; size_t slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * - g->clk_pmu->clk_progobjs.slave_entry_count; + g->clk_pmu->clk_progobjs->slave_entry_count; nvgpu_log_info(g, " "); @@ -830,7 +832,7 @@ static int clk_prog_pmudatainit_35_master_ratio(struct gk20a *g, struct clk_prog_35_master_ratio *pclk_prog_35_master_ratio; struct nv_pmu_clk_clk_prog_35_master_ratio_boardobj_set *pset; size_t slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * - g->clk_pmu->clk_progobjs.slave_entry_count; + g->clk_pmu->clk_progobjs->slave_entry_count; nvgpu_log_info(g, " "); @@ -860,7 +862,7 @@ 
static int clk_prog_pmudatainit_1x_master_table(struct gk20a *g, struct clk_prog_1x_master_table *pclk_prog_1x_master_table; struct nv_pmu_clk_clk_prog_1x_master_table_boardobj_set *pset; size_t slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * - g->clk_pmu->clk_progobjs.slave_entry_count; + g->clk_pmu->clk_progobjs->slave_entry_count; nvgpu_log_info(g, " "); @@ -888,7 +890,7 @@ static int clk_prog_pmudatainit_35_master_table(struct gk20a *g, struct clk_prog_35_master_table *pclk_prog_35_master_table; struct nv_pmu_clk_clk_prog_35_master_table_boardobj_set *pset; size_t slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * - g->clk_pmu->clk_progobjs.slave_entry_count; + g->clk_pmu->clk_progobjs->slave_entry_count; nvgpu_log_info(g, " "); @@ -926,8 +928,8 @@ static int _clk_prog_1x_master_rail_construct_vf_point(struct gk20a *g, status = -ENOMEM; goto done; } - status = pclk->clk_vf_pointobjs.super.super.objinsert( - &pclk->clk_vf_pointobjs.super.super, + status = pclk->clk_vf_pointobjs->super.super.objinsert( + &pclk->clk_vf_pointobjs->super.super, &p_vf_point->super, *p_vf_point_idx); if (status != 0) { @@ -1030,7 +1032,7 @@ static int clk_prog_construct_1x_master(struct gk20a *g, (struct clk_prog_1x_master *)pargs; int status = 0; size_t vfsize = sizeof(struct ctrl_clk_clk_prog_1x_master_vf_entry) * - g->clk_pmu->clk_progobjs.vf_entry_count; + g->clk_pmu->clk_progobjs->vf_entry_count; u8 railidx; nvgpu_log_info(g, " type - %x", BOARDOBJ_GET_TYPE(pargs)); @@ -1067,7 +1069,7 @@ static int clk_prog_construct_1x_master(struct gk20a *g, pclkprog->b_o_c_o_v_enabled = ptmpprog->b_o_c_o_v_enabled; for (railidx = 0; - railidx < g->clk_pmu->clk_progobjs.vf_entry_count; + railidx < g->clk_pmu->clk_progobjs->vf_entry_count; railidx++) { pclkprog->p_vf_entries[railidx].vf_point_idx_first = CTRL_CLK_CLK_VF_POINT_IDX_INVALID; @@ -1127,7 +1129,7 @@ static int clk_prog_construct_1x_master_ratio(struct gk20a *g, (struct clk_prog_1x_master_ratio *)pargs; int status = 0; size_t slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * - g->clk_pmu->clk_progobjs.slave_entry_count; + g->clk_pmu->clk_progobjs->slave_entry_count; if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_CLK_PROG_TYPE_1X_MASTER_RATIO) { return -EINVAL; @@ -1170,7 +1172,7 @@ static int clk_prog_construct_35_master_ratio(struct gk20a *g, (struct clk_prog_35_master_ratio *)pargs; int status = 0; size_t slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * - g->clk_pmu->clk_progobjs.slave_entry_count; + g->clk_pmu->clk_progobjs->slave_entry_count; if (BOARDOBJ_GET_TYPE(pargs) != CTRL_CLK_CLK_PROG_TYPE_35_MASTER_RATIO) { return -EINVAL; @@ -1213,7 +1215,7 @@ static int clk_prog_construct_1x_master_table(struct gk20a *g, (struct clk_prog_1x_master_table *)pargs; int status = 0; size_t slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_ratio_slave_entry) * - g->clk_pmu->clk_progobjs.slave_entry_count; + g->clk_pmu->clk_progobjs->slave_entry_count; nvgpu_log_info(g, "type - %x", BOARDOBJ_GET_TYPE(pargs)); @@ -1265,7 +1267,7 @@ static int clk_prog_construct_35_master_table(struct gk20a *g, (struct clk_prog_35_master_table *)pargs; int status = 0; size_t slavesize = sizeof(struct ctrl_clk_clk_prog_1x_master_table_slave_entry) * - g->clk_pmu->clk_progobjs.slave_entry_count; + g->clk_pmu->clk_progobjs->slave_entry_count; nvgpu_log_info(g, "type - %x", BOARDOBJ_GET_TYPE(pargs)); @@ -1382,10 +1384,10 @@ static int vfflatten_prog_1x_master(struct gk20a *g, (void) 
memset(&vf_point_data, 0x0, sizeof(vf_point_data)); vf_point_idx = BOARDOBJGRP_NEXT_EMPTY_IDX( - &pclk->clk_vf_pointobjs.super.super); + &pclk->clk_vf_pointobjs->super.super); for (vf_rail_idx = 0; - vf_rail_idx < pclk->clk_progobjs.vf_entry_count; + vf_rail_idx < pclk->clk_progobjs->vf_entry_count; vf_rail_idx++) { u32 voltage_min_uv; u32 voltage_step_size_uv; @@ -1438,9 +1440,11 @@ static int vfflatten_prog_1x_master(struct gk20a *g, break; case CTRL_CLK_PROG_1X_SOURCE_FLL: - voltage_min_uv = CLK_FLL_LUT_MIN_VOLTAGE_UV(pclk); - voltage_step_size_uv = CLK_FLL_LUT_STEP_SIZE_UV(pclk); - step_count = CLK_FLL_LUT_VF_NUM_ENTRIES(pclk); + voltage_min_uv = g->clk_pmu->get_fll_lut_min_volt(pclk); + voltage_step_size_uv = + g->clk_pmu->get_fll_lut_step_size(pclk); + step_count = g->clk_pmu-> + get_fll_lut_vf_num_entries(pclk); /* FLL sources use a voltage-based VF_POINT.*/ if (ver == NVGPU_GPUID_TU104) { @@ -1500,7 +1504,7 @@ static int vflookup_prog_1x_master return -EINVAL; } - pclkprogobjs = &(pclk->clk_progobjs); + pclkprogobjs = pclk->clk_progobjs; slaveentrycount = pclkprogobjs->slave_entry_count; @@ -1656,7 +1660,7 @@ static int getfpoints_prog_1x_master return -EINVAL; } - pclkprogobjs = &(pclk->clk_progobjs); + pclkprogobjs = pclk->clk_progobjs; if (pclkprogobjs->vf_entry_count > CTRL_CLK_CLK_PROG_1X_MASTER_VF_ENTRY_MAX_ENTRIES) { @@ -1720,7 +1724,7 @@ static int getslaveclk_prog_1x_master(struct gk20a *g, } *pclkmhz = 0; - pclkprogobjs = &(pclk->clk_progobjs); + pclkprogobjs = pclk->clk_progobjs; slaveentrycount = pclkprogobjs->slave_entry_count; if(ver == NVGPU_GPUID_GV100) { @@ -1772,3 +1776,25 @@ static int getslaveclk_prog_1x_master(struct gk20a *g, } return 0; } + +int nvgpu_clk_prog_init_pmupstate(struct gk20a *g) +{ + /* If already allocated, do not re-allocate */ + if (g->clk_pmu->clk_progobjs != NULL) { + return 0; + } + + g->clk_pmu->clk_progobjs = nvgpu_kzalloc(g, + sizeof(*g->clk_pmu->clk_progobjs)); + if (g->clk_pmu->clk_progobjs == NULL) { + return -ENOMEM; + } + + return 0; +} + +void nvgpu_clk_prog_free_pmupstate(struct gk20a *g) +{ + nvgpu_kfree(g, g->clk_pmu->clk_progobjs); + g->clk_pmu->clk_progobjs = NULL; +} diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_prog.h b/drivers/gpu/nvgpu/common/pmu/clk/clk_prog.h index 27a51939e..32176f8b2 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_prog.h +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_prog.h @@ -29,6 +29,8 @@ #include #include #include +#include +#include struct clk_prog_1x_master; #define FREQ_STEP_SIZE_MHZ 15U @@ -124,6 +126,6 @@ struct clk_prog_35_master_table { #define CLK_CLK_PROG_GET(pclk, idx) \ ((struct clk_prog *)BOARDOBJGRP_OBJ_GET_BY_IDX( \ - &pclk->clk_progobjs.super.super, (u8)(idx))) + &pclk->clk_progobjs->super.super, (u8)(idx))) #endif /* NVGPU_CLK_PROG_H */ diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_vf_point.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_vf_point.c index dc4e13d00..67cc26ca2 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_vf_point.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_vf_point.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -30,8 +31,34 @@ #include #include #include +#include +#include +#include +#include -#include "clk.h" +int nvgpu_clk_domain_volt_to_freq(struct gk20a *g, u8 clkdomain_idx, + u32 *pclkmhz, u32 *pvoltuv, u8 railidx) +{ + struct nv_pmu_rpc_clk_domain_35_prog_freq_to_volt rpc; + struct nvgpu_pmu *pmu = &g->pmu; + int status = -EINVAL; + + (void)memset(&rpc, 0, + sizeof(struct nv_pmu_rpc_clk_domain_35_prog_freq_to_volt)); 
+ rpc.volt_rail_idx = + nvgpu_volt_rail_volt_domain_convert_to_idx(g, railidx); + rpc.clk_domain_idx = clkdomain_idx; + rpc.voltage_type = CTRL_VOLT_DOMAIN_LOGIC; + rpc.input.value = *pvoltuv; + PMU_RPC_EXECUTE_CPB(status, pmu, CLK, + CLK_DOMAIN_35_PROG_VOLT_TO_FREQ, &rpc, 0); + if (status != 0) { + nvgpu_err(g, "Failed to execute Freq to Volt RPC status=0x%x", + status); + } + *pclkmhz = rpc.output.value; + return status; +} static int _clk_vf_point_pmudatainit_super(struct gk20a *g, struct boardobj *board_obj_ptr, struct nv_pmu_boardobj *ppmudata); @@ -102,7 +129,8 @@ int nvgpu_clk_vf_point_sw_setup(struct gk20a *g) nvgpu_log_info(g, " "); - status = boardobjgrpconstruct_e255(g, &g->clk_pmu->clk_vf_pointobjs.super); + status = boardobjgrpconstruct_e255(g, + &g->clk_pmu->clk_vf_pointobjs->super); if (status != 0) { nvgpu_err(g, "error creating boardobjgrp for clk vfpoint, status - 0x%x", @@ -110,7 +138,7 @@ int nvgpu_clk_vf_point_sw_setup(struct gk20a *g) goto done; } - pboardobjgrp = &g->clk_pmu->clk_vf_pointobjs.super.super; + pboardobjgrp = &g->clk_pmu->clk_vf_pointobjs->super.super; BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, CLK_VF_POINT); @@ -124,7 +152,7 @@ int nvgpu_clk_vf_point_sw_setup(struct gk20a *g) } status = BOARDOBJGRP_PMU_CMD_GRP_GET_STATUS_CONSTRUCT(g, - &g->clk_pmu->clk_vf_pointobjs.super.super, + &g->clk_pmu->clk_vf_pointobjs->super.super, clk, CLK, clk_vf_point, CLK_VF_POINT); if (status != 0) { nvgpu_err(g, @@ -149,7 +177,7 @@ int nvgpu_clk_vf_point_pmu_setup(struct gk20a *g) nvgpu_log_info(g, " "); - pboardobjgrp = &g->clk_pmu->clk_vf_pointobjs.super.super; + pboardobjgrp = &g->clk_pmu->clk_vf_pointobjs->super.super; if (!pboardobjgrp->bconstructed) { return -EINVAL; @@ -464,188 +492,6 @@ static int clk_vf_point_update(struct gk20a *g, return 0; } -int nvgpu_clk_set_req_fll_clk_ps35(struct gk20a *g, struct nvgpu_clk_slave_freq *vf_point) -{ - struct nvgpu_pmu *pmu = &g->pmu; - struct nv_pmu_rpc_perf_change_seq_queue_change rpc; - struct ctrl_perf_change_seq_change_input change_input; - struct nvgpu_clk_domain *pclk_domain; - int status = 0; - u8 i = 0, gpcclk_domain=0; - u32 gpcclk_voltuv=0,gpcclk_clkmhz=0; - u32 max_clkmhz; - u16 max_ratio; - struct clk_set_info *p0_info; - u32 vmin_uv = 0, vmargin_uv = 0U; - - (void) memset(&change_input, 0, - sizeof(struct ctrl_perf_change_seq_change_input)); - BOARDOBJGRP_FOR_EACH(&(g->clk_pmu->clk_domainobjs.super.super), - struct nvgpu_clk_domain *, pclk_domain, i) { - - switch (pclk_domain->api_domain) { - case CTRL_CLK_DOMAIN_GPCCLK: - gpcclk_domain = i; - gpcclk_clkmhz = vf_point->gpc_mhz; - - p0_info = pstate_get_clk_set_info(g, - CTRL_PERF_PSTATE_P0, CLKWHICH_GPCCLK); - if(p0_info == NULL){ - nvgpu_err(g, "failed to get GPCCLK P0 info"); - break; - } - if ( vf_point->gpc_mhz < p0_info->min_mhz ) { - vf_point->gpc_mhz = p0_info->min_mhz; - } - if (vf_point->gpc_mhz > p0_info->max_mhz) { - vf_point->gpc_mhz = p0_info->max_mhz; - } - change_input.clk[i].clk_freq_khz = (u32)vf_point->gpc_mhz * 1000U; - change_input.clk_domains_mask.super.data[0] |= (u32) BIT(i); - break; - case CTRL_CLK_DOMAIN_XBARCLK: - p0_info = pstate_get_clk_set_info(g, - CTRL_PERF_PSTATE_P0, CLKWHICH_XBARCLK); - if(p0_info == NULL){ - nvgpu_err(g, "failed to get XBARCLK P0 info"); - break; - } - max_ratio = (vf_point->xbar_mhz*100U)/vf_point->gpc_mhz; - if ( vf_point->xbar_mhz < p0_info->min_mhz ) { - vf_point->xbar_mhz = p0_info->min_mhz; - } - if (vf_point->xbar_mhz > p0_info->max_mhz) { - vf_point->xbar_mhz = p0_info->max_mhz; - } - 
change_input.clk[i].clk_freq_khz = (u32)vf_point->xbar_mhz * 1000U; - change_input.clk_domains_mask.super.data[0] |= (u32) BIT(i); - if (vf_point->gpc_mhz < vf_point->xbar_mhz) { - max_clkmhz = (((u32)vf_point->xbar_mhz * 100U) / (u32)max_ratio); - if (gpcclk_clkmhz < max_clkmhz) { - gpcclk_clkmhz = max_clkmhz; - } - } - break; - case CTRL_CLK_DOMAIN_SYSCLK: - p0_info = pstate_get_clk_set_info(g, - CTRL_PERF_PSTATE_P0, CLKWHICH_SYSCLK); - if(p0_info == NULL){ - nvgpu_err(g, "failed to get SYSCLK P0 info"); - break; - } - max_ratio = (vf_point->sys_mhz*100U)/vf_point->gpc_mhz; - if ( vf_point->sys_mhz < p0_info->min_mhz ) { - vf_point->sys_mhz = p0_info->min_mhz; - } - if (vf_point->sys_mhz > p0_info->max_mhz) { - vf_point->sys_mhz = p0_info->max_mhz; - } - change_input.clk[i].clk_freq_khz = (u32)vf_point->sys_mhz * 1000U; - change_input.clk_domains_mask.super.data[0] |= (u32) BIT(i); - if (vf_point->gpc_mhz < vf_point->sys_mhz) { - max_clkmhz = (((u32)vf_point->sys_mhz * 100U) / (u32)max_ratio); - if (gpcclk_clkmhz < max_clkmhz) { - gpcclk_clkmhz = max_clkmhz; - } - } - break; - case CTRL_CLK_DOMAIN_NVDCLK: - p0_info = pstate_get_clk_set_info(g, - CTRL_PERF_PSTATE_P0, CLKWHICH_NVDCLK); - if(p0_info == NULL){ - nvgpu_err(g, "failed to get NVDCLK P0 info"); - break; - } - max_ratio = (vf_point->nvd_mhz*100U)/vf_point->gpc_mhz; - if ( vf_point->nvd_mhz < p0_info->min_mhz ) { - vf_point->nvd_mhz = p0_info->min_mhz; - } - if (vf_point->nvd_mhz > p0_info->max_mhz) { - vf_point->nvd_mhz = p0_info->max_mhz; - } - change_input.clk[i].clk_freq_khz = (u32)vf_point->nvd_mhz * 1000U; - change_input.clk_domains_mask.super.data[0] |= (u32) BIT(i); - if (vf_point->gpc_mhz < vf_point->nvd_mhz) { - max_clkmhz = (((u32)vf_point->nvd_mhz * 100U) / (u32)max_ratio); - if (gpcclk_clkmhz < max_clkmhz) { - gpcclk_clkmhz = max_clkmhz; - } - } - break; - case CTRL_CLK_DOMAIN_HOSTCLK: - p0_info = pstate_get_clk_set_info(g, - CTRL_PERF_PSTATE_P0, CLKWHICH_HOSTCLK); - if(p0_info == NULL){ - nvgpu_err(g, "failed to get HOSTCLK P0 info"); - break; - } - max_ratio = (vf_point->host_mhz*100U)/vf_point->gpc_mhz; - if ( vf_point->host_mhz < p0_info->min_mhz ) { - vf_point->host_mhz = p0_info->min_mhz; - } - if (vf_point->host_mhz > p0_info->max_mhz) { - vf_point->host_mhz = p0_info->max_mhz; - } - change_input.clk[i].clk_freq_khz = (u32)vf_point->host_mhz * 1000U; - change_input.clk_domains_mask.super.data[0] |= (u32) BIT(i); - if (vf_point->gpc_mhz < vf_point->host_mhz) { - max_clkmhz = (((u32)vf_point->host_mhz * 100U) / (u32)max_ratio); - if (gpcclk_clkmhz < max_clkmhz) { - gpcclk_clkmhz = max_clkmhz; - } - } - break; - default: - nvgpu_pmu_dbg(g, "Fixed clock domain"); - break; - } - } - - change_input.pstate_index = 0U; - change_input.flags = (u32)CTRL_PERF_CHANGE_SEQ_CHANGE_FORCE; - change_input.vf_points_cache_counter = 0xFFFFFFFFU; - - status = clk_domain_freq_to_volt(g, gpcclk_domain, - &gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC); - - status = nvgpu_vfe_get_volt_margin_limit(g, &vmargin_uv); - if (status != 0) { - nvgpu_err(g, "Failed to fetch Vmargin status=0x%x", status); - return status; - } - - gpcclk_voltuv += vmargin_uv; - status = nvgpu_volt_get_vmin_ps35(g, &vmin_uv); - if (status != 0) { - nvgpu_err(g, "Failed to execute Vmin get_status status=0x%x", - status); - } - if ((status == 0) && (vmin_uv > gpcclk_voltuv)) { - gpcclk_voltuv = vmin_uv; - nvgpu_log_fn(g, "Vmin is higher than evaluated Volt"); - } - - change_input.volt[0].voltage_uv = gpcclk_voltuv; - 
change_input.volt[0].voltage_min_noise_unaware_uv = gpcclk_voltuv; - change_input.volt_rails_mask.super.data[0] = 1U; - - /* RPC to PMU to queue to execute change sequence request*/ - (void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_perf_change_seq_queue_change )); - rpc.change = change_input; - rpc.change.pstate_index = 0; - PMU_RPC_EXECUTE_CPB(status, pmu, PERF, CHANGE_SEQ_QUEUE_CHANGE, &rpc, 0); - if (status != 0) { - nvgpu_err(g, "Failed to execute Change Seq RPC status=0x%x", - status); - } - - /* Wait for sync change to complete. */ - if ((rpc.change.flags & CTRL_PERF_CHANGE_SEQ_CHANGE_ASYNC) == 0U) { - nvgpu_msleep(20); - } - return status; -} - int nvgpu_clk_arb_find_slave_points(struct nvgpu_clk_arb *arb, struct nvgpu_clk_slave_freq *vf_point) { @@ -718,7 +564,7 @@ int nvgpu_clk_vf_point_cache(struct gk20a *g) u32 ver = g->params.gpu_arch + g->params.gpu_impl; nvgpu_log_info(g, " "); - pclk_vf_points = &g->clk_pmu->clk_vf_pointobjs; + pclk_vf_points = g->clk_pmu->clk_vf_pointobjs; pboardobjgrp = &pclk_vf_points->super.super; pboardobjgrpmask = &pclk_vf_points->super.mask.super; @@ -747,13 +593,14 @@ int nvgpu_clk_vf_point_cache(struct gk20a *g) } } else { - voltage_min_uv = g->clk_pmu->avfs_fllobjs.lut_min_voltage_uv; - voltage_step_size_uv = g->clk_pmu->avfs_fllobjs.lut_step_size_uv; + voltage_min_uv = g->clk_pmu->avfs_fllobjs->lut_min_voltage_uv; + voltage_step_size_uv = + g->clk_pmu->avfs_fllobjs->lut_step_size_uv; BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct boardobj*, pboardobj, index) { pclk_vf_point = (struct clk_vf_point *)(void *)pboardobj; gpcclk_voltuv = voltage_min_uv + index * voltage_step_size_uv; - status = clk_domain_volt_to_freq(g, 0, + status = nvgpu_clk_domain_volt_to_freq(g, 0, &gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC); if (status != 0) { nvgpu_err(g, "Failed to get freq for requested voltage"); @@ -765,3 +612,25 @@ int nvgpu_clk_vf_point_cache(struct gk20a *g) } return status; } + +int nvgpu_clk_vf_point_init_pmupstate(struct gk20a *g) +{ + /* If already allocated, do not re-allocate */ + if (g->clk_pmu->clk_vf_pointobjs != NULL) { + return 0; + } + + g->clk_pmu->clk_vf_pointobjs = nvgpu_kzalloc(g, + sizeof(*g->clk_pmu->clk_vf_pointobjs)); + if (g->clk_pmu->clk_vf_pointobjs == NULL) { + return -ENOMEM; + } + + return 0; +} + +void nvgpu_clk_vf_point_free_pmupstate(struct gk20a *g) +{ + nvgpu_kfree(g, g->clk_pmu->clk_vf_pointobjs); + g->clk_pmu->clk_vf_pointobjs = NULL; +} diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_vin.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_vin.c index a21fddeb1..5b4730a27 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_vin.c +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_vin.c @@ -24,16 +24,40 @@ #include #include #include +#include #include #include #include #include +#include #include "gp106/bios_gp106.h" - -#include "clk.h" #include "clk_vin.h" +struct nvgpu_clk_vin_rpc_pmucmdhandler_params { + struct nv_pmu_clk_rpc *prpccall; + u32 success; +}; + +void nvgpu_clk_vin_rpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, + void *param, u32 handle, u32 status) +{ + struct nvgpu_clk_vin_rpc_pmucmdhandler_params *phandlerparams = + (struct nvgpu_clk_vin_rpc_pmucmdhandler_params *)param; + + nvgpu_log_info(g, " "); + + if (msg->msg.clk.msg_type != NV_PMU_CLK_MSG_ID_RPC) { + nvgpu_err(g, "unsupported msg for CLK LOAD RPC %x", + msg->msg.clk.msg_type); + return; + } + + if (phandlerparams->prpccall->b_supported) { + phandlerparams->success = 1; + } +} + static int devinit_get_vin_device_table(struct gk20a *g, struct 
nvgpu_avfsvinobjs *pvinobjs); @@ -46,7 +70,8 @@ static int vin_device_construct_v20(struct gk20a *g, static int vin_device_construct_super(struct gk20a *g, struct boardobj **ppboardobj, size_t size, void *pargs); -static struct vin_device *construct_vin_device(struct gk20a *g, void *pargs); +static struct nvgpu_vin_device *construct_vin_device( + struct gk20a *g, void *pargs); static int vin_device_init_pmudata_v10(struct gk20a *g, struct boardobj *board_obj_ptr, @@ -58,36 +83,14 @@ static int vin_device_init_pmudata_super(struct gk20a *g, struct boardobj *board_obj_ptr, struct nv_pmu_boardobj *ppmudata); -int nvgpu_clk_avfs_get_vin_cal_fuse_v10(struct gk20a *g, - struct nvgpu_avfsvinobjs *pvinobjs, - struct vin_device_v20 *pvindev) +static struct nvgpu_vin_device *clk_get_vin_from_index( + struct nvgpu_avfsvinobjs *pvinobjs, u8 idx) { - int status = 0; - u32 slope, intercept; - u8 i; - - if (pvinobjs->calibration_rev_vbios == g->ops.fuse.read_vin_cal_fuse_rev(g)) { - BOARDOBJGRP_FOR_EACH(&(pvinobjs->super.super), - struct vin_device_v20 *, pvindev, i) { - slope = 0; - intercept = 0; - pvindev = (struct vin_device_v20 *)CLK_GET_VIN_DEVICE(pvinobjs, i); - status = g->ops.fuse.read_vin_cal_slope_intercept_fuse(g, - pvindev->super.id, &slope, &intercept); - if (status != 0) { - nvgpu_err(g, - "err reading vin cal for id %x", pvindev->super.id); - return status; - } - pvindev->data.vin_cal.cal_v10.slope = slope; - pvindev->data.vin_cal.cal_v10.intercept = intercept; - } - } - return status; - + return ((struct nvgpu_vin_device *)BOARDOBJGRP_OBJ_GET_BY_IDX( + ((struct boardobjgrp *)&(pvinobjs->super.super)), idx)); } -int nvgpu_clk_avfs_get_vin_cal_fuse_v20(struct gk20a *g, +static int nvgpu_clk_avfs_get_vin_cal_fuse_v20(struct gk20a *g, struct nvgpu_avfsvinobjs *pvinobjs, struct vin_device_v20 *pvindev) { @@ -95,12 +98,14 @@ int nvgpu_clk_avfs_get_vin_cal_fuse_v20(struct gk20a *g, s8 gain, offset; u8 i; - if (pvinobjs->calibration_rev_vbios == g->ops.fuse.read_vin_cal_fuse_rev(g)) { + if (pvinobjs->calibration_rev_vbios == + g->ops.fuse.read_vin_cal_fuse_rev(g)) { BOARDOBJGRP_FOR_EACH(&(pvinobjs->super.super), - struct vin_device_v20 *, pvindev, i) { + struct vin_device_v20 *, pvindev, i) { gain = 0; offset = 0; - pvindev = (struct vin_device_v20 *)CLK_GET_VIN_DEVICE(pvinobjs, i); + pvindev = (struct vin_device_v20 *)(void *) + g->clk_pmu->clk_get_vin(pvinobjs, i); status = g->ops.fuse.read_vin_cal_gain_offset_fuse(g, pvindev->super.id, &gain, &offset); if (status != 0) { @@ -188,7 +193,7 @@ int nvgpu_clk_vin_sw_setup(struct gk20a *g) nvgpu_log_info(g, " "); - status = boardobjgrpconstruct_e32(g, &g->clk_pmu->avfs_vinobjs.super); + status = boardobjgrpconstruct_e32(g, &g->clk_pmu->avfs_vinobjs->super); if (status != 0) { nvgpu_err(g, "error creating boardobjgrp for clk vin, statu - 0x%x", @@ -196,8 +201,8 @@ int nvgpu_clk_vin_sw_setup(struct gk20a *g) goto done; } - pboardobjgrp = &g->clk_pmu->avfs_vinobjs.super.super; - pvinobjs = &g->clk_pmu->avfs_vinobjs; + pboardobjgrp = &g->clk_pmu->avfs_vinobjs->super.super; + pvinobjs = g->clk_pmu->avfs_vinobjs; BOARDOBJGRP_PMU_CONSTRUCT(pboardobjgrp, CLK, VIN_DEVICE); @@ -214,16 +219,16 @@ int nvgpu_clk_vin_sw_setup(struct gk20a *g) pboardobjgrp->pmudatainstget = _clk_vin_devgrp_pmudata_instget; pboardobjgrp->pmustatusinstget = _clk_vin_devgrp_pmustatus_instget; - status = devinit_get_vin_device_table(g, &g->clk_pmu->avfs_vinobjs); + status = devinit_get_vin_device_table(g, g->clk_pmu->avfs_vinobjs); if (status != 0) { goto done; } /*update vin 
calibration to fuse */ - g->ops.pmu_ver.clk.clk_avfs_get_vin_cal_data(g, pvinobjs, pvindev); + nvgpu_clk_avfs_get_vin_cal_fuse_v20(g, pvinobjs, pvindev); status = BOARDOBJGRP_PMU_CMD_GRP_GET_STATUS_CONSTRUCT(g, - &g->clk_pmu->avfs_vinobjs.super.super, + &g->clk_pmu->avfs_vinobjs->super.super, clk, CLK, clk_vin_device, CLK_VIN_DEVICE); if (status != 0) { nvgpu_err(g, @@ -244,7 +249,7 @@ int nvgpu_clk_vin_pmu_setup(struct gk20a *g) nvgpu_log_info(g, " "); - pboardobjgrp = &g->clk_pmu->avfs_vinobjs.super.super; + pboardobjgrp = &g->clk_pmu->avfs_vinobjs->super.super; if (!pboardobjgrp->bconstructed) { return -EINVAL; @@ -267,12 +272,12 @@ static int devinit_get_vin_device_table(struct gk20a *g, u32 index = 0; u32 slope=0, intercept=0; s8 offset = 0, gain = 0; - struct vin_device *pvin_dev; + struct nvgpu_vin_device *pvin_dev; u32 cal_type; union { struct boardobj boardobj; - struct vin_device vin_device; + struct nvgpu_vin_device vin_device; struct vin_device_v10 vin_device_v10; struct vin_device_v20 vin_device_v20; } vin_device_data; @@ -332,7 +337,8 @@ static int devinit_get_vin_device_table(struct gk20a *g, nvgpu_memcpy((u8 *)&vin_desc_table_entry, vin_tbl_entry_ptr, sizeof(struct vin_descriptor_entry_10)); - if (vin_desc_table_entry.vin_device_type == CTRL_CLK_VIN_TYPE_DISABLED) { + if (vin_desc_table_entry.vin_device_type == + CTRL_CLK_VIN_TYPE_DISABLED) { continue; } @@ -438,8 +444,9 @@ static int vin_device_construct_super(struct gk20a *g, struct boardobj **ppboardobj, size_t size, void *pargs) { - struct vin_device *pvin_device; - struct vin_device *ptmpvin_device = (struct vin_device *)pargs; + struct nvgpu_vin_device *pvin_device; + struct nvgpu_vin_device *ptmpvin_device = + (struct nvgpu_vin_device *)pargs; int status = 0; status = boardobj_construct_super(g, ppboardobj, size, pargs); @@ -447,7 +454,7 @@ static int vin_device_construct_super(struct gk20a *g, return -EINVAL; } - pvin_device = (struct vin_device *)*ppboardobj; + pvin_device = (struct nvgpu_vin_device *)*ppboardobj; pvin_device->super.pmudatainit = vin_device_init_pmudata_super; @@ -459,7 +466,8 @@ static int vin_device_construct_super(struct gk20a *g, return status; } -static struct vin_device *construct_vin_device(struct gk20a *g, void *pargs) +static struct nvgpu_vin_device *construct_vin_device( + struct gk20a *g, void *pargs) { struct boardobj *board_obj_ptr = NULL; int status; @@ -486,7 +494,7 @@ static struct vin_device *construct_vin_device(struct gk20a *g, void *pargs) nvgpu_log_info(g, " Done"); - return (struct vin_device *)board_obj_ptr; + return (struct nvgpu_vin_device *)board_obj_ptr; } @@ -553,7 +561,7 @@ static int vin_device_init_pmudata_super(struct gk20a *g, struct nv_pmu_boardobj *ppmudata) { int status = 0; - struct vin_device *pvin_dev; + struct nvgpu_vin_device *pvin_dev; struct nv_pmu_clk_clk_vin_device_boardobj_set *perf_pmu_data; nvgpu_log_info(g, " "); @@ -563,7 +571,7 @@ static int vin_device_init_pmudata_super(struct gk20a *g, return status; } - pvin_dev = (struct vin_device *)board_obj_ptr; + pvin_dev = (struct nvgpu_vin_device *)board_obj_ptr; perf_pmu_data = (struct nv_pmu_clk_clk_vin_device_boardobj_set *) ppmudata; @@ -575,3 +583,92 @@ static int vin_device_init_pmudata_super(struct gk20a *g, return status; } + +int nvgpu_clk_pmu_vin_load(struct gk20a *g) +{ + struct pmu_cmd cmd; + struct pmu_payload payload; + int status; + u32 seqdesc; + struct nv_pmu_clk_rpc rpccall; + struct nvgpu_clk_vin_rpc_pmucmdhandler_params handler; + struct nv_pmu_clk_load *clkload; + + (void) 
memset(&payload, 0, sizeof(struct pmu_payload)); + (void) memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc)); + (void) memset(&handler, 0, + sizeof(struct nvgpu_clk_vin_rpc_pmucmdhandler_params)); + + rpccall.function = NV_PMU_CLK_RPC_ID_LOAD; + clkload = &rpccall.params.clk_load; + clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_VIN; + clkload->action_mask = + NV_NV_PMU_CLK_LOAD_ACTION_MASK_VIN_HW_CAL_PROGRAM_YES << 4; + + cmd.hdr.unit_id = PMU_UNIT_CLK; + cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) + + (u32)sizeof(struct pmu_hdr); + + cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC; + cmd.cmd.clk.generic.b_perf_daemon_cmd = false; + + payload.in.buf = (u8 *)&rpccall; + payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc); + payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; + nvgpu_assert(NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET < U64(U32_MAX)); + payload.in.offset = (u32)NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET; + + payload.out.buf = (u8 *)&rpccall; + payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc); + payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; + nvgpu_assert(NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET < U64(U32_MAX)); + payload.out.offset = (u32)NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET; + + handler.prpccall = &rpccall; + handler.success = 0; + status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, + PMU_COMMAND_QUEUE_LPQ, + nvgpu_clk_vin_rpc_pmucmdhandler, (void *)&handler, + &seqdesc); + + if (status != 0) { + nvgpu_err(g, "unable to post clk RPC cmd %x", + cmd.cmd.clk.cmd_type); + goto done; + } + + pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g), + &handler.success, 1); + + if (handler.success == 0U) { + nvgpu_err(g, "rpc call to load vin cal failed"); + status = -EINVAL; + } + +done: + return status; +} + +int nvgpu_clk_vin_init_pmupstate(struct gk20a *g) +{ + /* If already allocated, do not re-allocate */ + if (g->clk_pmu->avfs_vinobjs != NULL) { + return 0; + } + + g->clk_pmu->avfs_vinobjs = nvgpu_kzalloc(g, + sizeof(*g->clk_pmu->avfs_vinobjs)); + if (g->clk_pmu->avfs_vinobjs == NULL) { + return -ENOMEM; + } + + g->clk_pmu->clk_get_vin = clk_get_vin_from_index; + + return 0; +} + +void nvgpu_clk_vin_free_pmupstate(struct gk20a *g) +{ + nvgpu_kfree(g, g->clk_pmu->avfs_vinobjs); + g->clk_pmu->avfs_vinobjs = NULL; +} diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_vin.h b/drivers/gpu/nvgpu/common/pmu/clk/clk_vin.h index 037a32a20..e3ee7b526 100644 --- a/drivers/gpu/nvgpu/common/pmu/clk/clk_vin.h +++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_vin.h @@ -1,5 +1,5 @@ /* -* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. +* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -26,41 +26,19 @@ #include #include #include - -struct vin_device; -struct nvgpu_clk_pmupstate; - -typedef u32 vin_device_state_load(struct gk20a *g, - struct nvgpu_clk_pmupstate *clk, struct vin_device *pdev); - -struct vin_device { - struct boardobj super; - u8 id; - u8 volt_domain; - u8 volt_domain_vbios; - u32 flls_shared_mask; - - vin_device_state_load *state_load; -}; +#include struct vin_device_v10 { - struct vin_device super; + struct nvgpu_vin_device super; struct ctrl_clk_vin_device_info_data_v10 data; }; struct vin_device_v20 { - struct vin_device super; + struct nvgpu_vin_device super; struct ctrl_clk_vin_device_info_data_v20 data; }; -/* get vin device object from descriptor table index*/ -#define CLK_GET_VIN_DEVICE(pvinobjs, dev_index) \ - ((struct vin_device *)BOARDOBJGRP_OBJ_GET_BY_IDX( \ - ((struct boardobjgrp *)&(pvinobjs->super.super)), (dev_index))) - -int construct_vindevice(struct gk20a *g, struct boardobj **ppboardobj, - u16 size, void *args); -int vindeviceinit_pmudata_super(struct gk20a *g, struct boardobj *pboardobj, - struct nv_pmu_boardobj *pmudata); +void nvgpu_clk_vin_rpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, + void *param, u32 handle, u32 status); #endif /* NVGPU_CLK_VIN_H */ diff --git a/drivers/gpu/nvgpu/common/pmu/perf/change_seq.c b/drivers/gpu/nvgpu/common/pmu/perf/change_seq.c index 37bf6d823..442eaa547 100644 --- a/drivers/gpu/nvgpu/common/pmu/perf/change_seq.c +++ b/drivers/gpu/nvgpu/common/pmu/perf/change_seq.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include "pmu_perf.h" @@ -127,7 +128,7 @@ static void build_change_seq_boot (struct gk20a *g) script_last->buf.change.data.flags = CTRL_PERF_CHANGE_SEQ_CHANGE_NONE; - BOARDOBJGRP_FOR_EACH(&(g->clk_pmu->clk_domainobjs.super.super), + BOARDOBJGRP_FOR_EACH(&(g->clk_pmu->clk_domainobjs->super.super), struct nvgpu_clk_domain *, pdomain, i) { p0_info = pstate_get_clk_set_info(g, CTRL_PERF_PSTATE_P0, diff --git a/drivers/gpu/nvgpu/common/pmu/perf/perf_gv100.c b/drivers/gpu/nvgpu/common/pmu/perf/perf_gv100.c index c15c67c77..9a2ba6fbc 100644 --- a/drivers/gpu/nvgpu/common/pmu/perf/perf_gv100.c +++ b/drivers/gpu/nvgpu/common/pmu/perf/perf_gv100.c @@ -58,7 +58,6 @@ static int pmu_set_boot_clk_runcb_fn(void *arg) nvgpu_err(g, "Failed to execute RPC status=0x%x", status); } - status = nvgpu_clk_set_fll_clk_gv10x(g); } return 0; diff --git a/drivers/gpu/nvgpu/common/pmu/perf/vfe_equ.c b/drivers/gpu/nvgpu/common/pmu/perf/vfe_equ.c index 15d1b59c3..6dd9cef85 100644 --- a/drivers/gpu/nvgpu/common/pmu/perf/vfe_equ.c +++ b/drivers/gpu/nvgpu/common/pmu/perf/vfe_equ.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "pmu_perf.h" #include "vfe_equ.h" @@ -773,7 +774,7 @@ int nvgpu_vfe_get_freq_margin_limit(struct gk20a *g, u32 *fmargin_mhz) struct nv_pmu_rpc_struct_perf_vfe_eval rpc; int status = 0; u8 fmargin_idx; - struct nvgpu_avfsfllobjs *pfllobjs = &(g->clk_pmu->avfs_fllobjs); + struct nvgpu_avfsfllobjs *pfllobjs = g->clk_pmu->avfs_fllobjs; fmargin_idx = pfllobjs->freq_margin_vfe_idx; if (fmargin_idx == 255U) { diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c index 80fd33970..c954ae909 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c @@ -34,7 +34,6 @@ #include #include #include -#include /* PMU NS UCODE IMG */ #define 
NVGPU_PMU_NS_UCODE_IMAGE "gpmu_ucode.bin" @@ -1337,15 +1336,8 @@ static int init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu, u32 app_version) boardobjgrp_pmugetstatus_impl_v1; g->ops.pmu_ver.boardobj.is_boardobjgrp_pmucmd_id_valid = is_boardobjgrp_pmucmd_id_valid_v1; - g->ops.pmu_ver.clk.get_vbios_clk_domain = - nvgpu_clk_get_vbios_clk_domain_gv10x; - g->ops.pmu_ver.clk.clk_avfs_get_vin_cal_data = - nvgpu_clk_avfs_get_vin_cal_fuse_v20; - g->ops.pmu_ver.clk.clk_vf_change_inject_data_fill = - nvgpu_clk_vf_change_inject_data_fill_gv10x; if (app_version == APP_VERSION_GV10X) { - g->ops.pmu_ver.clk.clk_set_boot_clk = - nvgpu_clk_set_boot_fll_clk_gv10x; + g->ops.pmu_ver.clk.clk_set_boot_clk = NULL; } else { g->ops.pmu_ver.clk.clk_set_boot_clk = NULL; } @@ -1508,12 +1500,6 @@ static int init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu, u32 app_version) boardobjgrp_pmugetstatus_impl; g->ops.pmu_ver.boardobj.is_boardobjgrp_pmucmd_id_valid = is_boardobjgrp_pmucmd_id_valid_v0; - g->ops.pmu_ver.clk.get_vbios_clk_domain = - nvgpu_clk_get_vbios_clk_domain_gp10x; - g->ops.pmu_ver.clk.clk_avfs_get_vin_cal_data = - nvgpu_clk_avfs_get_vin_cal_fuse_v10; - g->ops.pmu_ver.clk.clk_vf_change_inject_data_fill = - nvgpu_clk_vf_change_inject_data_fill_gp10x; break; case APP_VERSION_GM20B: g->ops.pmu_ver.pg_cmd_eng_buf_load_size = diff --git a/drivers/gpu/nvgpu/common/pmu/pstate/pstate.c b/drivers/gpu/nvgpu/common/pmu/pstate/pstate.c index b57b93398..07a328368 100644 --- a/drivers/gpu/nvgpu/common/pmu/pstate/pstate.c +++ b/drivers/gpu/nvgpu/common/pmu/pstate/pstate.c @@ -25,7 +25,18 @@ #include #include #include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include #include #include #include @@ -40,6 +51,13 @@ void gk20a_deinit_pstate_support(struct gk20a *g) pmgr_pmu_free_pmupstate(g); nvgpu_therm_pmu_free_pmupstate(g); nvgpu_perf_pmu_free_pmupstate(g); + nvgpu_clk_domain_free_pmupstate(g); + nvgpu_clk_prog_free_pmupstate(g); + nvgpu_clk_vf_point_free_pmupstate(g); + nvgpu_clk_freq_domain_free_pmupstate(g); + nvgpu_clk_freq_controller_free_pmupstate(g); + nvgpu_clk_fll_free_pmupstate(g); + nvgpu_clk_vin_free_pmupstate(g); nvgpu_clk_free_pmupstate(g); if (g->ops.clk.mclk_deinit != NULL) { @@ -65,6 +83,41 @@ int gk20a_init_pstate_support(struct gk20a *g) return err; } + err = nvgpu_clk_domain_init_pmupstate(g); + if (err != 0) { + return err; + } + + err = nvgpu_clk_prog_init_pmupstate(g); + if (err != 0) { + return err; + } + + err = nvgpu_clk_vf_point_init_pmupstate(g); + if (err != 0) { + return err; + } + + err = nvgpu_clk_freq_domain_init_pmupstate(g); + if (err != 0) { + return err; + } + + err = nvgpu_clk_freq_controller_init_pmupstate(g); + if (err != 0) { + return err; + } + + err = nvgpu_clk_vin_init_pmupstate(g); + if (err != 0) { + return err; + } + + err = nvgpu_clk_fll_init_pmupstate(g); + if (err != 0) { + return err; + } + err = nvgpu_perf_pmu_init_pmupstate(g); if (err != 0) { goto err_clk_init_pmupstate; @@ -410,7 +463,7 @@ static int parse_pstate_entry_5x(struct gk20a *g, struct nvgpu_clk_domain *clk_domain; clk_domain = (struct nvgpu_clk_domain *)BOARDOBJGRP_OBJ_GET_BY_IDX( - &g->clk_pmu->clk_domainobjs.super.super, clkidx); + &g->clk_pmu->clk_domainobjs->super.super, clkidx); pclksetinfo = &pstate->clklist.clksetinfo[clkidx]; clk_entry = (struct vbios_pstate_entry_clock_5x *)p; diff --git a/drivers/gpu/nvgpu/gp106/clk_gp106.c b/drivers/gpu/nvgpu/gp106/clk_gp106.c index be576fe25..fbf336910 100644 --- a/drivers/gpu/nvgpu/gp106/clk_gp106.c +++ 
b/drivers/gpu/nvgpu/gp106/clk_gp106.c @@ -1,7 +1,7 @@ /* * GP106 Clocks * - * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -30,13 +30,12 @@ #include #include #include - -#include "common/pmu/clk/clk.h" +#include +#include +#include #include "clk_gp106.h" -#include - #define CLK_NAMEMAP_INDEX_GPC2CLK 0x00U #define CLK_NAMEMAP_INDEX_XBAR2CLK 0x02U #define CLK_NAMEMAP_INDEX_SYS2CLK 0x07U /* SYSPLL */ @@ -268,7 +267,7 @@ int gp106_clk_domain_get_f_points( return -EINVAL; } - BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super), + BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs->super.super), struct nvgpu_clk_domain *, pdomain, i) { if (pdomain->api_domain == clkapidomain) { status = pdomain->clkdomainclkgetfpoints(g, pclk, diff --git a/drivers/gpu/nvgpu/gv100/clk_gv100.c b/drivers/gpu/nvgpu/gv100/clk_gv100.c index 2b1c838fd..73618e8d4 100644 --- a/drivers/gpu/nvgpu/gv100/clk_gv100.c +++ b/drivers/gpu/nvgpu/gv100/clk_gv100.c @@ -1,7 +1,7 @@ /* * GV100 Clocks * - * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -35,10 +35,12 @@ #include #include #include +#include +#include #include "clk_gv100.h" -#include + #define CLK_NAMEMAP_INDEX_GPCCLK 0x00 #define CLK_NAMEMAP_INDEX_XBARCLK 0x02 @@ -220,7 +222,7 @@ int gv100_clk_domain_get_f_points( if ((pfreqpointsinmhz == NULL) && (*pfpointscount != 0U)) { return -EINVAL; } - BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super), + BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs->super.super), struct nvgpu_clk_domain *, pdomain, i) { if (pdomain->api_domain == clkapidomain) { status = pdomain->clkdomainclkgetfpoints(g, pclk, diff --git a/drivers/gpu/nvgpu/include/nvgpu/bios.h b/drivers/gpu/nvgpu/include/nvgpu/bios.h index 94c776f23..699f6b92f 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/bios.h +++ b/drivers/gpu/nvgpu/include/nvgpu/bios.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved. 
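gp106_clk_domain_get_f_points() and gv100_clk_domain_get_f_points() in the two hunks above walk the clock-domain group and forward the query to the matching domain's clkdomainclkgetfpoints() method. The NULL-buffer guard suggests the usual two-pass calling convention; a hedged caller sketch (the exact function signature and the CTRL_CLK_DOMAIN_GPCCLK constant are assumptions, not taken from this patch):

/*
 * Illustrative caller only: first ask for the number of V/F frequency
 * points with a NULL buffer, then fetch them into an array of that size.
 */
static int example_dump_gpcclk_f_points(struct gk20a *g)
{
	u32 count = 0U;
	u16 *points_mhz;
	int err;

	/* Pass 1: NULL buffer and zero count -> only the count is filled. */
	err = gv100_clk_domain_get_f_points(g, CTRL_CLK_DOMAIN_GPCCLK,
			&count, NULL);
	if (err != 0 || count == 0U) {
		return err;
	}

	points_mhz = nvgpu_kcalloc(g, count, sizeof(*points_mhz));
	if (points_mhz == NULL) {
		return -ENOMEM;
	}

	/* Pass 2: fetch the frequency points in MHz. */
	err = gv100_clk_domain_get_f_points(g, CTRL_CLK_DOMAIN_GPCCLK,
			&count, points_mhz);

	nvgpu_kfree(g, points_mhz);
	return err;
}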
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -615,7 +615,8 @@ struct vbios_pstate_entry_5x { u8 pstate_level; u8 flags0; u8 lpwr_entry_idx; - struct vbios_pstate_entry_clock_5x clockEntry[PERF_CLK_DOMAINS_IDX_MAX]; + struct vbios_pstate_entry_clock_5x + nvgpu_clockEntry[PERF_CLK_DOMAINS_IDX_MAX]; } __packed; #define VBIOS_PSTATE_5X_CLOCK_PROG_PARAM0_NOM_FREQ_MHZ_SHIFT 0U diff --git a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h index 7bce6eda1..53e7a48aa 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h +++ b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h @@ -1217,13 +1217,6 @@ struct gpu_ops { struct boardobjgrp_pmu_cmd *cmd); } boardobj; struct { - u32 (*get_vbios_clk_domain)(u32 vbios_domain); - int (*clk_avfs_get_vin_cal_data)(struct gk20a *g, - struct nvgpu_avfsvinobjs *pvinobjs, - struct vin_device_v20 *pvindev); - u32 (*clk_vf_change_inject_data_fill)(struct gk20a *g, - struct nv_pmu_clk_rpc *rpccall, - struct nvgpu_set_fll_clk *setfllclk); int (*clk_set_boot_clk)(struct gk20a *g); }clk; } pmu_ver; diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk.h index d4efb7f46..1dc503b94 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk.h @@ -25,33 +25,95 @@ #ifndef NVGPU_PMU_CLK_H #define NVGPU_PMU_CLK_H -#include -#include -#include -#include -#include -#include -#include -#include +#include struct gk20a; +struct nvgpu_avfsfllobjs; +struct nvgpu_clk_domains; +struct nvgpu_clk_progs; +struct nvgpu_clk_vf_points; +struct nvgpu_clk_mclk_state; +struct nvgpu_clk_freq_controllers; +struct nvgpu_clk_freq_domain_grp; +struct nvgpu_clk_slave_freq; +struct ctrl_perf_change_seq_change_input; -struct nvgpu_clk_pmupstate { - struct nvgpu_avfsvinobjs avfs_vinobjs; - struct nvgpu_avfsfllobjs avfs_fllobjs; - struct nvgpu_clk_domains clk_domainobjs; - struct nvgpu_clk_progs clk_progobjs; - struct nvgpu_clk_vf_points clk_vf_pointobjs; - struct nvgpu_clk_mclk_state clk_mclk; - struct nvgpu_clk_freq_controllers clk_freq_controllers; - struct nvgpu_clk_freq_domain_grp freq_domain_grp_objs; +struct nvgpu_clkrpc_pmucmdhandler_params { + struct nv_pmu_clk_rpc *prpccall; + u32 success; }; +struct nvgpu_clockentry { + u8 vbios_clk_domain; + u8 clk_which; + u8 perf_index; + u32 api_clk_domain; +}; + +struct nvgpu_set_fll_clk { + u32 voltuv; + u16 gpc2clkmhz; + u8 current_regime_id_gpc; + u8 target_regime_id_gpc; + u16 sys2clkmhz; + u8 current_regime_id_sys; + u8 target_regime_id_sys; + u16 xbar2clkmhz; + u8 current_regime_id_xbar; + u8 target_regime_id_xbar; + u16 nvdclkmhz; + u8 current_regime_id_nvd; + u8 target_regime_id_nvd; + u16 hostclkmhz; + u8 current_regime_id_host; + u8 target_regime_id_host; +}; + +struct nvgpu_clk_pmupstate { + struct nvgpu_avfsvinobjs *avfs_vinobjs; + struct nvgpu_avfsfllobjs *avfs_fllobjs; + struct nvgpu_clk_domains *clk_domainobjs; + struct nvgpu_clk_progs *clk_progobjs; + struct nvgpu_clk_vf_points *clk_vf_pointobjs; + struct nvgpu_clk_freq_controllers *clk_freq_controllers; + struct nvgpu_clk_freq_domain_grp *freq_domain_grp_objs; + + /* clk_domain unit functions */ + int (*get_fll)(struct gk20a *g, struct nvgpu_set_fll_clk *setfllclk); + int (*set_boot_fll)(struct gk20a *g); + void (*set_p0_clks)(struct gk20a *g, u8 *gpcclk_domain, + u32 *gpcclk_clkmhz, struct nvgpu_clk_slave_freq *vf_point, + struct 
ctrl_perf_change_seq_change_input *change_input); + struct nvgpu_clk_domain *(*clk_get_clk_domain) + (struct nvgpu_clk_pmupstate *pclk, u8 idx); + int (*clk_domain_clk_prog_link)(struct gk20a *g, + struct nvgpu_clk_pmupstate *pclk); + + /* clk_vin unit functions */ + struct nvgpu_vin_device *(*clk_get_vin) + (struct nvgpu_avfsvinobjs *pvinobjs, u8 idx); + + /* clk_fll unit functions */ + u8 (*find_regime_id)(struct gk20a *g, u32 domain, u16 clkmhz); + int (*set_regime_id)(struct gk20a *g, u32 domain, u8 regimeid); + int (*get_regime_id)(struct gk20a *g, u32 domain, u8 *regimeid); + u8 (*get_fll_lut_vf_num_entries)(struct nvgpu_clk_pmupstate *pclk); + u32 (*get_fll_lut_min_volt)(struct nvgpu_clk_pmupstate *pclk); + u32 (*get_fll_lut_step_size)(struct nvgpu_clk_pmupstate *pclk); +}; + +void nvgpu_clkrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, + void *param, u32 handle, u32 status); int nvgpu_clk_init_pmupstate(struct gk20a *g); void nvgpu_clk_free_pmupstate(struct gk20a *g); -int nvgpu_clk_set_fll_clk_gv10x(struct gk20a *g); -int nvgpu_clk_set_boot_fll_clk_gv10x(struct gk20a *g); int nvgpu_clk_set_boot_fll_clk_tu10x(struct gk20a *g); -int nvgpu_clk_get_fll_clks(struct gk20a *g, struct nvgpu_set_fll_clk *setfllclk); +int nvgpu_clk_get_fll_clks(struct gk20a *g, + struct nvgpu_set_fll_clk *setfllclk); +int nvgpu_clk_set_fll_clks(struct gk20a *g, + struct nvgpu_set_fll_clk *setfllclk); +int nvgpu_clk_domain_freq_to_volt(struct gk20a *g, u8 clkdomain_idx, + u32 *pclkmhz, u32 *pvoltuv, u8 railidx); +int nvgpu_clk_set_req_fll_clk_ps35(struct gk20a *g, + struct nvgpu_clk_slave_freq *vf_point); #endif /* NVGPU_PMU_CLK_H */ diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_domain.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_domain.h index d9f8ea6a9..7aef57956 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_domain.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_domain.h @@ -25,25 +25,24 @@ #ifndef NVGPU_PMU_CLK_DOMAIN_H #define NVGPU_PMU_CLK_DOMAIN_H -#include -#include #include -#include -#include struct gk20a; struct nvgpu_clk_domain; +struct nvgpu_clk_slave_freq; +struct ctrl_perf_change_seq_change_input; +struct nvgpu_clk_pmupstate; typedef int nvgpu_clkproglink(struct gk20a *g, struct nvgpu_clk_pmupstate *pclk, - struct nvgpu_clk_domain *pdomain); + struct nvgpu_clk_domain *pdomain); typedef int nvgpu_clkvfsearch(struct gk20a *g, struct nvgpu_clk_pmupstate *pclk, - struct nvgpu_clk_domain *pdomain, u16 *clkmhz, - u32 *voltuv, u8 rail); + struct nvgpu_clk_domain *pdomain, u16 *clkmhz, + u32 *voltuv, u8 rail); -typedef int nvgpu_clkgetfpoints(struct gk20a *g, struct nvgpu_clk_pmupstate *pclk, - struct nvgpu_clk_domain *pdomain, u32 *pfpointscount, - u16 *pfreqpointsinmhz, u8 rail); +typedef int nvgpu_clkgetfpoints(struct gk20a *g, + struct nvgpu_clk_pmupstate *pclk, struct nvgpu_clk_domain *pdomain, + u32 *pfpointscount, u16 *pfreqpointsinmhz, u8 rail); struct nvgpu_clk_domain { struct boardobj super; @@ -73,14 +72,16 @@ struct nvgpu_clk_domains { struct boardobjgrpmask_e32 master_domains_mask; struct ctrl_clk_clk_delta deltas; - struct nvgpu_clk_domain *ordered_noise_aware_list[CTRL_BOARDOBJ_MAX_BOARD_OBJECTS]; + struct nvgpu_clk_domain + *ordered_noise_aware_list[CTRL_BOARDOBJ_MAX_BOARD_OBJECTS]; - struct nvgpu_clk_domain *ordered_noise_unaware_list[CTRL_BOARDOBJ_MAX_BOARD_OBJECTS]; + struct nvgpu_clk_domain + *ordered_noise_unaware_list[CTRL_BOARDOBJ_MAX_BOARD_OBJECTS]; }; +int nvgpu_clk_domain_init_pmupstate(struct gk20a *g); +void 
nvgpu_clk_domain_free_pmupstate(struct gk20a *g); int nvgpu_clk_pmu_clk_domains_load(struct gk20a *g); -u32 nvgpu_clk_get_vbios_clk_domain_gv10x( u32 vbios_domain); -u32 nvgpu_clk_get_vbios_clk_domain_gp10x( u32 vbios_domain); int nvgpu_clk_domain_sw_setup(struct gk20a *g); int nvgpu_clk_domain_pmu_setup(struct gk20a *g); diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_fll.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_fll.h index 28601813e..fbd3b6428 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_fll.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_fll.h @@ -25,11 +25,14 @@ #ifndef NVGPU_PMU_CLK_FLL_H #define NVGPU_PMU_CLK_FLL_H -#include -#include #include struct gk20a; +struct fll_device; +struct boardobjgrp_e32; +struct boardobjgrpmask_e32; +struct nv_pmu_clk_lut_device_desc; +struct nv_pmu_clk_regime_desc; struct nvgpu_avfsfllobjs { struct boardobjgrp_e32 super; @@ -41,25 +44,35 @@ struct nvgpu_avfsfllobjs { u8 freq_margin_vfe_idx; }; -struct nvgpu_set_fll_clk { - u32 voltuv; - u16 gpc2clkmhz; - u8 current_regime_id_gpc; - u8 target_regime_id_gpc; - u16 sys2clkmhz; - u8 current_regime_id_sys; - u8 target_regime_id_sys; - u16 xbar2clkmhz; - u8 current_regime_id_xbar; - u8 target_regime_id_xbar; - u16 nvdclkmhz; - u8 current_regime_id_nvd; - u8 target_regime_id_nvd; - u16 hostclkmhz; - u8 current_regime_id_host; - u8 target_regime_id_host; +typedef int fll_lut_broadcast_slave_register(struct gk20a *g, + struct nvgpu_avfsfllobjs *pfllobjs, + struct fll_device *pfll, + struct fll_device *pfll_slave); + +struct fll_device { + struct boardobj super; + u8 id; + u8 mdiv; + u16 input_freq_mhz; + u32 clk_domain; + u8 vin_idx_logic; + u8 vin_idx_sram; + u8 rail_idx_for_lut; + struct nv_pmu_clk_lut_device_desc lut_device; + struct nv_pmu_clk_regime_desc regime_desc; + u8 min_freq_vfe_idx; + u8 freq_ctrl_idx; + u8 target_regime_id_override; + bool b_skip_pldiv_below_dvco_min; + bool b_dvco_1x; + struct boardobjgrpmask_e32 lut_prog_broadcast_slave_mask; + fll_lut_broadcast_slave_register *lut_broadcast_slave_register; }; + +int nvgpu_clk_fll_init_pmupstate(struct gk20a *g); +void nvgpu_clk_fll_free_pmupstate(struct gk20a *g); +u32 nvgpu_clk_get_vbios_clk_domain(u32 vbios_domain); int nvgpu_clk_fll_sw_setup(struct gk20a *g); int nvgpu_clk_fll_pmu_setup(struct gk20a *g); diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_freq_controller.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_freq_controller.h index 4603d3952..99036ef0c 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_freq_controller.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_freq_controller.h @@ -25,11 +25,11 @@ #ifndef NVGPU_PMU_CLK_FREQ_CONTROLLER_H #define NVGPU_PMU_CLK_FREQ_CONTROLLER_H -#include -#include #include struct gk20a; +struct boardobjgrp_e32; +struct boardobjgrpmask_e32; struct nvgpu_clk_freq_controllers { struct boardobjgrp_e32 super; @@ -39,6 +39,9 @@ struct nvgpu_clk_freq_controllers { void *pprereq_load; }; +int nvgpu_clk_freq_controller_init_pmupstate(struct gk20a *g); +void nvgpu_clk_freq_controller_free_pmupstate(struct gk20a *g); +int nvgpu_clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx); int nvgpu_clk_freq_controller_sw_setup(struct gk20a *g); int nvgpu_clk_freq_controller_pmu_setup(struct gk20a *g); diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_freq_domain.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_freq_domain.h index 2d8ff5d01..b07e6fe6d 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_freq_domain.h +++ 
b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_freq_domain.h
@@ -25,14 +25,24 @@
 #ifndef NVGPU_PMU_CLK_FREQ_DOMAIN_H
 #define NVGPU_PMU_CLK_FREQ_DOMAIN_H
 
-#include
 #include
 
+struct gk20a;
+struct boardobj;
+struct boardobjgrp_e32;
+
 struct nvgpu_clk_freq_domain_grp {
 	struct boardobjgrp_e32 super;
 	u32 init_flags;
 };
 
+struct nvgpu_clk_freq_domain {
+	struct boardobj super;
+	u32 clk_domain;
+};
+
+int nvgpu_clk_freq_domain_init_pmupstate(struct gk20a *g);
+void nvgpu_clk_freq_domain_free_pmupstate(struct gk20a *g);
 int nvgpu_clk_freq_domain_sw_setup(struct gk20a *g);
 int nvgpu_clk_freq_domain_pmu_setup(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_mclk.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_mclk.h
deleted file mode 100644
index 8cf7f63e5..000000000
--- a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_mclk.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * general clock structures & definitions
- *
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
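The clk_freq_domain.h hunk above only declares nvgpu_clk_freq_domain_init_pmupstate() and nvgpu_clk_freq_domain_free_pmupstate(); their bodies are elsewhere in the series. A sketch under the assumption that they mirror the VIN variant shown earlier, i.e. lazily allocate the group behind the new freq_domain_grp_objs pointer and release it symmetrically on teardown:

/*
 * Assumed implementation sketch, not taken from this hunk: same
 * allocate-once / free-and-NULL pattern as nvgpu_clk_vin_init_pmupstate().
 */
int nvgpu_clk_freq_domain_init_pmupstate(struct gk20a *g)
{
	/* If already allocated, do not re-allocate */
	if (g->clk_pmu->freq_domain_grp_objs != NULL) {
		return 0;
	}

	g->clk_pmu->freq_domain_grp_objs = nvgpu_kzalloc(g,
			sizeof(*g->clk_pmu->freq_domain_grp_objs));

	return (g->clk_pmu->freq_domain_grp_objs == NULL) ? -ENOMEM : 0;
}

void nvgpu_clk_freq_domain_free_pmupstate(struct gk20a *g)
{
	nvgpu_kfree(g, g->clk_pmu->freq_domain_grp_objs);
	g->clk_pmu->freq_domain_grp_objs = NULL;
}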
- */ - -#ifndef NVGPU_PMU_CLK_MCLK_H_ -#define NVGPU_PMU_CLK_MCLK_H_ - -#include - -struct nvgpu_clk_mclk_state { - u32 speed; - struct nvgpu_mutex mclk_lock; - struct nvgpu_mutex data_lock; - u16 p5_min; - u16 p0_min; - void *vreg_buf; - bool init; - s64 switch_max; - s64 switch_min; - u64 switch_num; - s64 switch_avg; - s64 switch_std; - bool debugfs_set; -}; - -#endif /* NVGPU_PMU_CLK_MCLK_H_ */ diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_prog.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_prog.h index 22f55d0fd..890231221 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_prog.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_prog.h @@ -25,10 +25,10 @@ #ifndef NVGPU_PMU_CLK_PROG_H #define NVGPU_PMU_CLK_PROG_H -#include #include struct gk20a; +struct boardobjgrp_e255; struct nvgpu_clk_progs { struct boardobjgrp_e255 super; @@ -37,6 +37,8 @@ struct nvgpu_clk_progs { u8 vf_sec_entry_count; }; +int nvgpu_clk_prog_init_pmupstate(struct gk20a *g); +void nvgpu_clk_prog_free_pmupstate(struct gk20a *g); int nvgpu_clk_prog_sw_setup(struct gk20a *g); int nvgpu_clk_prog_pmu_setup(struct gk20a *g); diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_vf_point.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_vf_point.h index bf195cbcc..a69980025 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_vf_point.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_vf_point.h @@ -25,12 +25,12 @@ #ifndef NVGPU_PMU_CLK_VF_POINT_H #define NVGPU_PMU_CLK_VF_POINT_H -#include -#include #include -#include struct gk20a; +struct boardobjgrp_e255; +struct ctrl_clk_vf_pair; +struct ctrl_clk_freq_delta; struct nvgpu_clk_vf_points { struct boardobjgrp_e255 super; @@ -64,7 +64,7 @@ struct clk_vf_point_freq { #define CLK_CLK_VF_POINT_GET(pclk, idx) \ ((struct clk_vf_point *)BOARDOBJGRP_OBJ_GET_BY_IDX( \ - &pclk->clk_vf_pointobjs.super.super, (u8)(idx))) + &pclk->clk_vf_pointobjs->super.super, (u8)(idx))) #define clkvfpointpairget(pvfpoint) \ (&((pvfpoint)->pair)) @@ -86,20 +86,16 @@ struct clk_vf_point_freq { #define clkvfpointvoltageuvget(pgpu, pvfpoint) \ CTRL_CLK_VF_PAIR_VOLTAGE_UV_GET(clkvfpointpairget(pvfpoint)) \ -u32 nvgpu_clk_vf_change_inject_data_fill_gv10x(struct gk20a *g, - struct nv_pmu_clk_rpc *rpccall, - struct nvgpu_set_fll_clk *setfllclk); -u32 nvgpu_clk_vf_change_inject_data_fill_gp10x(struct gk20a *g, - struct nv_pmu_clk_rpc *rpccall, - struct nvgpu_set_fll_clk *setfllclk); +int nvgpu_clk_vf_point_init_pmupstate(struct gk20a *g); +void nvgpu_clk_vf_point_free_pmupstate(struct gk20a *g); int nvgpu_clk_vf_point_sw_setup(struct gk20a *g); int nvgpu_clk_vf_point_pmu_setup(struct gk20a *g); struct clk_vf_point *nvgpu_construct_clk_vf_point(struct gk20a *g, void *pargs); -int nvgpu_clk_set_req_fll_clk_ps35(struct gk20a *g, - struct nvgpu_clk_slave_freq *vf_point); int nvgpu_clk_arb_find_slave_points(struct nvgpu_clk_arb *arb, struct nvgpu_clk_slave_freq *vf_point); int nvgpu_clk_vf_point_cache(struct gk20a *g); +int nvgpu_clk_domain_volt_to_freq(struct gk20a *g, u8 clkdomain_idx, + u32 *pclkmhz, u32 *pvoltuv, u8 railidx); #endif /* NVGPU_PMU_CLK_VF_POINT_H */ diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_vin.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_vin.h index 0253388e0..556196e8d 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_vin.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmu/clk/clk_vin.h @@ -25,11 +25,26 @@ #ifndef NVGPU_PMU_CLK_VIN_H #define NVGPU_PMU_CLK_VIN_H -#include #include struct gk20a; -struct vin_device_v20; +struct nvgpu_vin_device; +struct 
nvgpu_clk_pmupstate; +struct boardobj; +struct boardobjgrp_e32; + +typedef u32 vin_device_state_load(struct gk20a *g, + struct nvgpu_clk_pmupstate *clk, struct nvgpu_vin_device *pdev); + +struct nvgpu_vin_device { + struct boardobj super; + u8 id; + u8 volt_domain; + u8 volt_domain_vbios; + u32 flls_shared_mask; + + vin_device_state_load *state_load; +}; struct nvgpu_avfsvinobjs { struct boardobjgrp_e32 super; @@ -38,14 +53,10 @@ struct nvgpu_avfsvinobjs { bool vin_is_disable_allowed; }; +int nvgpu_clk_vin_init_pmupstate(struct gk20a *g); +void nvgpu_clk_vin_free_pmupstate(struct gk20a *g); int nvgpu_clk_pmu_vin_load(struct gk20a *g); int nvgpu_clk_vin_sw_setup(struct gk20a *g); int nvgpu_clk_vin_pmu_setup(struct gk20a *g); -int nvgpu_clk_avfs_get_vin_cal_fuse_v10(struct gk20a *g, - struct nvgpu_avfsvinobjs *pvinobjs, - struct vin_device_v20 *pvindev); -int nvgpu_clk_avfs_get_vin_cal_fuse_v20(struct gk20a *g, - struct nvgpu_avfsvinobjs *pvinobjs, - struct vin_device_v20 *pvindev); #endif /* NVGPU_PMU_CLK_VIN_H */ diff --git a/drivers/gpu/nvgpu/os/linux/debug_clk_gp106.c b/drivers/gpu/nvgpu/os/linux/debug_clk_gp106.c index 0a9cec1f1..67f304419 100644 --- a/drivers/gpu/nvgpu/os/linux/debug_clk_gp106.c +++ b/drivers/gpu/nvgpu/os/linux/debug_clk_gp106.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, NVIDIA Corporation. All rights reserved. + * Copyright (c) 2018-2019, NVIDIA Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -16,11 +16,16 @@ #include -#include -#include - #include "os_linux.h" -#include "common/pmu/clk/clk.h" + +#include +#include +#include +#include +#include +#include + +#include "common/pmu/clk/clk_freq_controller.h" void nvgpu_clk_arb_pstate_change_lock(struct gk20a *g, bool lock); @@ -43,7 +48,7 @@ static int sys_cfc_read(void *data , u64 *val) { struct gk20a *g = (struct gk20a *)data; bool bload = boardobjgrpmask_bitget( - &g->clk_pmu->clk_freq_controllers.freq_ctrl_load_mask.super, + &g->clk_pmu->clk_freq_controllers->freq_ctrl_load_mask.super, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_SYS); /* val = 1 implies CLFC is loaded or enabled */ @@ -58,7 +63,7 @@ static int sys_cfc_write(void *data , u64 val) bool bload = val ? true : false; nvgpu_clk_arb_pstate_change_lock(g, true); - status = clk_pmu_freq_controller_load(g, bload, + status = nvgpu_clk_pmu_freq_controller_load(g, bload, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_SYS); nvgpu_clk_arb_pstate_change_lock(g, false); @@ -70,7 +75,7 @@ static int ltc_cfc_read(void *data , u64 *val) { struct gk20a *g = (struct gk20a *)data; bool bload = boardobjgrpmask_bitget( - &g->clk_pmu->clk_freq_controllers.freq_ctrl_load_mask.super, + &g->clk_pmu->clk_freq_controllers->freq_ctrl_load_mask.super, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_LTC); /* val = 1 implies CLFC is loaded or enabled */ @@ -85,7 +90,7 @@ static int ltc_cfc_write(void *data , u64 val) bool bload = val ? 
true : false; nvgpu_clk_arb_pstate_change_lock(g, true); - status = clk_pmu_freq_controller_load(g, bload, + status = nvgpu_clk_pmu_freq_controller_load(g, bload, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_LTC); nvgpu_clk_arb_pstate_change_lock(g, false); @@ -97,7 +102,7 @@ static int xbar_cfc_read(void *data , u64 *val) { struct gk20a *g = (struct gk20a *)data; bool bload = boardobjgrpmask_bitget( - &g->clk_pmu->clk_freq_controllers.freq_ctrl_load_mask.super, + &g->clk_pmu->clk_freq_controllers->freq_ctrl_load_mask.super, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_XBAR); /* val = 1 implies CLFC is loaded or enabled */ @@ -112,7 +117,7 @@ static int xbar_cfc_write(void *data , u64 val) bool bload = val ? true : false; nvgpu_clk_arb_pstate_change_lock(g, true); - status = clk_pmu_freq_controller_load(g, bload, + status = nvgpu_clk_pmu_freq_controller_load(g, bload, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_XBAR); nvgpu_clk_arb_pstate_change_lock(g, false); @@ -125,7 +130,7 @@ static int gpc_cfc_read(void *data , u64 *val) { struct gk20a *g = (struct gk20a *)data; bool bload = boardobjgrpmask_bitget( - &g->clk_pmu->clk_freq_controllers.freq_ctrl_load_mask.super, + &g->clk_pmu->clk_freq_controllers->freq_ctrl_load_mask.super, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC0); /* val = 1 implies CLFC is loaded or enabled */ @@ -140,7 +145,7 @@ static int gpc_cfc_write(void *data , u64 val) bool bload = val ? true : false; nvgpu_clk_arb_pstate_change_lock(g, true); - status = clk_pmu_freq_controller_load(g, bload, + status = nvgpu_clk_pmu_freq_controller_load(g, bload, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC0); nvgpu_clk_arb_pstate_change_lock(g, false); diff --git a/drivers/gpu/nvgpu/os/linux/debug_clk_gv100.c b/drivers/gpu/nvgpu/os/linux/debug_clk_gv100.c index 5b3082918..6b9176798 100644 --- a/drivers/gpu/nvgpu/os/linux/debug_clk_gv100.c +++ b/drivers/gpu/nvgpu/os/linux/debug_clk_gv100.c @@ -20,8 +20,16 @@ #include "os_linux.h" #include -#include "common/pmu/clk/clk.h" +#include +#include +#include +#include +#include +#include +#include + #include "gv100/clk_gv100.h" +#include "common/pmu/clk/clk_freq_controller.h" void nvgpu_clk_arb_pstate_change_lock(struct gk20a *g, bool lock); @@ -44,7 +52,7 @@ static int sys_cfc_read(void *data , u64 *val) { struct gk20a *g = (struct gk20a *)data; bool bload = boardobjgrpmask_bitget( - &g->clk_pmu->clk_freq_controllers.freq_ctrl_load_mask.super, + &g->clk_pmu->clk_freq_controllers->freq_ctrl_load_mask.super, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_SYS); /* val = 1 implies CLFC is loaded or enabled */ @@ -59,7 +67,7 @@ static int sys_cfc_write(void *data , u64 val) bool bload = val ? true : false; nvgpu_clk_arb_pstate_change_lock(g, true); - status = clk_pmu_freq_controller_load(g, bload, + status = nvgpu_clk_pmu_freq_controller_load(g, bload, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_SYS); nvgpu_clk_arb_pstate_change_lock(g, false); @@ -71,7 +79,7 @@ static int ltc_cfc_read(void *data , u64 *val) { struct gk20a *g = (struct gk20a *)data; bool bload = boardobjgrpmask_bitget( - &g->clk_pmu->clk_freq_controllers.freq_ctrl_load_mask.super, + &g->clk_pmu->clk_freq_controllers->freq_ctrl_load_mask.super, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_LTC); /* val = 1 implies CLFC is loaded or enabled */ @@ -86,7 +94,7 @@ static int ltc_cfc_write(void *data , u64 val) bool bload = val ? 
true : false; nvgpu_clk_arb_pstate_change_lock(g, true); - status = clk_pmu_freq_controller_load(g, bload, + status = nvgpu_clk_pmu_freq_controller_load(g, bload, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_LTC); nvgpu_clk_arb_pstate_change_lock(g, false); @@ -98,7 +106,7 @@ static int xbar_cfc_read(void *data , u64 *val) { struct gk20a *g = (struct gk20a *)data; bool bload = boardobjgrpmask_bitget( - &g->clk_pmu->clk_freq_controllers.freq_ctrl_load_mask.super, + &g->clk_pmu->clk_freq_controllers->freq_ctrl_load_mask.super, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_XBAR); /* val = 1 implies CLFC is loaded or enabled */ @@ -113,7 +121,7 @@ static int xbar_cfc_write(void *data , u64 val) bool bload = val ? true : false; nvgpu_clk_arb_pstate_change_lock(g, true); - status = clk_pmu_freq_controller_load(g, bload, + status = nvgpu_clk_pmu_freq_controller_load(g, bload, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_XBAR); nvgpu_clk_arb_pstate_change_lock(g, false); @@ -126,7 +134,7 @@ static int gpc_cfc_read(void *data , u64 *val) { struct gk20a *g = (struct gk20a *)data; bool bload = boardobjgrpmask_bitget( - &g->clk_pmu->clk_freq_controllers.freq_ctrl_load_mask.super, + &g->clk_pmu->clk_freq_controllers->freq_ctrl_load_mask.super, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC0); /* val = 1 implies CLFC is loaded or enabled */ @@ -141,7 +149,7 @@ static int gpc_cfc_write(void *data , u64 val) bool bload = val ? true : false; nvgpu_clk_arb_pstate_change_lock(g, true); - status = clk_pmu_freq_controller_load(g, bload, + status = nvgpu_clk_pmu_freq_controller_load(g, bload, CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC0); nvgpu_clk_arb_pstate_change_lock(g, false); @@ -157,12 +165,12 @@ static int vftable_show(struct seq_file *s, void *unused) u32 voltage_min_uv, voltage_step_size_uv; u32 gpcclk_clkmhz = 0, gpcclk_voltuv = 0; - voltage_min_uv = g->clk_pmu->avfs_fllobjs.lut_min_voltage_uv; - voltage_step_size_uv = g->clk_pmu->avfs_fllobjs.lut_step_size_uv; + voltage_min_uv = g->clk_pmu->avfs_fllobjs->lut_min_voltage_uv; + voltage_step_size_uv = g->clk_pmu->avfs_fllobjs->lut_step_size_uv; for (index = 0; index < CTRL_CLK_LUT_NUM_ENTRIES_GV10x; index++) { gpcclk_voltuv = voltage_min_uv + index * voltage_step_size_uv; - status = clk_domain_volt_to_freq(g, 0, &gpcclk_clkmhz, + status = nvgpu_clk_domain_volt_to_freq(g, 0, &gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC); if (status != 0) {