gpu: nvgpu: effective freq load changes

Read clk frequency through PMU RPC

Bug 200399373

Change-Id: I9e887dcb1c5b622110eb4c1584f2f34434efd674
Signed-off-by: Vaikundanathan S <vaikuns@nvidia.com>
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1701276
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Vaikundanathan S
2018-04-24 11:32:43 +05:30
committed by Tejal Kudav
parent 0aa8d6e273
commit 8a4e694530
4 changed files with 157 additions and 2 deletions

View File

@@ -55,6 +55,137 @@ static void clkrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
phandlerparams->success = 1;
}
/*
 * Start or stop the PMU's effective-average clock frequency sampling.
 *
 * Issues an NV_PMU_CLK_RPC_ID_LOAD RPC with the FREQ_EFFECTIVE_AVG
 * feature selected, then blocks until the PMU acknowledges the command
 * (or the GR idle timeout expires).
 *
 * @param g      GPU driver context.
 * @param bload  true to enable the effective-avg sampling callback,
 *               false to disable it.
 *
 * @return 0 on success, negative error code (-EINVAL on RPC failure,
 *         or the nvgpu_pmu_cmd_post() error) otherwise.
 */
int clk_pmu_freq_effective_avg_load(struct gk20a *g, bool bload)
{
	struct pmu_cmd cmd;
	struct pmu_payload payload;
	int status;
	u32 seqdesc;
	struct nv_pmu_clk_rpc rpccall;
	struct clkrpc_pmucmdhandler_params handler;
	struct nv_pmu_clk_load *clkload;

	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc));
	memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params));
	memset(&cmd, 0, sizeof(struct pmu_cmd));

	rpccall.function = NV_PMU_CLK_RPC_ID_LOAD;
	clkload = &rpccall.params.clk_load;
	clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_FREQ_EFFECTIVE_AVG;
	clkload->action_mask = bload ?
		NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_EFFECTIVE_AVG_CALLBACK_YES :
		NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_EFFECTIVE_AVG_CALLBACK_NO;

	cmd.hdr.unit_id = PMU_UNIT_CLK;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
			(u32)sizeof(struct pmu_hdr);
	cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;

	/* The RPC struct serves as both the command and response payload. */
	payload.in.buf = (u8 *)&rpccall;
	payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;

	payload.out.buf = (u8 *)&rpccall;
	payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;

	handler.prpccall = &rpccall;
	handler.success = 0;

	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_LPQ,
			clkrpc_pmucmdhandler, (void *)&handler,
			&seqdesc, ~0);
	if (status) {
		nvgpu_err(g, "unable to post clk RPC cmd %x",
			cmd.cmd.clk.cmd_type);
		goto done;
	}

	/* Wait for clkrpc_pmucmdhandler to flag completion. */
	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handler.success, 1);
	if (handler.success == 0) {
		nvgpu_err(g, "rpc call to load Effective avg clk domain freq failed");
		status = -EINVAL;
	}

done:
	return status;
}
/*
 * Read the effective average clock frequency from the PMU.
 *
 * Issues an NV_PMU_CLK_RPC_ID_CLK_FREQ_EFF_AVG RPC for the requested
 * clock domain(s) and waits for the PMU to fill in the result buffer.
 *
 * @param g              GPU driver context.
 * @param clkDomainMask  CTRL_CLK_DOMAIN_* mask selecting the domain(s)
 *                       to sample.
 *
 * @return the sampled frequency in kHz on success; on failure, the
 *         nvgpu_pmu_cmd_post() error or (u32)-EINVAL.
 */
u32 clk_freq_effective_avg(struct gk20a *g, u32 clkDomainMask)
{
	struct pmu_cmd cmd;
	struct pmu_payload payload;
	u32 status;
	u32 seqdesc;
	struct nv_pmu_clk_rpc rpccall;
	struct clkrpc_pmucmdhandler_params handler;
	struct nv_pmu_clk_freq_effective_avg *clk_freq_effective_avg;

	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc));
	memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params));
	memset(&cmd, 0, sizeof(struct pmu_cmd));

	rpccall.function = NV_PMU_CLK_RPC_ID_CLK_FREQ_EFF_AVG;
	clk_freq_effective_avg = &rpccall.params.clk_freq_effective_avg;
	clk_freq_effective_avg->clkDomainMask = clkDomainMask;

	cmd.hdr.unit_id = PMU_UNIT_CLK;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
			(u32)sizeof(struct pmu_hdr);
	cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;

	/* The RPC struct serves as both the command and response payload. */
	payload.in.buf = (u8 *)&rpccall;
	payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;

	payload.out.buf = (u8 *)&rpccall;
	payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;

	handler.prpccall = &rpccall;
	handler.success = 0;

	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_LPQ,
			clkrpc_pmucmdhandler, (void *)&handler,
			&seqdesc, ~0);
	if (status) {
		nvgpu_err(g, "unable to post clk RPC cmd %x",
			cmd.cmd.clk.cmd_type);
		goto done;
	}

	/* Wait for clkrpc_pmucmdhandler to flag completion. */
	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handler.success, 1);
	if (handler.success == 0) {
		nvgpu_err(g, "rpc call to get clk frequency average failed");
		status = -EINVAL;
		goto done;
	}

	/*
	 * NOTE(review): the result array is indexed directly by the mask
	 * value, so this only yields a meaningful entry when a single
	 * domain bit is set and the mask value is below
	 * CTRL_BOARDOBJ_MAX_BOARD_OBJECTS — confirm with callers.
	 */
	return rpccall.params.clk_freq_effective_avg.freqkHz[clkDomainMask];

done:
	return status;
}
int clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx)
{
struct pmu_cmd cmd;
@@ -676,7 +807,6 @@ u32 nvgpu_clk_set_boot_fll_clk_gv10x(struct gk20a *g)
status = clk_domain_get_f_or_v(g, CTRL_CLK_DOMAIN_GPCCLK,
&gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC);
if (status) {
nvgpu_err(g,"failed 1");
return status;
}
@@ -695,6 +825,17 @@ u32 nvgpu_clk_set_boot_fll_clk_gv10x(struct gk20a *g)
if (status)
nvgpu_err(g, "attempt to set boot gpcclk failed");
status = clk_pmu_freq_effective_avg_load(g, true);
/*
* Read clocks after some delay with below method
* & extract clock data from buffer
* clk_freq_effective_avg(g, CTRL_CLK_DOMAIN_GPCCLK |
* CTRL_CLK_DOMAIN_XBARCLK |
* CTRL_CLK_DOMAIN_SYSCLK |
* CTRL_CLK_DOMAIN_NVDCLK)
* */
return status;
}

View File

@@ -143,4 +143,6 @@ u32 nvgpu_clk_vf_change_inject_data_fill_gp10x(struct gk20a *g,
struct nv_pmu_clk_rpc *rpccall,
struct set_fll_clk *setfllclk);
u32 nvgpu_clk_set_boot_fll_clk_gv10x(struct gk20a *g);
int clk_pmu_freq_effective_avg_load(struct gk20a *g, bool bload);
u32 clk_freq_effective_avg(struct gk20a *g, u32 clkDomainMask);
#endif

View File

@@ -340,6 +340,7 @@ static u32 devinit_get_fll_device_table(struct gk20a *g,
CTRL_CLK_FLL_REGIME_ID_FFR;
fll_dev_data.regime_desc.fixed_freq_regime_limit_mhz =
(u16)fll_desc_table_entry.ffr_cutoff_freq_mhz;
fll_dev_data.regime_desc.target_regime_id_override=0;
/*construct fll device*/
pfll_dev = construct_fll_device(g, (void *)&fll_dev_data);

View File

@@ -226,6 +226,7 @@ struct nv_pmu_clk_lut_device_desc {
/*
 * PMU-side descriptor of a clock's frequency regime configuration.
 * Consumed by the fll device construction path (devinit_get_fll_device_table).
 */
struct nv_pmu_clk_regime_desc {
u8 regime_id;
/* 0 = no override; presumably a CTRL_CLK_FLL_REGIME_ID_* value otherwise — confirm */
u8 target_regime_id_override;
/* cutoff frequency for the fixed-frequency regime, in MHz */
u16 fixed_freq_regime_limit_mhz;
};
@@ -389,6 +390,12 @@ struct nv_pmu_clk_load {
struct nv_pmu_clk_load_payload_freq_controllers freq_controllers;
} payload;
};
/*
 * Payload for the NV_PMU_CLK_RPC_ID_CLK_FREQ_EFF_AVG RPC.
 * The driver fills clkDomainMask; the PMU returns the sampled
 * effective average frequencies in freqkHz.
 */
struct nv_pmu_clk_freq_effective_avg {
/* CTRL_CLK_DOMAIN_* bitmask of the clock domains to sample */
u32 clkDomainMask;
/* result frequencies in kHz; indexed by clkDomainMask at the call site — verify indexing convention */
u32 freqkHz[CTRL_BOARDOBJ_MAX_BOARD_OBJECTS];
};
/* CLK_FREQ_CONTROLLER */
#define NV_NV_PMU_CLK_LOAD_FEATURE_FREQ_CONTROLLER (0x00000003)
@@ -432,6 +439,10 @@ union nv_pmu_clk_clk_freq_controller_boardobj_set_union {
NV_PMU_BOARDOBJ_GRP_SET_MAKE_E32(clk, clk_freq_controller);
#define NV_NV_PMU_CLK_LOAD_FEATURE_FREQ_EFFECTIVE_AVG (0x00000004)
#define NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_EFFECTIVE_AVG_CALLBACK_NO (0x00000000)
#define NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_EFFECTIVE_AVG_CALLBACK_YES (0x00000004)
/* CLK CMD ID definitions. */
#define NV_PMU_CLK_CMD_ID_BOARDOBJ_GRP_SET (0x00000001)
#define NV_PMU_CLK_CMD_ID_RPC (0x00000000)
@@ -441,7 +452,6 @@ NV_PMU_BOARDOBJ_GRP_SET_MAKE_E32(clk, clk_freq_controller);
#define NV_PMU_CLK_RPC_ID_CLK_VF_CHANGE_INJECT (0x00000000)
#define NV_PMU_CLK_RPC_ID_CLK_FREQ_EFF_AVG (0x00000002)
struct nv_pmu_clk_cmd_rpc {
u8 cmd_type;
u8 pad[3];
@@ -476,6 +486,7 @@ struct nv_pmu_clk_rpc {
struct nv_pmu_clk_vf_change_inject clk_vf_change_inject;
struct nv_pmu_clk_vf_change_inject_v1 clk_vf_change_inject_v1;
struct nv_pmu_clk_load clk_load;
struct nv_pmu_clk_freq_effective_avg clk_freq_effective_avg;
} params;
};