gpu: nvgpu: vgpu: add freq capping support

Currently, callbacks from the PM_QOS framework (for thermal
events) result in an RPC call to set the GPU frequency.

Since the governor will now be responsible for setting the
desired rate, the max PM_QOS callback will instead cap the
possible GPU frequency with a new RPC call to the server. The
server is responsible for setting the final frequency based on
the cap and desired rates.

Jira VFND-3699

Change-Id: I806e309c40abc2f1381b6a23f2d898cfe26f9794
Signed-off-by: Sachit Kadle <skadle@nvidia.com>
Reviewed-on: http://git-master/r/1295543
(cherry picked from commit e81693c6e087f8f10a985be83715042fc590d6db)
Reviewed-on: http://git-master/r/1282467
(cherry picked from commit 7b4e0db647572e82a8d53e823c36b465781f4942)
Reviewed-on: http://git-master/r/1321836
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Sachit Kadle, 2017-01-09 17:34:14 -08:00
Committer: mobile promotions
Commit:    57dafc08a5 (parent: f871b52fd3)
4 changed files with 30 additions and 12 deletions
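The split described in the commit message (the governor sets the desired rate, PM_QOS caps it, the server arbitrates) boils down to the server programming the lesser of the two inputs. Below is a minimal sketch of that server-side policy; the names (gpu_clk_arbiter, gpu_clk_effective_rate) are hypothetical, since the vgpu server implementation is not part of this change:

/*
 * Hypothetical server-side policy: the client sends two independent
 * inputs, a desired rate (TEGRA_VGPU_CMD_SET_GPU_CLK_RATE, from the
 * governor) and a cap (TEGRA_VGPU_CMD_CAP_GPU_CLK_RATE, from the
 * PM_QOS max notifier); the server programs the lesser of the two.
 */
struct gpu_clk_arbiter {
	unsigned long desired_rate;	/* last SET_GPU_CLK_RATE request */
	unsigned long cap_rate;		/* last CAP_GPU_CLK_RATE request, 0 = uncapped */
};

static unsigned long gpu_clk_effective_rate(const struct gpu_clk_arbiter *arb)
{
	/* No cap received yet: honour the governor's request as-is. */
	if (!arb->cap_rate)
		return arb->desired_rate;

	return arb->desired_rate < arb->cap_rate ?
		arb->desired_rate : arb->cap_rate;
}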


@@ -111,3 +111,28 @@ int vgpu_clk_get_freqs(struct device *dev,
 	return 0;
 }

+int vgpu_clk_cap_rate(struct device *dev, unsigned long rate)
+{
+	struct gk20a_platform *platform = gk20a_get_platform(dev);
+	struct gk20a *g = platform->g;
+	struct tegra_vgpu_cmd_msg msg = {};
+	struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
+	int err = 0;
+
+	gk20a_dbg_fn("");
+
+	msg.cmd = TEGRA_VGPU_CMD_CAP_GPU_CLK_RATE;
+	msg.handle = vgpu_get_handle(g);
+	p->rate = (u32)rate;
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	err = err ? err : msg.ret;
+
+	if (err) {
+		gk20a_err(dev_from_gk20a(g),
+			"%s failed - %d", __func__, err);
+		return err;
+	}
+
+	return 0;
+}
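The new call reuses the existing tegra_vgpu_gpu_clk_rate_params payload; only the command ID differs from the set-rate path. For reference, a sketch of the assumed message layout (the authoritative definitions live in tegra_vgpu.h; field order and the other union members are assumed/elided here):

/* Assumed shape of the RPC message used above; see tegra_vgpu.h for
 * the real definitions. */
struct tegra_vgpu_gpu_clk_rate_params {
	u32 rate;	/* requested rate or cap */
};

struct tegra_vgpu_cmd_msg {
	u32 cmd;	/* e.g. TEGRA_VGPU_CMD_CAP_GPU_CLK_RATE */
	int ret;	/* filled in by the server on the reply */
	u64 handle;	/* per-GPU handle from vgpu_get_handle() */
	union {
		struct tegra_vgpu_gpu_clk_rate_params gpu_clk_rate;
		/* ... other per-command parameter structs ... */
	} params;
};

Note that vgpu_clk_cap_rate() narrows the unsigned long rate to u32, since the shared params struct carries a 32-bit rate.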


@@ -21,4 +21,5 @@ long vgpu_clk_round_rate(struct device *dev, unsigned long rate);
 int vgpu_clk_set_rate(struct device *dev, unsigned long rate);
 int vgpu_clk_get_freqs(struct device *dev,
 		unsigned long **freqs, int *num_freqs);
+int vgpu_clk_cap_rate(struct device *dev, unsigned long rate);
 #endif


@@ -449,20 +449,13 @@ static int vgpu_qos_notify(struct notifier_block *nb,
 	struct gk20a_scale_profile *profile =
 			container_of(nb, struct gk20a_scale_profile,
 					qos_notify_block);
-	struct tegra_vgpu_cmd_msg msg = {};
-	struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
 	u32 max_freq;
 	int err;

 	gk20a_dbg_fn("");

 	max_freq = (u32)pm_qos_read_max_bound(PM_QOS_GPU_FREQ_BOUNDS);
-
-	msg.cmd = TEGRA_VGPU_CMD_SET_GPU_CLK_RATE;
-	msg.handle = vgpu_get_handle_from_dev(profile->dev);
-	p->rate = max_freq;
-	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-	err = err ? err : msg.ret;
+	err = vgpu_clk_cap_rate(profile->dev, max_freq);

 	if (err)
 		gk20a_err(profile->dev, "%s failed, err=%d", __func__, err);
@@ -472,15 +465,13 @@ static int vgpu_qos_notify(struct notifier_block *nb,
 static int vgpu_pm_qos_init(struct device *dev)
 {
 	struct gk20a *g = get_gk20a(dev);
-	struct gk20a_scale_profile *profile;
+	struct gk20a_scale_profile *profile = g->scale_profile;

-	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
 	if (!profile)
-		return -ENOMEM;
+		return -EINVAL;

 	profile->dev = dev;
 	profile->qos_notify_block.notifier_call = vgpu_qos_notify;
-	g->scale_profile = profile;

 	pm_qos_add_max_notifier(PM_QOS_GPU_FREQ_BOUNDS,
 			&profile->qos_notify_block);
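For context, vgpu_qos_notify() only runs when some other component moves the max bound of the PM_QOS_GPU_FREQ_BOUNDS class. A hypothetical trigger is sketched below, assuming the generic pm_qos request API applies to this class (downstream Tegra kernels may use a dedicated bounded-request helper instead); the request and function names are illustrative only:

#include <linux/pm_qos.h>

/* Illustrative thermal-throttle path: lowering the max bound invokes
 * the max-notifier chain, i.e. vgpu_qos_notify(), which then forwards
 * the new cap to the server via vgpu_clk_cap_rate(). */
static struct pm_qos_request gpu_freq_cap_req;	/* hypothetical */

static void example_gpu_thermal_init(void)
{
	pm_qos_add_request(&gpu_freq_cap_req, PM_QOS_GPU_FREQ_BOUNDS,
			PM_QOS_DEFAULT_VALUE);
}

static void example_gpu_thermal_throttle(s32 max_freq)
{
	pm_qos_update_request(&gpu_freq_cap_req, max_freq);
}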


@@ -104,6 +104,7 @@ enum {
 	TEGRA_VGPU_CMD_CLEAR_SM_ERROR_STATE = 68,
 	TEGRA_VGPU_CMD_GET_GPU_CLK_RATE = 69,
 	TEGRA_VGPU_CMD_GET_GPU_FREQ_TABLE = 70,
+	TEGRA_VGPU_CMD_CAP_GPU_CLK_RATE = 71,
 };

 struct tegra_vgpu_connect_params {
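On the receiving end, the new command ID only needs a dispatch entry. A hypothetical sketch of how a server might route it, reusing the gpu_clk_arbiter idea from the earlier sketch (the handler and program_gpu_clk() are invented names; the real server is not part of this change):

/* Hypothetical server-side dispatch; not part of this change. */
static int vgpu_server_handle_clk_cmd(struct gpu_clk_arbiter *arb,
				struct tegra_vgpu_cmd_msg *msg)
{
	switch (msg->cmd) {
	case TEGRA_VGPU_CMD_SET_GPU_CLK_RATE:
		arb->desired_rate = msg->params.gpu_clk_rate.rate;
		break;
	case TEGRA_VGPU_CMD_CAP_GPU_CLK_RATE:
		arb->cap_rate = msg->params.gpu_clk_rate.rate;
		break;
	default:
		return -EINVAL;
	}

	/* Re-evaluate min(desired, cap) after either input changes. */
	return program_gpu_clk(gpu_clk_effective_rate(arb));
}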