Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git, synced 2025-12-22 09:11:26 +03:00
devfreq: podgov: use cycles/sec to estimate load
The GPU clock may change within the period of time defined by smooth, so the result of averaging load percentages within the smooth window has less meaning. The new method keeps track of the active GPU cycle count per unit of time, and the average load is the average active cycle count divided by the current GPU clock (the total cycle count per unit of time).

Bug 1963732

Change-Id: I88cfb998f9bcfa0d6d0397f653f8e3096d4b3eed
Signed-off-by: Peng Liu <pengliu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2033266
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: Laxman Dewangan
Parent: 7641694f0a
Commit: 216ab9426c
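As an illustration of the rationale (assumed numbers, not taken from the change): with two equal windows, one at 204 MHz and 90% busy and one at 816 MHz and 25% busy, averaging percentages gives (900 + 250)/2 = 575 per-mille, whereas the active cycle count averages to (183.6 + 204)/2 = 193.8 Mcycles/s, which is only about 238 per-mille of the 816 MHz clock the next decision is made at. The sketch below mimics the same two-step estimate in user space: normalize each sample to active cycles per second (current_frequency * busy_time / total_time), smooth it with the avg = (avg * smooth + new) / (smooth + 1) recurrence used in the diff, and express busyness as 1000 * cycles_avg / current_frequency. The sample values and the smooth constant of 3 are illustrative; only the arithmetic mirrors the diff below, the program is not the in-kernel governor.

/*
 * Standalone sketch of the cycles-based load estimate (not kernel code).
 * The fields freq/busy/total stand in for current_frequency, busy_time and
 * total_time from struct devfreq_dev_status; all values are made up.
 */
#include <stdio.h>

/* Normalized active cycles per second for one sample window. */
static unsigned long long norm_cycles(unsigned long long freq_hz,
				      unsigned long long busy_time,
				      unsigned long long total_time)
{
	return freq_hz * busy_time / total_time;
}

int main(void)
{
	const unsigned int smooth = 3;	/* plays the role of p_smooth */
	unsigned long long cycles_avg = 0;

	/* Two sample windows at different GPU clocks (Hz), both 50% busy. */
	struct { unsigned long long freq, busy, total; } s[] = {
		{ 204000000ULL, 5000, 10000 },	/* 204 MHz window */
		{ 816000000ULL, 5000, 10000 },	/* 816 MHz window */
	};

	for (int i = 0; i < 2; i++) {
		unsigned long long nl = norm_cycles(s[i].freq, s[i].busy,
						    s[i].total);

		/* Same recurrence as the governor's cycles_avg update. */
		cycles_avg = (cycles_avg * smooth + nl) / (smooth + 1);

		/* Busyness in per-mille of the current clock, as in
		 * scaling_state_check(). */
		printf("window %d: norm=%llu cycles/s, avg=%llu, busyness=%llu/1000\n",
		       i, nl, cycles_avg, 1000ULL * cycles_avg / s[i].freq);
	}
	return 0;
}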
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -67,7 +67,8 @@ struct podgov_info_rec {
 	unsigned int p_user;
 	unsigned int p_freq_request;
 
-	long idle;
+	unsigned long cycles_norm;
+	unsigned long cycles_avg;
 
 	int adjustment_type;
 	unsigned long adjustment_frequency;
@@ -78,7 +79,6 @@ struct podgov_info_rec {
 	unsigned long *freqlist;
 	int freq_count;
 
-	unsigned int idle_avg;
 	int freq_avg;
 
 	struct kobj_attribute enable_3d_scaling_attr;
@@ -275,12 +275,13 @@ static void podgov_set_freq_request(struct devfreq *df, int freq_request)
 
 static unsigned long scaling_state_check(struct devfreq *df, ktime_t time)
 {
-	struct podgov_info_rec *podgov = df->data;
-	unsigned long dt;
-	long max_boost, load, damp, freq, boost, res;
+	struct podgov_info_rec *pg = df->data;
+	struct devfreq_dev_status *ds = &df->last_status;
+	unsigned long dt, busyness, rt_load;
+	long max_boost, damp, freq, boost, res;
 
-	dt = (unsigned long) ktime_us_delta(time, podgov->last_scale);
-	if (dt < podgov->p_block_window || df->previous_freq == 0)
+	dt = (unsigned long) ktime_us_delta(time, pg->last_scale);
+	if (dt < pg->p_block_window || df->previous_freq == 0)
 		return 0;
 
 	/* convert to mhz to avoid overflow */
@@ -288,20 +289,22 @@ static unsigned long scaling_state_check(struct devfreq *df, ktime_t time)
 	max_boost = (df->max_freq/3) / 1000000;
 
 	/* calculate and trace load */
-	load = 1000 - podgov->idle_avg;
-	trace_podgov_busy(df->dev.parent, load);
-	damp = podgov->p_damp;
+	busyness = 1000ULL * pg->cycles_avg / ds->current_frequency;
+	rt_load = 1000ULL * pg->cycles_norm / ds->current_frequency;
+	trace_podgov_load(df->dev.parent, rt_load);
+	trace_podgov_busy(df->dev.parent, busyness);
 
-	if ((1000 - podgov->idle) > podgov->p_load_max) {
+	damp = pg->p_damp;
+
+	if (rt_load > pg->p_load_max) {
 		/* if too busy, scale up max/3, do not damp */
 		boost = max_boost;
 		damp = 10;
 
 	} else {
-		/* boost = bias * freq * (load - target)/target */
-		boost = (load - podgov->p_load_target);
-		boost *= (podgov->p_bias * freq);
-		boost /= (100 * podgov->p_load_target);
+		/* boost = bias * freq * (busyness - target)/target */
+		boost = busyness - pg->p_load_target;
+		boost *= (pg->p_bias * freq);
+		boost /= (100 * pg->p_load_target);
 
 		/* clamp to max boost */
 		boost = (boost < max_boost) ? boost : max_boost;
@@ -311,11 +314,11 @@ static unsigned long scaling_state_check(struct devfreq *df, ktime_t time)
 	res = freq + boost;
 
 	/* Maintain average request */
-	podgov->freq_avg = (podgov->freq_avg * podgov->p_smooth) + res;
-	podgov->freq_avg /= (podgov->p_smooth+1);
+	pg->freq_avg = (pg->freq_avg * pg->p_smooth) + res;
+	pg->freq_avg /= (pg->p_smooth+1);
 
 	/* Applying damping to frequencies */
-	res = ((damp * res) + ((10 - damp)*podgov->freq_avg)) / 10;
+	res = ((damp * res) + ((10 - damp)*pg->freq_avg)) / 10;
 
 	/* Convert to hz, limit, and apply */
 	res = res * 1000000;
@@ -523,13 +526,14 @@ static ssize_t freq_request_store(struct kobject *kobj,
 static int nvhost_pod_estimate_freq(struct devfreq *df,
 				    unsigned long *freq)
 {
-	struct podgov_info_rec *podgov = df->data;
-	struct devfreq_dev_status *dev_stat;
+	struct podgov_info_rec *pg = df->data;
+	struct devfreq_dev_status *ds;
 	int err;
 	ktime_t now;
+	unsigned long long norm_load;
 
 	/* Ensure maximal clock when scaling is disabled */
-	if (!podgov->enable) {
+	if (!pg->enable) {
 		*freq = df->max_freq;
 		if (*freq == df->previous_freq)
 			return GET_TARGET_FREQ_DONTSCALE;
@@ -537,8 +541,8 @@ static int nvhost_pod_estimate_freq(struct devfreq *df,
 		return 0;
 	}
 
-	if (podgov->p_user) {
-		*freq = podgov->p_freq_request;
+	if (pg->p_user) {
+		*freq = pg->p_freq_request;
 		return 0;
 	}
 
@@ -546,10 +550,10 @@ static int nvhost_pod_estimate_freq(struct devfreq *df,
 	if (err)
 		return err;
 
-	dev_stat = &df->last_status;
+	ds = &df->last_status;
 
-	if (dev_stat->total_time == 0) {
-		*freq = dev_stat->current_frequency;
+	if (ds->total_time == 0) {
+		*freq = ds->current_frequency;
 		return 0;
 	}
 
@@ -558,41 +562,38 @@ static int nvhost_pod_estimate_freq(struct devfreq *df,
 	/* Local adjustments (i.e. requests from kernel threads) are
 	 * handled here */
 
-	if (podgov->adjustment_type == ADJUSTMENT_LOCAL) {
+	if (pg->adjustment_type == ADJUSTMENT_LOCAL) {
 
-		podgov->adjustment_type = ADJUSTMENT_DEVICE_REQ;
+		pg->adjustment_type = ADJUSTMENT_DEVICE_REQ;
 
 		/* Do not do unnecessary scaling */
-		scaling_limit(df, &podgov->adjustment_frequency);
+		scaling_limit(df, &pg->adjustment_frequency);
 
 		trace_podgov_estimate_freq(df->dev.parent,
 					   df->previous_freq,
-					   podgov->adjustment_frequency);
+					   pg->adjustment_frequency);
 
-		*freq = podgov->adjustment_frequency;
+		*freq = pg->adjustment_frequency;
 		return 0;
 	}
 
-	*freq = dev_stat->current_frequency;
-
 	/* Sustain local variables */
-	podgov->idle = 1000 * (dev_stat->total_time - dev_stat->busy_time);
-	podgov->idle = podgov->idle / dev_stat->total_time;
-	podgov->idle_avg = (podgov->p_smooth * podgov->idle_avg) +
-		podgov->idle;
-	podgov->idle_avg = podgov->idle_avg / (podgov->p_smooth + 1);
+	norm_load = (u64)ds->current_frequency * ds->busy_time / ds->total_time;
+	pg->cycles_norm = norm_load;
+	pg->cycles_avg = ((u64)pg->cycles_avg * pg->p_smooth + norm_load) /
+			 (pg->p_smooth + 1);
 
 	*freq = scaling_state_check(df, now);
 
 	if (!(*freq)) {
-		*freq = dev_stat->current_frequency;
+		*freq = ds->current_frequency;
 		return 0;
 	}
 
-	if (freqlist_up(podgov, *freq, 0) == dev_stat->current_frequency)
+	if (freqlist_up(pg, *freq, 0) == ds->current_frequency)
 		return 0;
 
-	podgov->last_scale = now;
+	pg->last_scale = now;
 
 	trace_podgov_estimate_freq(df->dev.parent, df->previous_freq, *freq);
 
@@ -676,7 +677,6 @@ static int nvhost_pod_init(struct devfreq *df)
 	df->max_freq = podgov->freqlist[podgov->freq_count - 1];
 	podgov->p_freq_request = df->max_freq;
 
-	podgov->idle_avg = 0;
 	podgov->freq_avg = 0;
 
 	nvhost_scale_emc_debug_init(df);