linux-nvgpu/drivers/gpu/nvgpu/gv100/clk_arb_gv100.c
Abdul Salam 6945209418 gpu: nvgpu: Add unified check for clk_arb support
Currently clk_arb requires PSTATE to be enabled on dGPU, so setting
only PSTATE to false causes clk_arb to fail. There is no such PSTATE
dependency on iGPU. Unify the check behind a check_clk_arb_support()
call, implemented per chip according to its iGPU/dGPU dependencies.
check_clk_arb_support() returns true if clk_arb is supported, else
false.

Jira NVGPU-1948

Change-Id: I108dc12bd6ad8d0e074352080c978b7dda9bee05
Signed-off-by: Abdul Salam <absalam@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2014775
Reviewed-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
2019-02-12 08:54:49 -08:00
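A minimal sketch of how common code might gate arbiter setup on the
new HAL op; this call site and the check_clk_arb_support op wiring
are assumptions for illustration, not part of the file below:

if ((g->ops.clk_arb.check_clk_arb_support == NULL) ||
!g->ops.clk_arb.check_clk_arb_support(g)) {
/* clk_arb is not supported on this chip; skip arbiter init */
return 0;
}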


/*
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/clk_arb.h>
#include <nvgpu/pmu/clk.h>
#include <nvgpu/timers.h>
#include "clk_arb_gv100.h"
#include "common/pmu/clk/clk.h"
bool gv100_check_clk_arb_support(struct gk20a *g)
{
if ((g->ops.clk_arb.get_arbiter_clk_domains != NULL) &&
nvgpu_is_enabled(g, NVGPU_PMU_PSTATE)) {
return true;
} else {
return false;
}
}
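/* GV100 arbitrates a single clock domain, GPCCLK. */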
u32 gv100_get_arbiter_clk_domains(struct gk20a *g)
{
(void)g;
return (CTRL_CLK_DOMAIN_GPCCLK);
}
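/* Thin wrapper that delegates the frequency-point query to the clk HAL. */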
int gv100_get_arbiter_f_points(struct gk20a *g, u32 api_domain,
u32 *num_points, u16 *freqs_in_mhz)
{
return g->ops.clk.clk_domain_get_f_points(g,
api_domain, num_points, freqs_in_mhz);
}
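/*
* Report the arbitrated [min, max] MHz range for a domain from the P0
* pstate entry; for GPCCLK the minimum is additionally raised above
* the largest FLL DVCO minimum (see the WAR below).
*/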
int gv100_get_arbiter_clk_range(struct gk20a *g, u32 api_domain,
u16 *min_mhz, u16 *max_mhz)
{
u32 clkwhich;
struct clk_set_info *p0_info;
struct avfsfllobjs *pfllobjs = &(g->clk_pmu->avfs_fllobjs);
u16 limit_min_mhz;
bool error_status = false;
switch (api_domain) {
case CTRL_CLK_DOMAIN_MCLK:
clkwhich = CLKWHICH_MCLK;
break;
case CTRL_CLK_DOMAIN_GPCCLK:
clkwhich = CLKWHICH_GPCCLK;
break;
default:
error_status = true;
break;
}
if (error_status == true) {
return -EINVAL;
}
p0_info = pstate_get_clk_set_info(g,
CTRL_PERF_PSTATE_P0, clkwhich);
if (p0_info == NULL) {
return -EINVAL;
}
limit_min_mhz = p0_info->min_mhz;
/* WAR for DVCO min */
if (api_domain == CTRL_CLK_DOMAIN_GPCCLK) {
if ((pfllobjs->max_min_freq_mhz != 0U) &&
(pfllobjs->max_min_freq_mhz >= limit_min_mhz)) {
limit_min_mhz = pfllobjs->max_min_freq_mhz + 1U;
}
}
*min_mhz = limit_min_mhz;
*max_mhz = p0_info->max_mhz;
return 0;
}
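/* The default target for a domain is the P0 pstate maximum. */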
int gv100_get_arbiter_clk_default(struct gk20a *g, u32 api_domain,
u16 *default_mhz)
{
u32 clkwhich;
struct clk_set_info *p0_info;
bool error_status = false;
switch (api_domain) {
case CTRL_CLK_DOMAIN_MCLK:
clkwhich = CLKWHICH_MCLK;
break;
case CTRL_CLK_DOMAIN_GPCCLK:
clkwhich = CLKWHICH_GPCCLK;
break;
default:
error_status = true;
break;
}
if (error_status == true) {
return -EINVAL;
}
p0_info = pstate_get_clk_set_info(g,
CTRL_PERF_PSTATE_P0, clkwhich);
if (p0_info == NULL) {
return -EINVAL;
}
*default_mhz = p0_info->max_mhz;
return 0;
}
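/*
* Allocate the arbiter with its VF table pools and notification queue,
* start the worker, cache the VF points, and wait until the first
* arbitration run completes before returning its status.
*/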
int gv100_init_clk_arbiter(struct gk20a *g)
{
struct nvgpu_clk_arb *arb;
u16 default_mhz;
int err;
int index;
struct nvgpu_clk_vf_table *table;
clk_arb_dbg(g, " ");
if (g->clk_arb != NULL) {
return 0;
}
arb = nvgpu_kzalloc(g, sizeof(struct nvgpu_clk_arb));
if (arb == NULL) {
return -ENOMEM;
}
err = nvgpu_mutex_init(&arb->pstate_lock);
if (err != 0) {
goto mutex_fail;
}
nvgpu_spinlock_init(&arb->sessions_lock);
nvgpu_spinlock_init(&arb->users_lock);
nvgpu_spinlock_init(&arb->requests_lock);
arb->mclk_f_points = nvgpu_kcalloc(g, MAX_F_POINTS, sizeof(u16));
if (arb->mclk_f_points == NULL) {
err = -ENOMEM;
goto init_fail;
}
arb->gpc2clk_f_points = nvgpu_kcalloc(g, MAX_F_POINTS, sizeof(u16));
if (arb->gpc2clk_f_points == NULL) {
err = -ENOMEM;
goto init_fail;
}
for (index = 0; index < 2; index++) {
table = &arb->vf_table_pool[index];
table->gpc2clk_num_points = MAX_F_POINTS;
table->mclk_num_points = MAX_F_POINTS;
table->gpc2clk_points = nvgpu_kcalloc(g, MAX_F_POINTS,
sizeof(struct nvgpu_clk_vf_point));
if (table->gpc2clk_points == NULL) {
err = -ENOMEM;
goto init_fail;
}
table->mclk_points = nvgpu_kcalloc(g, MAX_F_POINTS,
sizeof(struct nvgpu_clk_vf_point));
if (table->mclk_points == NULL) {
err = -ENOMEM;
goto init_fail;
}
}
g->clk_arb = arb;
arb->g = g;
err = g->ops.clk_arb.get_arbiter_clk_default(g,
CTRL_CLK_DOMAIN_MCLK, &default_mhz);
if (err < 0) {
err = -EINVAL;
goto init_fail;
}
arb->mclk_default_mhz = default_mhz;
err = g->ops.clk_arb.get_arbiter_clk_default(g,
CTRL_CLK_DOMAIN_GPCCLK, &default_mhz);
if (err < 0) {
err = -EINVAL;
goto init_fail;
}
arb->gpc2clk_default_mhz = default_mhz;
arb->actual = &arb->actual_pool[0];
nvgpu_atomic_set(&arb->req_nr, 0);
nvgpu_atomic64_set(&arb->alarm_mask, 0);
err = nvgpu_clk_notification_queue_alloc(g, &arb->notification_queue,
DEFAULT_EVENT_NUMBER);
if (err < 0) {
goto init_fail;
}
nvgpu_init_list_node(&arb->users);
nvgpu_init_list_node(&arb->sessions);
nvgpu_init_list_node(&arb->requests);
(void)nvgpu_cond_init(&arb->request_wq);
nvgpu_init_list_node(&arb->update_vf_table_work_item.worker_item);
nvgpu_init_list_node(&arb->update_arb_work_item.worker_item);
arb->update_vf_table_work_item.arb = arb;
arb->update_arb_work_item.arb = arb;
arb->update_vf_table_work_item.item_type = CLK_ARB_WORK_UPDATE_VF_TABLE;
arb->update_arb_work_item.item_type = CLK_ARB_WORK_UPDATE_ARB;
err = nvgpu_clk_arb_worker_init(g);
if (err < 0) {
goto init_fail;
}
#ifdef CONFIG_DEBUG_FS
arb->debug = &arb->debug_pool[0];
if (!arb->debugfs_set) {
if (nvgpu_clk_arb_debugfs_init(g)) {
arb->debugfs_set = true;
}
}
#endif
err = clk_vf_point_cache(g);
if (err < 0) {
goto init_fail;
}
err = nvgpu_clk_arb_update_vf_table(arb);
if (err < 0) {
goto init_fail;
}
do {
/* Check that first run is completed */
nvgpu_smp_mb();
NVGPU_COND_WAIT_INTERRUPTIBLE(&arb->request_wq,
nvgpu_atomic_read(&arb->req_nr), 0);
} while (nvgpu_atomic_read(&arb->req_nr) == 0);
return arb->status;
init_fail:
nvgpu_kfree(g, arb->gpc2clk_f_points);
nvgpu_kfree(g, arb->mclk_f_points);
for (index = 0; index < 2; index++) {
nvgpu_kfree(g, arb->vf_table_pool[index].gpc2clk_points);
nvgpu_kfree(g, arb->vf_table_pool[index].mclk_points);
}
nvgpu_mutex_destroy(&arb->pstate_lock);
mutex_fail:
nvgpu_kfree(g, arb);
return err;
}
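/*
* Map the requested gpc2clk/mclk targets onto the current VF table:
* round the frequencies up to table points, and if no common pstate
* exists for the pair, retry with the next higher MCLK point. Returns
* the resulting pstate, or VF_POINT_INVALID_PSTATE on failure.
*/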
static u8 nvgpu_clk_arb_find_vf_point(struct nvgpu_clk_arb *arb,
u16 *gpc2clk, u16 *sys2clk, u16 *xbar2clk, u16 *mclk,
u32 *voltuv, u32 *voltuv_sram, u32 *nuvmin, u32 *nuvmin_sram)
{
u16 gpc2clk_target, mclk_target;
u32 gpc2clk_voltuv, gpc2clk_voltuv_sram;
u32 mclk_voltuv, mclk_voltuv_sram;
u32 current_pstate = VF_POINT_INVALID_PSTATE;
struct nvgpu_clk_vf_table *table;
u32 index, index_mclk;
struct nvgpu_clk_vf_point *mclk_vf = NULL;
do {
gpc2clk_target = *gpc2clk;
mclk_target = *mclk;
gpc2clk_voltuv = 0;
gpc2clk_voltuv_sram = 0;
mclk_voltuv = 0;
mclk_voltuv_sram = 0;
table = NV_ACCESS_ONCE(arb->current_vf_table);
/* pointer to table can be updated by callback */
nvgpu_smp_rmb();
if (table == NULL) {
continue;
}
if ((table->gpc2clk_num_points == 0U) || (table->mclk_num_points == 0U)) {
nvgpu_err(arb->g, "found empty table");
goto find_exit;
}
/* First we check MCLK to find out which PSTATE we are
* requesting, and from there try to find the minimum
* GPC2CLK on the same PSTATE that satisfies the request.
* If no GPC2CLK can be found, then we need to up the PSTATE.
*/
recalculate_vf_point:
for (index = 0; index < table->mclk_num_points; index++) {
if (table->mclk_points[index].mem_mhz >= mclk_target) {
mclk_vf = &table->mclk_points[index];
break;
}
}
if (index == table->mclk_num_points) {
mclk_vf = &table->mclk_points[index-1U];
index = table->mclk_num_points - 1U;
}
index_mclk = index;
/* round up the freq requests */
for (index = 0; index < table->gpc2clk_num_points; index++) {
current_pstate = VF_POINT_COMMON_PSTATE(
&table->gpc2clk_points[index], mclk_vf);
if ((table->gpc2clk_points[index].gpc_mhz >=
gpc2clk_target) &&
(current_pstate != VF_POINT_INVALID_PSTATE)) {
gpc2clk_target =
table->gpc2clk_points[index].gpc_mhz;
*sys2clk =
table->gpc2clk_points[index].sys_mhz;
*xbar2clk =
table->gpc2clk_points[index].xbar_mhz;
gpc2clk_voltuv =
table->gpc2clk_points[index].uvolt;
gpc2clk_voltuv_sram =
table->gpc2clk_points[index].uvolt_sram;
break;
}
}
if (index == table->gpc2clk_num_points) {
current_pstate = VF_POINT_COMMON_PSTATE(
&table->gpc2clk_points[index-1U], mclk_vf);
if (current_pstate != VF_POINT_INVALID_PSTATE) {
gpc2clk_target =
table->gpc2clk_points[index-1U].gpc_mhz;
*sys2clk =
table->gpc2clk_points[index-1U].sys_mhz;
*xbar2clk =
table->gpc2clk_points[index-1U].xbar_mhz;
gpc2clk_voltuv =
table->gpc2clk_points[index-1U].uvolt;
gpc2clk_voltuv_sram =
table->gpc2clk_points[index-1U].
uvolt_sram;
} else if (index_mclk >= table->mclk_num_points - 1U) {
/* There is no available combination of MCLK
* and GPC2CLK, we need to fail this
*/
gpc2clk_target = 0;
mclk_target = 0;
current_pstate = VF_POINT_INVALID_PSTATE;
goto find_exit;
} else {
/* recalculate with higher PSTATE */
gpc2clk_target = *gpc2clk;
mclk_target = table->mclk_points[index_mclk + 1U].
mem_mhz;
goto recalculate_vf_point;
}
}
mclk_target = mclk_vf->mem_mhz;
mclk_voltuv = mclk_vf->uvolt;
mclk_voltuv_sram = mclk_vf->uvolt_sram;
} while ((table == NULL) ||
(NV_ACCESS_ONCE(arb->current_vf_table) != table));
find_exit:
*voltuv = gpc2clk_voltuv > mclk_voltuv ? gpc2clk_voltuv : mclk_voltuv;
*voltuv_sram = gpc2clk_voltuv_sram > mclk_voltuv_sram ?
gpc2clk_voltuv_sram : mclk_voltuv_sram;
/* noise unaware vmin */
*nuvmin = mclk_voltuv;
*nuvmin_sram = mclk_voltuv_sram;
*gpc2clk = gpc2clk_target < *gpc2clk ? gpc2clk_target : *gpc2clk;
*mclk = mclk_target;
return (u8)current_pstate;
}
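/*
* Program the FLL clocks, voltage and MCLK; the ordering depends on
* whether the voltage ascends or descends (see the comment below).
*/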
static int nvgpu_clk_arb_change_vf_point(struct gk20a *g, u16 gpc2clk_target,
u16 sys2clk_target, u16 xbar2clk_target, u16 mclk_target, u32 voltuv,
u32 voltuv_sram)
{
struct set_fll_clk fllclk;
struct nvgpu_clk_arb *arb = g->clk_arb;
int status;
fllclk.gpc2clkmhz = gpc2clk_target;
fllclk.sys2clkmhz = sys2clk_target;
fllclk.xbar2clkmhz = xbar2clk_target;
fllclk.voltuv = voltuv;
/* if voltage ascends we do:
* (1) FLL change
* (2) Voltage change
* (3) MCLK change
* If it goes down
* (1) MCLK change
* (2) Voltage change
* (3) FLL change
*/
/* descending */
if (voltuv < arb->voltuv_actual) {
if (g->ops.clk.mclk_change != NULL) {
status = g->ops.clk.mclk_change(g, mclk_target);
if (status < 0) {
return status;
}
}
status = volt_set_voltage(g, voltuv, voltuv_sram);
if (status < 0) {
return status;
}
status = clk_set_fll_clks(g, &fllclk);
if (status < 0) {
return status;
}
} else {
status = clk_set_fll_clks(g, &fllclk);
if (status < 0) {
return status;
}
status = volt_set_voltage(g, voltuv, voltuv_sram);
if (status < 0) {
return status;
}
if (g->ops.clk.mclk_change != NULL) {
status = g->ops.clk.mclk_change(g, mclk_target);
if (status < 0) {
return status;
}
}
}
return 0;
}
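/*
* Arbiter worker callback: gather and clamp the per-session targets,
* program the winning VF point (via the VF table on GV100, or via
* nvgpu_clk_set_req_fll_clk_ps35() otherwise), publish the new actual
* state, then wake waiters and notify event users.
*/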
void gv100_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb)
{
struct nvgpu_clk_session *session;
struct nvgpu_clk_dev *dev;
struct nvgpu_clk_dev *tmp;
struct nvgpu_clk_arb_target *target, *actual;
struct gk20a *g = arb->g;
u32 current_pstate = VF_POINT_INVALID_PSTATE;
u32 voltuv = 0, voltuv_sram;
bool mclk_set, gpc2clk_set;
u32 nuvmin, nuvmin_sram;
u32 alarms_notified = 0;
u32 current_alarm;
int status = 0;
u32 ver = g->params.gpu_arch + g->params.gpu_impl;
/* Temporary variables for checking target frequency */
u16 gpc2clk_target, sys2clk_target, xbar2clk_target, mclk_target;
u16 gpc2clk_session_target, mclk_session_target;
struct nvgpu_clk_slave_freq vf_point;
#ifdef CONFIG_DEBUG_FS
s64 t0, t1;
struct nvgpu_clk_arb_debug *debug;
#endif
clk_arb_dbg(g, " ");
/* bail out if gpu is down */
if (nvgpu_atomic64_read(&arb->alarm_mask) & EVENT(ALARM_GPU_LOST)) {
goto exit_arb;
}
#ifdef CONFIG_DEBUG_FS
t0 = nvgpu_current_time_ns();
#endif
/* Only one arbiter should be running */
gpc2clk_target = 0;
mclk_target = 0;
nvgpu_spinlock_acquire(&arb->sessions_lock);
nvgpu_list_for_each_entry(session, &arb->sessions,
nvgpu_clk_session, link) {
if (!session->zombie) {
mclk_set = false;
gpc2clk_set = false;
target = (session->target == &session->target_pool[0] ?
&session->target_pool[1] :
&session->target_pool[0]);
nvgpu_spinlock_acquire(&session->session_lock);
if (!nvgpu_list_empty(&session->targets)) {
/* Copy over state */
target->mclk = session->target->mclk;
target->gpc2clk = session->target->gpc2clk;
/* Query the latest committed request */
nvgpu_list_for_each_entry_safe(dev, tmp,
&session->targets, nvgpu_clk_dev, node) {
if ((mclk_set == false) && (dev->mclk_target_mhz != 0U)) {
target->mclk =
dev->mclk_target_mhz;
mclk_set = true;
}
if ((gpc2clk_set == false) &&
(dev->gpc2clk_target_mhz != 0U)) {
target->gpc2clk =
dev->gpc2clk_target_mhz;
gpc2clk_set = true;
}
nvgpu_ref_get(&dev->refcount);
nvgpu_list_del(&dev->node);
nvgpu_spinlock_acquire(
&arb->requests_lock);
nvgpu_list_add(
&dev->node, &arb->requests);
nvgpu_spinlock_release(&arb->requests_lock);
}
session->target = target;
}
nvgpu_spinlock_release(&session->session_lock);
mclk_target = mclk_target > session->target->mclk ?
mclk_target : session->target->mclk;
gpc2clk_target =
gpc2clk_target > session->target->gpc2clk ?
gpc2clk_target : session->target->gpc2clk;
}
}
nvgpu_spinlock_release(&arb->sessions_lock);
gpc2clk_target = (gpc2clk_target > 0U) ? gpc2clk_target :
arb->gpc2clk_default_mhz;
if (gpc2clk_target < arb->gpc2clk_min) {
gpc2clk_target = arb->gpc2clk_min;
}
if (gpc2clk_target > arb->gpc2clk_max) {
gpc2clk_target = arb->gpc2clk_max;
}
mclk_target = (mclk_target > 0U) ? mclk_target :
arb->mclk_default_mhz;
if (mclk_target < arb->mclk_min) {
mclk_target = arb->mclk_min;
}
if (mclk_target > arb->mclk_max) {
mclk_target = arb->mclk_max;
}
sys2clk_target = 0;
xbar2clk_target = 0;
gpc2clk_session_target = gpc2clk_target;
mclk_session_target = mclk_target;
if (ver == NVGPU_GPUID_GV100) {
/* Query the table for the closest vf point to program */
current_pstate = (u8)nvgpu_clk_arb_find_vf_point(arb, &gpc2clk_target,
&sys2clk_target, &xbar2clk_target, &mclk_target, &voltuv,
&voltuv_sram, &nuvmin, &nuvmin_sram);
if ((gpc2clk_target < gpc2clk_session_target) ||
(mclk_target < mclk_session_target)) {
nvgpu_clk_arb_set_global_alarm(g,
EVENT(ALARM_TARGET_VF_NOT_POSSIBLE));
}
if ((arb->actual->gpc2clk == gpc2clk_target) &&
(arb->actual->mclk == mclk_target) &&
(arb->voltuv_actual == voltuv)) {
goto exit_arb;
}
/* Program clocks */
/* A change in either mclk or gpc2clk may require a change in voltage */
nvgpu_mutex_acquire(&arb->pstate_lock);
status = nvgpu_clk_arb_change_vf_point(g, gpc2clk_target,
sys2clk_target, xbar2clk_target, mclk_target, voltuv,
voltuv_sram);
if (status < 0) {
arb->status = status;
nvgpu_mutex_release(&arb->pstate_lock);
/* make status visible */
nvgpu_smp_mb();
goto exit_arb;
}
} else {
vf_point.gpc_mhz = gpc2clk_target;
status = nvgpu_clk_arb_find_slave_points(arb, &vf_point);
if (status != 0) {
nvgpu_err(g, "Unable to get slave frequency");
goto exit_arb;
}
status = nvgpu_clk_set_req_fll_clk_ps35(g, &vf_point);
if (status != 0) {
nvgpu_err(g, "Unable to program frequency");
goto exit_arb;
}
}
actual = NV_ACCESS_ONCE(arb->actual) == &arb->actual_pool[0] ?
&arb->actual_pool[1] : &arb->actual_pool[0];
/* do not reorder this pointer */
nvgpu_smp_rmb();
actual->gpc2clk = gpc2clk_target;
actual->mclk = mclk_target;
arb->voltuv_actual = voltuv;
actual->pstate = current_pstate;
arb->status = status;
/* Make changes visible to other threads */
nvgpu_smp_wmb();
arb->actual = actual;
/* status must be visible before atomic inc */
nvgpu_smp_wmb();
nvgpu_atomic_inc(&arb->req_nr);
if (ver == NVGPU_GPUID_GV100) {
/* Unlock pstate change for PG */
nvgpu_mutex_release(&arb->pstate_lock);
}
/* VF Update complete */
nvgpu_clk_arb_set_global_alarm(g, EVENT(VF_UPDATE));
nvgpu_cond_signal_interruptible(&arb->request_wq);
#ifdef CONFIG_DEBUG_FS
t1 = nvgpu_current_time_ns();
debug = arb->debug == &arb->debug_pool[0] ?
&arb->debug_pool[1] : &arb->debug_pool[0];
memcpy(debug, arb->debug, sizeof(arb->debug_pool[0]));
debug->switch_num++;
if (debug->switch_num == 1) {
debug->switch_max = debug->switch_min =
debug->switch_avg = (t1-t0)/1000;
debug->switch_std = 0;
} else {
s64 prev_avg;
s64 curr = (t1-t0)/1000;
debug->switch_max = curr > debug->switch_max ?
curr : debug->switch_max;
debug->switch_min = debug->switch_min ?
(curr < debug->switch_min ?
curr : debug->switch_min) : curr;
prev_avg = debug->switch_avg;
debug->switch_avg = (curr +
(debug->switch_avg * (debug->switch_num-1))) /
debug->switch_num;
debug->switch_std +=
(curr - debug->switch_avg) * (curr - prev_avg);
}
/* commit changes before exchanging debug pointer */
nvgpu_smp_wmb();
arb->debug = debug;
#endif
exit_arb:
if (status < 0) {
nvgpu_err(g, "Error in arbiter update");
nvgpu_clk_arb_set_global_alarm(g,
EVENT(ALARM_CLOCK_ARBITER_FAILED));
}
current_alarm = (u32) nvgpu_atomic64_read(&arb->alarm_mask);
/* notify completion for all requests */
nvgpu_spinlock_acquire(&arb->requests_lock);
nvgpu_list_for_each_entry_safe(dev, tmp, &arb->requests,
nvgpu_clk_dev, node) {
nvgpu_atomic_set(&dev->poll_mask,
NVGPU_POLLIN | NVGPU_POLLRDNORM);
nvgpu_clk_arb_event_post_event(dev);
nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd);
nvgpu_list_del(&dev->node);
}
nvgpu_spinlock_release(&arb->requests_lock);
nvgpu_atomic_set(&arb->notification_queue.head,
nvgpu_atomic_read(&arb->notification_queue.tail));
/* notify event for all users */
nvgpu_spinlock_acquire(&arb->users_lock);
nvgpu_list_for_each_entry(dev, &arb->users, nvgpu_clk_dev, link) {
alarms_notified |=
nvgpu_clk_arb_notify(dev, arb->actual, current_alarm);
}
nvgpu_spinlock_release(&arb->users_lock);
/* clear alarms */
nvgpu_clk_arb_clear_global_alarm(g, alarms_notified &
~EVENT(ALARM_GPU_LOST));
}
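/* Free the arbiter's VF tables and the arbiter itself; clears g->clk_arb. */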
void gv100_clk_arb_cleanup(struct nvgpu_clk_arb *arb)
{
struct gk20a *g = arb->g;
int index;
nvgpu_kfree(g, arb->gpc2clk_f_points);
nvgpu_kfree(g, arb->mclk_f_points);
for (index = 0; index < 2; index++) {
nvgpu_kfree(g,
arb->vf_table_pool[index].gpc2clk_points);
nvgpu_kfree(g, arb->vf_table_pool[index].mclk_points);
}
nvgpu_mutex_destroy(&g->clk_arb->pstate_lock);
nvgpu_kfree(g, g->clk_arb);
g->clk_arb = NULL;
}