Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: prepare pmu mutexes unit
The PMU mutexes used by the FIFO and runlist code are functionally independent of PMU command and message management. Remove the related functionality from pmu_ipc.c and prepare pmu_mutex.c. Prepare a PMU HAL unit that contains the gk20a-specific PMU mutex handling.

JIRA NVGPU-1970

Change-Id: I0204be2ef9d2c000004667af3c18dc527d7ac25f
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2079142
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 9a55ecb5d2 (parent 499da418c1), committed by mobile promotions
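
For orientation, the caller-side change throughout this diff is mechanical: FIFO and runlist sites stop calling the old PMU-struct entry points and go through new wrappers that take the gk20a pointer explicitly. A minimal before/after sketch of the rename (identifiers taken from this diff; surrounding code elided):

    /* before this change: */
    mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

    /* after this change: */
    mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu, PMU_MUTEX_ID_FIFO, &token);
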
@@ -88,6 +88,7 @@ nvgpu-y += \
 	common/init/nvgpu_init.o \
 	common/pmu/pmu.o \
 	common/pmu/pmu_ipc.o \
+	common/pmu/pmu_mutex.o \
 	common/pmu/pmu_seq.o \
 	common/pmu/pmu_fw.o \
 	common/pmu/pg/pmu_pg.o \
@@ -258,7 +259,8 @@ nvgpu-y += \
 	hal/nvlink/minion_tu104.o \
 	hal/nvlink/link_mode_transitions_gv100.o \
 	hal/nvlink/link_mode_transitions_tu104.o \
-	hal/gsp/gsp_gv100.o
+	hal/gsp/gsp_gv100.o \
+	hal/pmu/pmu_hal_gk20a.o

 # Linux specific parts of nvgpu.
 nvgpu-y += \
@@ -127,6 +127,7 @@ srcs += common/sim.c \
 	common/netlist/netlist_tu104.c \
 	common/pmu/pmu.c \
 	common/pmu/pmu_ipc.c \
+	common/pmu/pmu_mutex.c \
 	common/pmu/pmu_seq.c \
 	common/pmu/pmu_fw.c \
 	common/pmu/pg/pmu_pg.c \
@@ -388,7 +389,8 @@ srcs += common/sim.c \
 	hal/nvlink/minion_tu104.c \
 	hal/nvlink/link_mode_transitions_gv100.c \
 	hal/nvlink/link_mode_transitions_tu104.c \
-	hal/gsp/gsp_gv100.c
+	hal/gsp/gsp_gv100.c \
+	hal/pmu/pmu_hal_gk20a.c

 ifeq ($(NVGPU_DEBUGGER),1)
 srcs += common/debugger.c
@@ -295,7 +295,7 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
 	}

 	if (g->ops.pmu.is_pmu_supported(g)) {
-		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+		mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu,
 					PMU_MUTEX_ID_FIFO, &token);
 	}

@@ -348,7 +348,7 @@ int nvgpu_engine_disable_activity(struct gk20a *g,

clean_up:
 	if (mutex_ret == 0) {
-		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}

 	if (err != 0) {
@@ -443,8 +443,8 @@ int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
 		return -EBUSY;
 	}

-	mutex_ret = nvgpu_pmu_mutex_acquire(
-			&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	mutex_ret = nvgpu_pmu_lock_acquire(
+			g, &g->pmu, PMU_MUTEX_ID_FIFO, &token);

 	g->ops.runlist.hw_submit(
@@ -457,8 +457,8 @@ int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
 	g->ops.runlist.wait_pending(g, ch->runlist_id);

 	if (mutex_ret == 0) {
-		nvgpu_pmu_mutex_release(
-			&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_lock_release(
+			g, &g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
 	nvgpu_mutex_release(&runlist->runlist_lock);

@@ -495,14 +495,14 @@ static int gk20a_runlist_update(struct gk20a *g, u32 runlist_id,

 	nvgpu_mutex_acquire(&runlist->runlist_lock);

-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+	mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu,
 				PMU_MUTEX_ID_FIFO, &token);

 	ret = gk20a_runlist_update_locked(g, runlist_id, ch, add,
 				wait_for_finish);

 	if (mutex_ret == 0) {
-		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}

 	nvgpu_mutex_release(&runlist->runlist_lock);

@@ -590,13 +590,13 @@ void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
 		runlists_mask, runlist_state);

-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+	mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu,
 				PMU_MUTEX_ID_FIFO, &token);

 	g->ops.runlist.write_state(g, runlists_mask, runlist_state);

 	if (mutex_ret == 0) {
-		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
 }
@@ -142,7 +142,6 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct mm_gk20a *mm = &g->mm;
 	struct vm_gk20a *vm = mm->pmu.vm;
-	unsigned int i;
 	int err = 0;
 	u8 *ptr;

@@ -154,11 +153,7 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
 	/* Create thread to handle PMU state machine */
 	nvgpu_init_task_pg_init(g);
 	if (pmu->sw_ready) {
-		for (i = 0; i < pmu->mutex_cnt; i++) {
-			pmu->mutex[i].id = i;
-			pmu->mutex[i].index = i;
-		}
-
+		nvgpu_pmu_mutexes_init(&pmu->mutexes);
 		nvgpu_pmu_sequences_init(&pmu->sequences);

 		nvgpu_log_fn(g, "skip init");
@@ -169,18 +164,12 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)

 	/* TBD: sysmon subtask */

-	pmu->mutex_cnt = g->ops.pmu.pmu_mutex_size();
-	pmu->mutex = nvgpu_kzalloc(g, pmu->mutex_cnt *
-			sizeof(struct pmu_mutex));
-	if (pmu->mutex == NULL) {
-		err = -ENOMEM;
+	err = nvgpu_pmu_mutexes_alloc(g, &pmu->mutexes);
+	if (err != 0) {
 		goto err;
 	}

-	for (i = 0; i < pmu->mutex_cnt; i++) {
-		pmu->mutex[i].id = i;
-		pmu->mutex[i].index = i;
-	}
+	nvgpu_pmu_mutexes_init(&pmu->mutexes);

 	err = nvgpu_pmu_sequences_alloc(g, &pmu->sequences);
 	if (err != 0) {
@@ -237,7 +226,7 @@ skip_init:
err_free_seq:
 	nvgpu_pmu_sequences_free(g, &pmu->sequences);
err_free_mutex:
-	nvgpu_kfree(g, pmu->mutex);
+	nvgpu_pmu_mutexes_free(g, &pmu->mutexes);
err:
 	nvgpu_log_fn(g, "fail");
 	return err;
@@ -671,3 +660,31 @@ void nvgpu_pmu_report_bar0_pri_err_status(struct gk20a *g, u32 bar0_status,
 			GPU_PMU_BAR0_ERROR_TIMEOUT, bar0_status, error_type);
 	return;
 }
+
+int nvgpu_pmu_lock_acquire(struct gk20a *g, struct nvgpu_pmu *pmu,
+			u32 id, u32 *token)
+{
+	if (!g->support_ls_pmu) {
+		return 0;
+	}
+
+	if (!pmu->pmu_pg.initialized) {
+		return -EINVAL;
+	}
+
+	return nvgpu_pmu_mutex_acquire(g, &pmu->mutexes, id, token);
+}
+
+int nvgpu_pmu_lock_release(struct gk20a *g, struct nvgpu_pmu *pmu,
+			u32 id, u32 *token)
+{
+	if (!g->support_ls_pmu) {
+		return 0;
+	}
+
+	if (!pmu->pmu_pg.initialized) {
+		return -EINVAL;
+	}
+
+	return nvgpu_pmu_mutex_release(g, &pmu->mutexes, id, token);
+}
@@ -1657,6 +1657,7 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
 	nvgpu_mutex_destroy(&pmu->isr_mutex);
 	nvgpu_mutex_destroy(&pmu->pmu_copy_lock);
 	nvgpu_pmu_sequences_free(g, &pmu->sequences);
+	nvgpu_pmu_mutexes_free(g, &pmu->mutexes);
 }

 static int init_pmu_ucode(struct nvgpu_pmu *pmu)
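
Note the caller-visible contract the new wrappers keep: when there is no low-secure PMU (g->support_ls_pmu is false), acquire returns 0 without touching hardware, and the paired release, also a no-op then, still runs under the usual mutex_ret check; when PMU power-gating state is not yet initialized, acquire returns -EINVAL and callers skip the release. A hedged illustration of that contract (not code from this commit):

    u32 token = PMU_INVALID_MUTEX_OWNER_ID;
    int mutex_ret;

    mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu, PMU_MUTEX_ID_FIFO, &token);

    /* ... PMU-arbitrated work here ... */

    if (mutex_ret == 0) {
        nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO, &token);
    }
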
@@ -283,119 +283,6 @@ void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32 pg_engine_id)
 		PMU_PG_POST_POWERUP_IDLE_THRESHOLD);
 }

-int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct pmu_mutex *mutex;
-	u32 data, owner, max_retry;
-
-	if (!pmu->pmu_pg.initialized) {
-		return -EINVAL;
-	}
-
-	BUG_ON(token == NULL);
-	BUG_ON(!PMU_MUTEX_ID_IS_VALID(id));
-	BUG_ON(id > pmu->mutex_cnt);
-
-	mutex = &pmu->mutex[id];
-
-	owner = pwr_pmu_mutex_value_v(
-		gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
-
-	if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) {
-		BUG_ON(mutex->ref_cnt == 0U);
-		gk20a_dbg_pmu(g, "already acquired by owner : 0x%08x", *token);
-		mutex->ref_cnt++;
-		return 0;
-	}
-
-	max_retry = 40;
-	do {
-		data = pwr_pmu_mutex_id_value_v(
-			gk20a_readl(g, pwr_pmu_mutex_id_r()));
-		if (data == pwr_pmu_mutex_id_value_init_v() ||
-		    data == pwr_pmu_mutex_id_value_not_avail_v()) {
-			nvgpu_warn(g,
-				"fail to generate mutex token: val 0x%08x",
-				owner);
-			nvgpu_usleep_range(20, 40);
-			continue;
-		}
-
-		owner = data;
-		gk20a_writel(g, pwr_pmu_mutex_r(mutex->index),
-			pwr_pmu_mutex_value_f(owner));
-
-		data = pwr_pmu_mutex_value_v(
-			gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
-
-		if (owner == data) {
-			mutex->ref_cnt = 1;
-			gk20a_dbg_pmu(g, "mutex acquired: id=%d, token=0x%x",
-				mutex->index, *token);
-			*token = owner;
-			return 0;
-		} else {
-			nvgpu_log_info(g, "fail to acquire mutex idx=0x%08x",
-				mutex->index);
-
-			data = gk20a_readl(g, pwr_pmu_mutex_id_release_r());
-			data = set_field(data,
-				pwr_pmu_mutex_id_release_value_m(),
-				pwr_pmu_mutex_id_release_value_f(owner));
-			gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data);
-
-			nvgpu_usleep_range(20, 40);
-			continue;
-		}
-	} while (max_retry-- > 0U);
-
-	return -EBUSY;
-}
-
-int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct pmu_mutex *mutex;
-	u32 owner, data;
-
-	if (!pmu->pmu_pg.initialized) {
-		return -EINVAL;
-	}
-
-	BUG_ON(token == NULL);
-	BUG_ON(!PMU_MUTEX_ID_IS_VALID(id));
-	BUG_ON(id > pmu->mutex_cnt);
-
-	mutex = &pmu->mutex[id];
-
-	owner = pwr_pmu_mutex_value_v(
-		gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
-
-	if (*token != owner) {
-		nvgpu_err(g, "requester 0x%08x NOT match owner 0x%08x",
-			*token, owner);
-		return -EINVAL;
-	}
-
-	if (--mutex->ref_cnt > 0U) {
-		return -EBUSY;
-	}
-
-	gk20a_writel(g, pwr_pmu_mutex_r(mutex->index),
-		pwr_pmu_mutex_value_initial_lock_f());
-
-	data = gk20a_readl(g, pwr_pmu_mutex_id_release_r());
-	data = set_field(data, pwr_pmu_mutex_id_release_value_m(),
-		pwr_pmu_mutex_id_release_value_f(owner));
-	gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data);
-
-	gk20a_dbg_pmu(g, "mutex released: id=%d, token=0x%x",
-		mutex->index, *token);
-
-	return 0;
-}
-
 int gk20a_pmu_queue_head(struct gk20a *g, u32 queue_id, u32 queue_index,
 			u32 *head, bool set)
 {
@@ -46,9 +46,6 @@ void gk20a_pmu_init_perfmon_counter(struct gk20a *g);

 void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32 pg_engine_id);

-int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token);
-int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token);
-
 int gk20a_pmu_queue_head(struct gk20a *g, u32 queue_id, u32 queue_index,
 			u32 *head, bool set);
 int gk20a_pmu_queue_tail(struct gk20a *g, u32 queue_id, u32 queue_index,
@@ -34,29 +34,6 @@
 #include <nvgpu/string.h>
 #include <nvgpu/pmu/seq.h>

-/* mutex */
-int nvgpu_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-
-	if (!g->support_ls_pmu) {
-		return 0;
-	}
-
-	return g->ops.pmu.pmu_mutex_acquire(pmu, id, token);
-}
-
-int nvgpu_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-
-	if (!g->support_ls_pmu) {
-		return 0;
-	}
-
-	return g->ops.pmu.pmu_mutex_release(pmu, id, token);
-}
-
 /* FB queue init */
 int nvgpu_pmu_queue_init_fb(struct nvgpu_pmu *pmu,
 		u32 id, union pmu_init_msg_pmu *init)
new file (111 lines): drivers/gpu/nvgpu/common/pmu/pmu_mutex.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <nvgpu/pmu/mutex.h>
+#include <nvgpu/gk20a.h>
+
+int nvgpu_pmu_mutexes_alloc(struct gk20a *g, struct pmu_mutexes *mutexes)
+{
+	mutexes->cnt = g->ops.pmu.pmu_mutex_size();
+	mutexes->mutex = nvgpu_kzalloc(g, mutexes->cnt *
+		sizeof(struct pmu_mutex));
+	if (mutexes->mutex == NULL) {
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void nvgpu_pmu_mutexes_init(struct pmu_mutexes *mutexes)
+{
+	u32 i;
+
+	for (i = 0; i < mutexes->cnt; i++) {
+		mutexes->mutex[i].id = i;
+		mutexes->mutex[i].index = i;
+	}
+}
+
+void nvgpu_pmu_mutexes_free(struct gk20a *g, struct pmu_mutexes *mutexes)
+{
+	nvgpu_kfree(g, mutexes->mutex);
+}
+
+int nvgpu_pmu_mutex_acquire(struct gk20a *g, struct pmu_mutexes *mutexes,
+			u32 id, u32 *token)
+{
+	struct pmu_mutex *mutex;
+	u32 owner;
+	int err;
+
+	WARN_ON(token == NULL);
+	WARN_ON(!PMU_MUTEX_ID_IS_VALID(id));
+	WARN_ON(id > mutexes->cnt);
+
+	mutex = &mutexes->mutex[id];
+
+	owner = g->ops.pmu.pmu_mutex_owner(g, mutexes, id);
+
+	if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) {
+		WARN_ON(mutex->ref_cnt == 0U);
+		nvgpu_err(g, "already acquired by owner : 0x%08x", *token);
+		mutex->ref_cnt++;
+		return 0;
+	}
+
+	err = g->ops.pmu.pmu_mutex_acquire(g, mutexes, id, token);
+	if (err == 0) {
+		mutex->ref_cnt = 1;
+	}
+
+	return err;
+}
+
+int nvgpu_pmu_mutex_release(struct gk20a *g, struct pmu_mutexes *mutexes,
+			u32 id, u32 *token)
+{
+	struct pmu_mutex *mutex;
+	u32 owner;
+
+	WARN_ON(token == NULL);
+	WARN_ON(!PMU_MUTEX_ID_IS_VALID(id));
+	WARN_ON(id > mutexes->cnt);
+
+	mutex = &mutexes->mutex[id];
+
+	owner = g->ops.pmu.pmu_mutex_owner(g, mutexes, id);
+
+	if (*token != owner) {
+		nvgpu_err(g, "requester 0x%08x NOT match owner 0x%08x",
+			*token, owner);
+		return -EINVAL;
+	}
+
+	if (--mutex->ref_cnt > 0U) {
+		return -EBUSY;
+	}
+
+	g->ops.pmu.pmu_mutex_release(g, mutexes, id, token);
+
+	return 0;
+}
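
The common unit above now owns the software state (owner check and ref_cnt); only the raw register handshake is delegated to the per-chip pmu_mutex_owner/acquire/release HAL ops. A hedged sketch of the resulting nesting behavior (illustrative, not code from this commit):

    u32 token = PMU_INVALID_MUTEX_OWNER_ID;

    if (nvgpu_pmu_mutex_acquire(g, &pmu->mutexes, PMU_MUTEX_ID_PG, &token) == 0) {
        /* same token again: ref_cnt goes 1 -> 2, hardware untouched */
        (void) nvgpu_pmu_mutex_acquire(g, &pmu->mutexes, PMU_MUTEX_ID_PG, &token);

        /* ref_cnt 2 -> 1: returns -EBUSY, HW mutex still held */
        (void) nvgpu_pmu_mutex_release(g, &pmu->mutexes, PMU_MUTEX_ID_PG, &token);

        /* ref_cnt 1 -> 0: HAL release writes the HW release */
        (void) nvgpu_pmu_mutex_release(g, &pmu->mutexes, PMU_MUTEX_ID_PG, &token);
    }
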
@@ -926,13 +926,13 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch)
 	/* we have no idea which runlist we are using. lock all */
 	nvgpu_fifo_lock_active_runlists(g);

-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+	mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu,
 				PMU_MUTEX_ID_FIFO, &token);

 	ret = __locked_fifo_preempt(g, ch->chid, false);

 	if (mutex_ret == 0) {
-		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}

 	nvgpu_fifo_unlock_active_runlists(g);
@@ -963,13 +963,13 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 	/* we have no idea which runlist we are using. lock all */
 	nvgpu_fifo_lock_active_runlists(g);

-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+	mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu,
 				PMU_MUTEX_ID_FIFO, &token);

 	ret = __locked_fifo_preempt(g, tsg->tsgid, true);

 	if (mutex_ret == 0) {
-		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+		nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}

 	nvgpu_fifo_unlock_active_runlists(g);
@@ -74,6 +74,7 @@
 #include "hal/gr/config/gr_config_gm20b.h"
 #include "hal/gr/ctxsw_prog/ctxsw_prog_gm20b.h"
 #include "hal/falcon/falcon_gk20a.h"
+#include "hal/pmu/pmu_hal_gk20a.h"

 #include "common/ptimer/ptimer_gk20a.h"
 #include "common/netlist/netlist_gm20b.h"
@@ -795,6 +796,7 @@ static const struct gpu_ops gm20b_ops = {
 		.pmu_queue_tail = gk20a_pmu_queue_tail,
 		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
 		.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
+		.pmu_mutex_owner = gk20a_pmu_mutex_owner,
 		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
 		.pmu_mutex_release = gk20a_pmu_mutex_release,
 		.pmu_is_interrupted = gk20a_pmu_is_interrupted,
@@ -92,6 +92,7 @@
 #include "hal/gr/ctxsw_prog/ctxsw_prog_gm20b.h"
 #include "hal/gr/ctxsw_prog/ctxsw_prog_gp10b.h"
 #include "hal/falcon/falcon_gk20a.h"
+#include "hal/pmu/pmu_hal_gk20a.h"

 #include "common/ptimer/ptimer_gk20a.h"
 #include "common/netlist/netlist_gp10b.h"
@@ -887,6 +888,7 @@ static const struct gpu_ops gp10b_ops = {
 		.pmu_queue_tail = gk20a_pmu_queue_tail,
 		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
 		.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
+		.pmu_mutex_owner = gk20a_pmu_mutex_owner,
 		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
 		.pmu_mutex_release = gk20a_pmu_mutex_release,
 		.pmu_is_interrupted = gk20a_pmu_is_interrupted,
@@ -89,6 +89,7 @@
 #include "hal/falcon/falcon_gk20a.h"
 #include "hal/nvdec/nvdec_gp106.h"
 #include "hal/gsp/gsp_gv100.h"
+#include "hal/pmu/pmu_hal_gk20a.h"

 #include "common/ptimer/ptimer_gk20a.h"
 #include "common/xve/xve_gp106.h"
@@ -1085,6 +1086,7 @@ static const struct gpu_ops gv100_ops = {
 		.pmu_init_perfmon = nvgpu_pmu_init_perfmon,
 		.pmu_perfmon_start_sampling = nvgpu_pmu_perfmon_start_sampling,
 		.pmu_perfmon_stop_sampling = nvgpu_pmu_perfmon_stop_sampling,
+		.pmu_mutex_owner = gk20a_pmu_mutex_owner,
 		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
 		.pmu_is_lpwr_feature_supported =
 			gp106_pmu_is_lpwr_feature_supported,
@@ -545,13 +545,13 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 	/* WAR for Bug 2065990 */
 	gk20a_tsg_disable_sched(g, tsg);

-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+	mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu,
 				PMU_MUTEX_ID_FIFO, &token);

 	ret = __locked_fifo_preempt(g, tsg->tsgid, true);

 	if (mutex_ret == 0) {
-		int err = nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO,
+		int err = nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO,
 				&token);
 		if (err != 0) {
 			nvgpu_err(g, "PMU_MUTEX_ID_FIFO not released err=%d",
@@ -588,7 +588,7 @@ static void gv11b_fifo_locked_preempt_runlists_rc(struct gk20a *g,
 	/* runlist_lock are locked by teardown and sched are disabled too */
 	nvgpu_log_fn(g, "preempt runlists_mask:0x%08x", runlists_mask);

-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+	mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu,
 				PMU_MUTEX_ID_FIFO, &token);

 	/* issue runlist preempt */
@@ -609,7 +609,7 @@ static void gv11b_fifo_locked_preempt_runlists_rc(struct gk20a *g,
 	}

 	if (mutex_ret == 0) {
-		int err = nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO,
+		int err = nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO,
 				&token);
 		if (err != 0) {
 			nvgpu_err(g, "PMU_MUTEX_ID_FIFO not released err=%d",
@@ -634,7 +634,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 	nvgpu_err(g, "runlist id unknown, abort active tsgs in runlists");

 	/* runlist_lock are locked by teardown */
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+	mutex_ret = nvgpu_pmu_lock_acquire(g, &g->pmu,
 				PMU_MUTEX_ID_FIFO, &token);

 	for (i = 0U; i < f->num_runlists; i++) {
@@ -695,7 +695,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 		}
 	}
 	if (mutex_ret == 0) {
-		err = nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO,
+		err = nvgpu_pmu_lock_release(g, &g->pmu, PMU_MUTEX_ID_FIFO,
 				&token);
 		if (err != 0) {
 			nvgpu_err(g, "PMU_MUTEX_ID_FIFO not released err=%d",
@@ -87,6 +87,7 @@
 #include "hal/gr/ctxsw_prog/ctxsw_prog_gp10b.h"
 #include "hal/gr/ctxsw_prog/ctxsw_prog_gv11b.h"
 #include "hal/falcon/falcon_gk20a.h"
+#include "hal/pmu/pmu_hal_gk20a.h"

 #include "common/ptimer/ptimer_gk20a.h"
 #include "common/netlist/netlist_gv11b.h"
@@ -1044,6 +1045,7 @@ static const struct gpu_ops gv11b_ops = {
 		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
 		/* mutex */
 		.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
+		.pmu_mutex_owner = gk20a_pmu_mutex_owner,
 		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
 		.pmu_mutex_release = gk20a_pmu_mutex_release,
 		/* power-gating */
new file (116 lines): drivers/gpu/nvgpu/hal/pmu/pmu_hal_gk20a.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <nvgpu/pmu/mutex.h>
+#include <nvgpu/gk20a.h>
+#include <nvgpu/io.h>
+
+#include <nvgpu/hw/gk20a/hw_pwr_gk20a.h>
+
+#include "pmu_hal_gk20a.h"
+
+u32 gk20a_pmu_mutex_owner(struct gk20a *g, struct pmu_mutexes *mutexes, u32 id)
+{
+	struct pmu_mutex *mutex;
+
+	mutex = &mutexes->mutex[id];
+
+	return pwr_pmu_mutex_value_v(
+		gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
+}
+
+int gk20a_pmu_mutex_acquire(struct gk20a *g, struct pmu_mutexes *mutexes,
+			u32 id, u32 *token)
+{
+	struct pmu_mutex *mutex;
+	u32 data, owner, max_retry;
+	int ret = -EBUSY;
+
+	mutex = &mutexes->mutex[id];
+
+	owner = pwr_pmu_mutex_value_v(
+		gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
+
+	max_retry = 40;
+	do {
+		data = pwr_pmu_mutex_id_value_v(
+			gk20a_readl(g, pwr_pmu_mutex_id_r()));
+		if (data == pwr_pmu_mutex_id_value_init_v() ||
+		    data == pwr_pmu_mutex_id_value_not_avail_v()) {
+			nvgpu_warn(g,
+				"fail to generate mutex token: val 0x%08x",
+				owner);
+			nvgpu_usleep_range(20, 40);
+			continue;
+		}
+
+		owner = data;
+		gk20a_writel(g, pwr_pmu_mutex_r(mutex->index),
+			pwr_pmu_mutex_value_f(owner));
+
+		data = pwr_pmu_mutex_value_v(
+			gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
+
+		if (owner == data) {
+			nvgpu_log_info(g, "mutex acquired: id=%d, token=0x%x",
+				mutex->index, *token);
+			*token = owner;
+			ret = 0;
+			break;
+		}
+
+		nvgpu_log_info(g, "fail to acquire mutex idx=0x%08x",
+			mutex->index);
+
+		data = gk20a_readl(g, pwr_pmu_mutex_id_release_r());
+		data = set_field(data,
+			pwr_pmu_mutex_id_release_value_m(),
+			pwr_pmu_mutex_id_release_value_f(owner));
+		gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data);
+
+		nvgpu_usleep_range(20, 40);
+	} while (max_retry-- > 0U);
+
+	return ret;
+}
+
+void gk20a_pmu_mutex_release(struct gk20a *g, struct pmu_mutexes *mutexes,
+			u32 id, u32 *token)
+{
+	struct pmu_mutex *mutex;
+	u32 owner, data;
+
+	mutex = &mutexes->mutex[id];
+
+	owner = pwr_pmu_mutex_value_v(
+		gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
+
+	gk20a_writel(g, pwr_pmu_mutex_r(mutex->index),
+		pwr_pmu_mutex_value_initial_lock_f());
+
+	data = gk20a_readl(g, pwr_pmu_mutex_id_release_r());
+	data = set_field(data, pwr_pmu_mutex_id_release_value_m(),
+		pwr_pmu_mutex_id_release_value_f(owner));
+	gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data);
+
+	nvgpu_log_info(g, "mutex released: id=%d, token=0x%x",
+		mutex->index, *token);
+}
new file (37 lines): drivers/gpu/nvgpu/hal/pmu/pmu_hal_gk20a.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef NVGPU_PMU_MUTEX_GK20A_H
+#define NVGPU_PMU_MUTEX_GK20A_H
+
+#include <nvgpu/types.h>
+
+struct pmu_mutexes;
+struct gk20a;
+
+u32 gk20a_pmu_mutex_owner(struct gk20a *g, struct pmu_mutexes *mutexes,
+		u32 id);
+int gk20a_pmu_mutex_acquire(struct gk20a *g, struct pmu_mutexes *mutexes,
+		u32 id, u32 *token);
+void gk20a_pmu_mutex_release(struct gk20a *g, struct pmu_mutexes *mutexes,
+		u32 id, u32 *token);
+
+#endif /* NVGPU_PMU_MUTEX_GK20A_H */
@@ -1383,10 +1383,15 @@ struct gpu_ops {
 		void (*pmu_msgq_tail)(struct nvgpu_pmu *pmu,
 			u32 *tail, bool set);
 		u32 (*pmu_mutex_size)(void);
-		int (*pmu_mutex_acquire)(struct nvgpu_pmu *pmu,
-			u32 id, u32 *token);
-		int (*pmu_mutex_release)(struct nvgpu_pmu *pmu,
-			u32 id, u32 *token);
+		u32 (*pmu_mutex_owner)(struct gk20a *g,
+			struct pmu_mutexes *mutexes,
+			u32 id);
+		int (*pmu_mutex_acquire)(struct gk20a *g,
+			struct pmu_mutexes *mutexes,
+			u32 id, u32 *token);
+		void (*pmu_mutex_release)(struct gk20a *g,
+			struct pmu_mutexes *mutexes, u32 id,
+			u32 *token);
 		bool (*pmu_is_interrupted)(struct nvgpu_pmu *pmu);
 		void (*pmu_isr)(struct gk20a *g);
 		void (*pmu_init_perfmon_counter)(struct gk20a *g);
@@ -35,6 +35,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/pmu/pmu_pg.h>
 #include <nvgpu/pmu/seq.h>
+#include <nvgpu/pmu/mutex.h>

 #define nvgpu_pmu_dbg(g, fmt, args...) \
 	nvgpu_log(g, gpu_dbg_pmu, fmt, ##args)
@@ -243,12 +244,6 @@ struct pmu_ucode_desc {
 	u32 compressed;
 };

-struct pmu_mutex {
-	u32 id;
-	u32 index;
-	u32 ref_cnt;
-};
-
 struct nvgpu_pmu {
 	struct gk20a *g;
 	struct nvgpu_falcon flcn;
@@ -279,8 +274,7 @@ struct nvgpu_pmu {

 	struct pmu_sequences sequences;

-	struct pmu_mutex *mutex;
-	u32 mutex_cnt;
+	struct pmu_mutexes mutexes;

 	struct nvgpu_mutex pmu_copy_lock;

@@ -339,9 +333,10 @@ struct pg_init_sequence_list {
 	u32 writeval;
 };

-/* PMU IPC Methods */
-int nvgpu_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token);
-int nvgpu_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token);
+int nvgpu_pmu_lock_acquire(struct gk20a *g, struct nvgpu_pmu *pmu,
+			u32 id, u32 *token);
+int nvgpu_pmu_lock_release(struct gk20a *g, struct nvgpu_pmu *pmu,
+			u32 id, u32 *token);

 int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu, u32 id,
 		union pmu_init_msg_pmu *init);
new file (72 lines): drivers/gpu/nvgpu/include/nvgpu/pmu/mutex.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVGPU_PMU_MUTEX_H
+#define NVGPU_PMU_MUTEX_H
+
+#include <nvgpu/types.h>
+
+struct gk20a;
+
+/* List of valid logical mutex identifiers */
+#define PMU_MUTEX_ID_RSVD1		0U
+#define PMU_MUTEX_ID_GPUSER		1U
+#define PMU_MUTEX_ID_QUEUE_BIOS		2U
+#define PMU_MUTEX_ID_QUEUE_SMI		3U
+#define PMU_MUTEX_ID_GPMUTEX		4U
+#define PMU_MUTEX_ID_I2C		5U
+#define PMU_MUTEX_ID_RMLOCK		6U
+#define PMU_MUTEX_ID_MSGBOX		7U
+#define PMU_MUTEX_ID_FIFO		8U
+#define PMU_MUTEX_ID_PG			9U
+#define PMU_MUTEX_ID_GR			10U
+#define PMU_MUTEX_ID_CLK		11U
+#define PMU_MUTEX_ID_RSVD6		12U
+#define PMU_MUTEX_ID_RSVD7		13U
+#define PMU_MUTEX_ID_RSVD8		14U
+#define PMU_MUTEX_ID_RSVD9		15U
+#define PMU_MUTEX_ID_INVALID		16U
+
+#define PMU_MUTEX_ID_IS_VALID(id)	\
+		((id) < PMU_MUTEX_ID_INVALID)
+
+#define PMU_INVALID_MUTEX_OWNER_ID	0U
+
+struct pmu_mutex {
+	u32 id;
+	u32 index;
+	u32 ref_cnt;
+};
+
+struct pmu_mutexes {
+	struct pmu_mutex *mutex;
+	u32 cnt;
+};
+
+int nvgpu_pmu_mutexes_alloc(struct gk20a *g, struct pmu_mutexes *mutexes);
+void nvgpu_pmu_mutexes_init(struct pmu_mutexes *mutexes);
+void nvgpu_pmu_mutexes_free(struct gk20a *g, struct pmu_mutexes *mutexes);
+int nvgpu_pmu_mutex_acquire(struct gk20a *g, struct pmu_mutexes *mutexes,
+		u32 id, u32 *token);
+int nvgpu_pmu_mutex_release(struct gk20a *g, struct pmu_mutexes *mutexes,
+		u32 id, u32 *token);
+#endif /* NVGPU_PMU_MUTEX_H */
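
Taken together, this header defines the whole lifecycle of the new unit. A hedged end-to-end sketch, under the assumption that g->ops.pmu.pmu_mutex_* are already wired up (as the HAL tables in this diff do):

    struct pmu_mutexes mutexes;
    u32 token = PMU_INVALID_MUTEX_OWNER_ID;
    int err;

    err = nvgpu_pmu_mutexes_alloc(g, &mutexes);  /* sizes via g->ops.pmu.pmu_mutex_size() */
    if (err != 0) {
        return err;
    }
    nvgpu_pmu_mutexes_init(&mutexes);            /* id/index = array slot */

    if (nvgpu_pmu_mutex_acquire(g, &mutexes, PMU_MUTEX_ID_FIFO, &token) == 0) {
        /* ... section arbitrated against PMU ucode ... */
        (void) nvgpu_pmu_mutex_release(g, &mutexes, PMU_MUTEX_ID_FIFO, &token);
    }

    nvgpu_pmu_mutexes_free(g, &mutexes);
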
@@ -46,32 +46,6 @@
 #define PMU_IS_MESSAGE_QUEUE(id) \
 		((id) == PMU_MESSAGE_QUEUE)

-/* An enumeration containing all valid logical mutex identifiers */
-enum {
-	PMU_MUTEX_ID_RSVD1 = 0,
-	PMU_MUTEX_ID_GPUSER,
-	PMU_MUTEX_ID_QUEUE_BIOS,
-	PMU_MUTEX_ID_QUEUE_SMI,
-	PMU_MUTEX_ID_GPMUTEX,
-	PMU_MUTEX_ID_I2C,
-	PMU_MUTEX_ID_RMLOCK,
-	PMU_MUTEX_ID_MSGBOX,
-	PMU_MUTEX_ID_FIFO,
-	PMU_MUTEX_ID_PG,
-	PMU_MUTEX_ID_GR,
-	PMU_MUTEX_ID_CLK,
-	PMU_MUTEX_ID_RSVD6,
-	PMU_MUTEX_ID_RSVD7,
-	PMU_MUTEX_ID_RSVD8,
-	PMU_MUTEX_ID_RSVD9,
-	PMU_MUTEX_ID_INVALID
-};
-
-#define PMU_MUTEX_ID_IS_VALID(id) \
-		((id) < PMU_MUTEX_ID_INVALID)
-
-#define PMU_INVALID_MUTEX_OWNER_ID 0U
-
 /*
  * The PMU's frame-buffer interface block has several slots/indices
  * which can be bound to support DMA to various surfaces in memory
@@ -96,6 +96,7 @@
 #include "hal/falcon/falcon_gk20a.h"
 #include "hal/nvdec/nvdec_tu104.h"
 #include "hal/gsp/gsp_gv100.h"
+#include "hal/pmu/pmu_hal_gk20a.h"

 #include "common/ptimer/ptimer_gk20a.h"
 #include "common/xve/xve_gp106.h"
@@ -1123,6 +1124,7 @@ static const struct gpu_ops tu104_ops = {
 		.pmu_init_perfmon = nvgpu_pmu_init_perfmon,
 		.pmu_perfmon_start_sampling = nvgpu_pmu_perfmon_start_sampling,
 		.pmu_perfmon_stop_sampling = nvgpu_pmu_perfmon_stop_sampling,
+		.pmu_mutex_owner = gk20a_pmu_mutex_owner,
 		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
 		.pmu_is_lpwr_feature_supported =
 			gp106_pmu_is_lpwr_feature_supported,