Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: add pmu hals
Add the following PMU HALs:
- pmu_get_mutex_reg
- pmu_set_mutex_reg
- pmu_get_mutex_id
- pmu_get_mutex_id_release
- pmu_set_mutex_id_release

JIRA NVGPU-9758

Change-Id: Ic73ad8a9e07defadeb49a2ca3440fe000203a42f
Signed-off-by: Divya <dsinghatwari@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2904414
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
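The change replaces direct register accesses in the common PMU mutex code with calls through the per-chip gops_pmu HAL table, so a chip that cannot use the gk20a register accessors can plug in its own. A minimal before/after sketch of the pattern, condensed from the gk20a_pmu_mutex_owner() change in the diff below (the wrapper names owner_before()/owner_after() are illustrative, not part of the patch):

	/* Before: common code read the PMU mutex register directly. */
	static u32 owner_before(struct gk20a *g, u32 index)
	{
		return pwr_pmu_mutex_value_v(
			gk20a_readl(g, pwr_pmu_mutex_r(index)));
	}

	/* After: the read goes through the chip's HAL hook added by this patch. */
	static u32 owner_after(struct gk20a *g, u32 index)
	{
		return pwr_pmu_mutex_value_v(
			g->ops.pmu.pmu_get_mutex_reg(g, index));
	}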
@@ -1390,6 +1390,11 @@ static const struct gops_pmu ga100_ops_pmu = {
 	.is_pmu_supported = ga100_is_pmu_supported,
 	.pmu_mutex_owner = gk20a_pmu_mutex_owner,
 	.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
+	.pmu_get_mutex_reg = gk20a_pmu_get_mutex_reg,
+	.pmu_set_mutex_reg = gk20a_pmu_set_mutex_reg,
+	.pmu_get_mutex_id = gk20a_pmu_get_mutex_id,
+	.pmu_get_mutex_id_release = gk20a_pmu_get_mutex_id_release,
+	.pmu_set_mutex_id_release = gk20a_pmu_set_mutex_id_release,
 	.pmu_msgq_tail = gk20a_pmu_msgq_tail,
 	.pmu_get_queue_head_size = tu104_pmu_queue_head__size_1_v,
 	.pmu_reset = nvgpu_pmu_reset,
@@ -1459,6 +1459,11 @@ static const struct gops_pmu ga10b_ops_pmu = {
 	.pmu_mutex_owner = gk20a_pmu_mutex_owner,
 	.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
 	.pmu_mutex_release = gk20a_pmu_mutex_release,
+	.pmu_get_mutex_reg = gk20a_pmu_get_mutex_reg,
+	.pmu_set_mutex_reg = gk20a_pmu_set_mutex_reg,
+	.pmu_get_mutex_id = gk20a_pmu_get_mutex_id,
+	.pmu_get_mutex_id_release = gk20a_pmu_get_mutex_id_release,
+	.pmu_set_mutex_id_release = gk20a_pmu_set_mutex_id_release,
 	/* power-gating */
 	.pmu_setup_elpg = NULL,
 	.pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config,
@@ -858,6 +858,11 @@ static const struct gops_pmu gm20b_ops_pmu = {
 	.pmu_mutex_owner = gk20a_pmu_mutex_owner,
 	.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
 	.pmu_mutex_release = gk20a_pmu_mutex_release,
+	.pmu_get_mutex_reg = gk20a_pmu_get_mutex_reg,
+	.pmu_set_mutex_reg = gk20a_pmu_set_mutex_reg,
+	.pmu_get_mutex_id = gk20a_pmu_get_mutex_id,
+	.pmu_get_mutex_id_release = gk20a_pmu_get_mutex_id_release,
+	.pmu_set_mutex_id_release = gk20a_pmu_set_mutex_id_release,
 	.pmu_is_interrupted = gk20a_pmu_is_interrupted,
 	.get_irqmask = gk20a_pmu_get_irqmask,
 	.set_mailbox1 = gk20a_pmu_set_mailbox1,
@@ -1220,6 +1220,11 @@ static const struct gops_pmu gv11b_ops_pmu = {
 	.pmu_mutex_owner = gk20a_pmu_mutex_owner,
 	.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
 	.pmu_mutex_release = gk20a_pmu_mutex_release,
+	.pmu_get_mutex_reg = gk20a_pmu_get_mutex_reg,
+	.pmu_set_mutex_reg = gk20a_pmu_set_mutex_reg,
+	.pmu_get_mutex_id = gk20a_pmu_get_mutex_id,
+	.pmu_get_mutex_id_release = gk20a_pmu_get_mutex_id_release,
+	.pmu_set_mutex_id_release = gk20a_pmu_set_mutex_id_release,
 	/* power-gating */
 	.pmu_setup_elpg = gv11b_pmu_setup_elpg,
 	.pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config,
@@ -1270,6 +1270,11 @@ static const struct gops_pmu tu104_ops_pmu = {
 	.pmu_enable_irq = gv11b_pmu_enable_irq,
 	.is_pmu_supported = tu104_is_pmu_supported,
 	.pmu_mutex_owner = gk20a_pmu_mutex_owner,
+	.pmu_get_mutex_reg = gk20a_pmu_get_mutex_reg,
+	.pmu_set_mutex_reg = gk20a_pmu_set_mutex_reg,
+	.pmu_get_mutex_id = gk20a_pmu_get_mutex_id,
+	.pmu_get_mutex_id_release = gk20a_pmu_get_mutex_id_release,
+	.pmu_set_mutex_id_release = gk20a_pmu_set_mutex_id_release,
 	.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
 	.pmu_msgq_tail = gk20a_pmu_msgq_tail,
 	.pmu_get_queue_head_size = tu104_pmu_queue_head__size_1_v,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -28,13 +28,6 @@
 #define DMA_OFFSET_START 0U
 #define DMEM_DATA_0 0x0U
 #define DMEM_DATA_1 0x1U
-#define PMU_IDLE_THRESHOLD_V 0x7FFFFFFF
-#define IDLE_COUNTER_0 0
-#define IDLE_COUNTER_1 1
-#define IDLE_COUNTER_2 2
-#define IDLE_COUNTER_3 3
-#define IDLE_COUNTER_4 4
-#define IDLE_COUNTER_6 6
 #define right_shift_8bits(v) (v >> 8U)
 #define left_shift_8bits(v) (v << 8U)
@@ -229,7 +229,7 @@ u32 gk20a_pmu_mutex_owner(struct gk20a *g, struct pmu_mutexes *mutexes, u32 id)
 	mutex = &mutexes->mutex[id];
 
 	return pwr_pmu_mutex_value_v(
-		gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
+		g->ops.pmu.pmu_get_mutex_reg(g, mutex->index));
 }
 
 int gk20a_pmu_mutex_acquire(struct gk20a *g, struct pmu_mutexes *mutexes,
@@ -242,12 +242,12 @@ int gk20a_pmu_mutex_acquire(struct gk20a *g, struct pmu_mutexes *mutexes,
 	mutex = &mutexes->mutex[id];
 
 	owner = pwr_pmu_mutex_value_v(
-		gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
+		g->ops.pmu.pmu_get_mutex_reg(g, mutex->index));
 
 	max_retry = 40;
 	do {
 		data = pwr_pmu_mutex_id_value_v(
-			gk20a_readl(g, pwr_pmu_mutex_id_r()));
+			g->ops.pmu.pmu_get_mutex_id(g));
 		if (data == pwr_pmu_mutex_id_value_init_v() ||
 		    data == pwr_pmu_mutex_id_value_not_avail_v()) {
 			nvgpu_warn(g,
@@ -258,11 +258,11 @@ int gk20a_pmu_mutex_acquire(struct gk20a *g, struct pmu_mutexes *mutexes,
 		}
 
 		owner = data;
-		gk20a_writel(g, pwr_pmu_mutex_r(mutex->index),
+		g->ops.pmu.pmu_set_mutex_reg(g, mutex->index,
 			pwr_pmu_mutex_value_f(owner));
 
 		data = pwr_pmu_mutex_value_v(
-			gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
+			g->ops.pmu.pmu_get_mutex_reg(g, mutex->index));
 
 		if (owner == data) {
 			nvgpu_log_info(g, "mutex acquired: id=%d, token=0x%x",
@@ -275,11 +275,11 @@ int gk20a_pmu_mutex_acquire(struct gk20a *g, struct pmu_mutexes *mutexes,
 		nvgpu_log_info(g, "fail to acquire mutex idx=0x%08x",
 			mutex->index);
 
-		data = gk20a_readl(g, pwr_pmu_mutex_id_release_r());
+		data = g->ops.pmu.pmu_get_mutex_id_release(g);
 		data = set_field(data,
 			pwr_pmu_mutex_id_release_value_m(),
 			pwr_pmu_mutex_id_release_value_f(owner));
-		gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data);
+		g->ops.pmu.pmu_set_mutex_id_release(g, data);
 
 		nvgpu_usleep_range(20, 40);
 	} while (max_retry-- > 0U);
@@ -296,15 +296,15 @@ void gk20a_pmu_mutex_release(struct gk20a *g, struct pmu_mutexes *mutexes,
 	mutex = &mutexes->mutex[id];
 
 	owner = pwr_pmu_mutex_value_v(
-		gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
+		g->ops.pmu.pmu_get_mutex_reg(g, mutex->index));
 
-	gk20a_writel(g, pwr_pmu_mutex_r(mutex->index),
+	g->ops.pmu.pmu_set_mutex_reg(g, mutex->index,
 		pwr_pmu_mutex_value_initial_lock_f());
 
-	data = gk20a_readl(g, pwr_pmu_mutex_id_release_r());
+	data = g->ops.pmu.pmu_get_mutex_id_release(g);
 	data = set_field(data, pwr_pmu_mutex_id_release_value_m(),
 		pwr_pmu_mutex_id_release_value_f(owner));
-	gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data);
+	g->ops.pmu.pmu_set_mutex_id_release(g, data);
 
 	nvgpu_log_info(g, "mutex released: id=%d, token=0x%x",
 		mutex->index, *token);
@@ -51,6 +51,11 @@ u32 gk20a_pmu_get_bar0_fecs_error(struct gk20a *g);
 void gk20a_pmu_set_bar0_fecs_error(struct gk20a *g, u32 val);
 u32 gk20a_pmu_get_mailbox(struct gk20a *g, u32 i);
 u32 gk20a_pmu_get_pmu_debug(struct gk20a *g, u32 i);
+u32 gk20a_pmu_get_mutex_reg(struct gk20a *g, u32 i);
+void gk20a_pmu_set_mutex_reg(struct gk20a *g, u32 i, u32 data);
+u32 gk20a_pmu_get_mutex_id(struct gk20a *g);
+u32 gk20a_pmu_get_mutex_id_release(struct gk20a *g);
+void gk20a_pmu_set_mutex_id_release(struct gk20a *g, u32 data);
 
 #ifdef CONFIG_NVGPU_LS_PMU
 void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);
@@ -123,6 +123,31 @@ u32 gk20a_pmu_get_pmu_debug(struct gk20a *g, u32 i)
 	return nvgpu_readl(g, pwr_pmu_debug_r(i));
 }
 
+u32 gk20a_pmu_get_mutex_reg(struct gk20a *g, u32 i)
+{
+	return nvgpu_readl(g, pwr_pmu_mutex_r(i));
+}
+
+void gk20a_pmu_set_mutex_reg(struct gk20a *g, u32 i, u32 data)
+{
+	nvgpu_writel(g, pwr_pmu_mutex_r(i), data);
+}
+
+u32 gk20a_pmu_get_mutex_id(struct gk20a *g)
+{
+	return nvgpu_readl(g, pwr_pmu_mutex_id_r());
+}
+
+u32 gk20a_pmu_get_mutex_id_release(struct gk20a *g)
+{
+	return nvgpu_readl(g, pwr_pmu_mutex_id_release_r());
+}
+
+void gk20a_pmu_set_mutex_id_release(struct gk20a *g, u32 data)
+{
+	nvgpu_writel(g, pwr_pmu_mutex_id_release_r(), data);
+}
+
 void gk20a_pmu_isr(struct gk20a *g)
 {
 	struct nvgpu_pmu *pmu = g->pmu;
@@ -493,6 +493,11 @@ struct gops_pmu {
 	void (*set_bar0_fecs_error)(struct gk20a *g, u32 val);
 	u32 (*get_mailbox)(struct gk20a *g, u32 i);
 	u32 (*get_pmu_debug)(struct gk20a *g, u32 i);
+	u32 (*pmu_get_mutex_reg)(struct gk20a *g, u32 i);
+	void (*pmu_set_mutex_reg)(struct gk20a *g, u32 i, u32 data);
+	u32 (*pmu_get_mutex_id)(struct gk20a *g);
+	u32 (*pmu_get_mutex_id_release)(struct gk20a *g);
+	void (*pmu_set_mutex_id_release)(struct gk20a *g, u32 data);
 
 #ifdef CONFIG_NVGPU_LS_PMU
 	u32 (*get_inst_block_config)(struct gk20a *g);
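Because the mutex acquire/release logic in gk20a_pmu.c now goes only through these five hooks, a later chip could supply different register accessors without touching the common code. A hypothetical sketch of such an override (the xyz_* names and xyz_pmu_readl() are placeholders, not part of this patch):

	/* Hypothetical: override only the mutex register accessors and reuse
	 * the common gk20a mutex acquire/release logic. */
	static u32 xyz_pmu_get_mutex_reg(struct gk20a *g, u32 i)
	{
		return xyz_pmu_readl(g, pwr_pmu_mutex_r(i)); /* placeholder accessor */
	}

	static const struct gops_pmu xyz_ops_pmu = {
		.pmu_mutex_owner = gk20a_pmu_mutex_owner,
		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
		.pmu_mutex_release = gk20a_pmu_mutex_release,
		.pmu_get_mutex_reg = xyz_pmu_get_mutex_reg,
		/* the other four mutex hooks would be overridden the same way */
	};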
@@ -216,6 +216,14 @@ struct nvgpu_clk_pmupstate;
 
 #define NVGPU_PWRCLK_RATE 204000000UL
 
+#define IDLE_COUNTER_0 0
+#define IDLE_COUNTER_1 1
+#define IDLE_COUNTER_2 2
+#define IDLE_COUNTER_3 3
+#define IDLE_COUNTER_4 4
+#define IDLE_COUNTER_6 6
+#define PMU_IDLE_THRESHOLD_V 0x7FFFFFFF
+
 #ifdef CONFIG_NVGPU_LS_PMU
 struct rpc_handler_payload {
 	void *rpc_buff;