gpu: nvgpu: add pmu hals

Add the following PMU HALs:

- pmu_get_mutex_reg
- pmu_set_mutex_reg
- pmu_get_mutex_id
- pmu_get_mutex_id_release
- pmu_set_mutex_id_release

JIRA NVGPU-9758

Change-Id: Ic73ad8a9e07defadeb49a2ca3440fe000203a42f
Signed-off-by: Divya <dsinghatwari@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2904414
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Divya
2023-05-15 11:23:41 +00:00
committed by mobile promotions
parent a67ac8bac9
commit 912cb15999
11 changed files with 81 additions and 20 deletions

View File

@@ -1390,6 +1390,11 @@ static const struct gops_pmu ga100_ops_pmu = {
.is_pmu_supported = ga100_is_pmu_supported, .is_pmu_supported = ga100_is_pmu_supported,
.pmu_mutex_owner = gk20a_pmu_mutex_owner, .pmu_mutex_owner = gk20a_pmu_mutex_owner,
.pmu_mutex_acquire = gk20a_pmu_mutex_acquire, .pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
.pmu_get_mutex_reg = gk20a_pmu_get_mutex_reg,
.pmu_set_mutex_reg = gk20a_pmu_set_mutex_reg,
.pmu_get_mutex_id = gk20a_pmu_get_mutex_id,
.pmu_get_mutex_id_release = gk20a_pmu_get_mutex_id_release,
.pmu_set_mutex_id_release = gk20a_pmu_set_mutex_id_release,
.pmu_msgq_tail = gk20a_pmu_msgq_tail, .pmu_msgq_tail = gk20a_pmu_msgq_tail,
.pmu_get_queue_head_size = tu104_pmu_queue_head__size_1_v, .pmu_get_queue_head_size = tu104_pmu_queue_head__size_1_v,
.pmu_reset = nvgpu_pmu_reset, .pmu_reset = nvgpu_pmu_reset,

View File

@@ -1459,6 +1459,11 @@ static const struct gops_pmu ga10b_ops_pmu = {
.pmu_mutex_owner = gk20a_pmu_mutex_owner, .pmu_mutex_owner = gk20a_pmu_mutex_owner,
.pmu_mutex_acquire = gk20a_pmu_mutex_acquire, .pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
.pmu_mutex_release = gk20a_pmu_mutex_release, .pmu_mutex_release = gk20a_pmu_mutex_release,
.pmu_get_mutex_reg = gk20a_pmu_get_mutex_reg,
.pmu_set_mutex_reg = gk20a_pmu_set_mutex_reg,
.pmu_get_mutex_id = gk20a_pmu_get_mutex_id,
.pmu_get_mutex_id_release = gk20a_pmu_get_mutex_id_release,
.pmu_set_mutex_id_release = gk20a_pmu_set_mutex_id_release,
/* power-gating */ /* power-gating */
.pmu_setup_elpg = NULL, .pmu_setup_elpg = NULL,
.pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config, .pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config,

View File

@@ -858,6 +858,11 @@ static const struct gops_pmu gm20b_ops_pmu = {
.pmu_mutex_owner = gk20a_pmu_mutex_owner, .pmu_mutex_owner = gk20a_pmu_mutex_owner,
.pmu_mutex_acquire = gk20a_pmu_mutex_acquire, .pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
.pmu_mutex_release = gk20a_pmu_mutex_release, .pmu_mutex_release = gk20a_pmu_mutex_release,
.pmu_get_mutex_reg = gk20a_pmu_get_mutex_reg,
.pmu_set_mutex_reg = gk20a_pmu_set_mutex_reg,
.pmu_get_mutex_id = gk20a_pmu_get_mutex_id,
.pmu_get_mutex_id_release = gk20a_pmu_get_mutex_id_release,
.pmu_set_mutex_id_release = gk20a_pmu_set_mutex_id_release,
.pmu_is_interrupted = gk20a_pmu_is_interrupted, .pmu_is_interrupted = gk20a_pmu_is_interrupted,
.get_irqmask = gk20a_pmu_get_irqmask, .get_irqmask = gk20a_pmu_get_irqmask,
.set_mailbox1 = gk20a_pmu_set_mailbox1, .set_mailbox1 = gk20a_pmu_set_mailbox1,

View File

@@ -1220,6 +1220,11 @@ static const struct gops_pmu gv11b_ops_pmu = {
.pmu_mutex_owner = gk20a_pmu_mutex_owner, .pmu_mutex_owner = gk20a_pmu_mutex_owner,
.pmu_mutex_acquire = gk20a_pmu_mutex_acquire, .pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
.pmu_mutex_release = gk20a_pmu_mutex_release, .pmu_mutex_release = gk20a_pmu_mutex_release,
.pmu_get_mutex_reg = gk20a_pmu_get_mutex_reg,
.pmu_set_mutex_reg = gk20a_pmu_set_mutex_reg,
.pmu_get_mutex_id = gk20a_pmu_get_mutex_id,
.pmu_get_mutex_id_release = gk20a_pmu_get_mutex_id_release,
.pmu_set_mutex_id_release = gk20a_pmu_set_mutex_id_release,
/* power-gating */ /* power-gating */
.pmu_setup_elpg = gv11b_pmu_setup_elpg, .pmu_setup_elpg = gv11b_pmu_setup_elpg,
.pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config, .pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config,

View File

@@ -1270,6 +1270,11 @@ static const struct gops_pmu tu104_ops_pmu = {
.pmu_enable_irq = gv11b_pmu_enable_irq, .pmu_enable_irq = gv11b_pmu_enable_irq,
.is_pmu_supported = tu104_is_pmu_supported, .is_pmu_supported = tu104_is_pmu_supported,
.pmu_mutex_owner = gk20a_pmu_mutex_owner, .pmu_mutex_owner = gk20a_pmu_mutex_owner,
.pmu_get_mutex_reg = gk20a_pmu_get_mutex_reg,
.pmu_set_mutex_reg = gk20a_pmu_set_mutex_reg,
.pmu_get_mutex_id = gk20a_pmu_get_mutex_id,
.pmu_get_mutex_id_release = gk20a_pmu_get_mutex_id_release,
.pmu_set_mutex_id_release = gk20a_pmu_set_mutex_id_release,
.pmu_mutex_acquire = gk20a_pmu_mutex_acquire, .pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
.pmu_msgq_tail = gk20a_pmu_msgq_tail, .pmu_msgq_tail = gk20a_pmu_msgq_tail,
.pmu_get_queue_head_size = tu104_pmu_queue_head__size_1_v, .pmu_get_queue_head_size = tu104_pmu_queue_head__size_1_v,

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@@ -28,13 +28,6 @@
#define DMA_OFFSET_START 0U #define DMA_OFFSET_START 0U
#define DMEM_DATA_0 0x0U #define DMEM_DATA_0 0x0U
#define DMEM_DATA_1 0x1U #define DMEM_DATA_1 0x1U
#define PMU_IDLE_THRESHOLD_V 0x7FFFFFFF
#define IDLE_COUNTER_0 0
#define IDLE_COUNTER_1 1
#define IDLE_COUNTER_2 2
#define IDLE_COUNTER_3 3
#define IDLE_COUNTER_4 4
#define IDLE_COUNTER_6 6
#define right_shift_8bits(v) (v >> 8U) #define right_shift_8bits(v) (v >> 8U)
#define left_shift_8bits(v) (v << 8U) #define left_shift_8bits(v) (v << 8U)

View File

@@ -229,7 +229,7 @@ u32 gk20a_pmu_mutex_owner(struct gk20a *g, struct pmu_mutexes *mutexes, u32 id)
mutex = &mutexes->mutex[id]; mutex = &mutexes->mutex[id];
return pwr_pmu_mutex_value_v( return pwr_pmu_mutex_value_v(
gk20a_readl(g, pwr_pmu_mutex_r(mutex->index))); g->ops.pmu.pmu_get_mutex_reg(g, mutex->index));
} }
int gk20a_pmu_mutex_acquire(struct gk20a *g, struct pmu_mutexes *mutexes, int gk20a_pmu_mutex_acquire(struct gk20a *g, struct pmu_mutexes *mutexes,
@@ -242,12 +242,12 @@ int gk20a_pmu_mutex_acquire(struct gk20a *g, struct pmu_mutexes *mutexes,
mutex = &mutexes->mutex[id]; mutex = &mutexes->mutex[id];
owner = pwr_pmu_mutex_value_v( owner = pwr_pmu_mutex_value_v(
gk20a_readl(g, pwr_pmu_mutex_r(mutex->index))); g->ops.pmu.pmu_get_mutex_reg(g, mutex->index));
max_retry = 40; max_retry = 40;
do { do {
data = pwr_pmu_mutex_id_value_v( data = pwr_pmu_mutex_id_value_v(
gk20a_readl(g, pwr_pmu_mutex_id_r())); g->ops.pmu.pmu_get_mutex_id(g));
if (data == pwr_pmu_mutex_id_value_init_v() || if (data == pwr_pmu_mutex_id_value_init_v() ||
data == pwr_pmu_mutex_id_value_not_avail_v()) { data == pwr_pmu_mutex_id_value_not_avail_v()) {
nvgpu_warn(g, nvgpu_warn(g,
@@ -258,11 +258,11 @@ int gk20a_pmu_mutex_acquire(struct gk20a *g, struct pmu_mutexes *mutexes,
} }
owner = data; owner = data;
gk20a_writel(g, pwr_pmu_mutex_r(mutex->index), g->ops.pmu.pmu_set_mutex_reg(g, mutex->index,
pwr_pmu_mutex_value_f(owner)); pwr_pmu_mutex_value_f(owner));
data = pwr_pmu_mutex_value_v( data = pwr_pmu_mutex_value_v(
gk20a_readl(g, pwr_pmu_mutex_r(mutex->index))); g->ops.pmu.pmu_get_mutex_reg(g, mutex->index));
if (owner == data) { if (owner == data) {
nvgpu_log_info(g, "mutex acquired: id=%d, token=0x%x", nvgpu_log_info(g, "mutex acquired: id=%d, token=0x%x",
@@ -275,11 +275,11 @@ int gk20a_pmu_mutex_acquire(struct gk20a *g, struct pmu_mutexes *mutexes,
nvgpu_log_info(g, "fail to acquire mutex idx=0x%08x", nvgpu_log_info(g, "fail to acquire mutex idx=0x%08x",
mutex->index); mutex->index);
data = gk20a_readl(g, pwr_pmu_mutex_id_release_r()); data = g->ops.pmu.pmu_get_mutex_id_release(g);
data = set_field(data, data = set_field(data,
pwr_pmu_mutex_id_release_value_m(), pwr_pmu_mutex_id_release_value_m(),
pwr_pmu_mutex_id_release_value_f(owner)); pwr_pmu_mutex_id_release_value_f(owner));
gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data); g->ops.pmu.pmu_set_mutex_id_release(g, data);
nvgpu_usleep_range(20, 40); nvgpu_usleep_range(20, 40);
} while (max_retry-- > 0U); } while (max_retry-- > 0U);
@@ -296,15 +296,15 @@ void gk20a_pmu_mutex_release(struct gk20a *g, struct pmu_mutexes *mutexes,
mutex = &mutexes->mutex[id]; mutex = &mutexes->mutex[id];
owner = pwr_pmu_mutex_value_v( owner = pwr_pmu_mutex_value_v(
gk20a_readl(g, pwr_pmu_mutex_r(mutex->index))); g->ops.pmu.pmu_get_mutex_reg(g, mutex->index));
gk20a_writel(g, pwr_pmu_mutex_r(mutex->index), g->ops.pmu.pmu_set_mutex_reg(g, mutex->index,
pwr_pmu_mutex_value_initial_lock_f()); pwr_pmu_mutex_value_initial_lock_f());
data = gk20a_readl(g, pwr_pmu_mutex_id_release_r()); data = g->ops.pmu.pmu_get_mutex_id_release(g);
data = set_field(data, pwr_pmu_mutex_id_release_value_m(), data = set_field(data, pwr_pmu_mutex_id_release_value_m(),
pwr_pmu_mutex_id_release_value_f(owner)); pwr_pmu_mutex_id_release_value_f(owner));
gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data); g->ops.pmu.pmu_set_mutex_id_release(g, data);
nvgpu_log_info(g, "mutex released: id=%d, token=0x%x", nvgpu_log_info(g, "mutex released: id=%d, token=0x%x",
mutex->index, *token); mutex->index, *token);

View File

@@ -51,6 +51,11 @@ u32 gk20a_pmu_get_bar0_fecs_error(struct gk20a *g);
void gk20a_pmu_set_bar0_fecs_error(struct gk20a *g, u32 val); void gk20a_pmu_set_bar0_fecs_error(struct gk20a *g, u32 val);
u32 gk20a_pmu_get_mailbox(struct gk20a *g, u32 i); u32 gk20a_pmu_get_mailbox(struct gk20a *g, u32 i);
u32 gk20a_pmu_get_pmu_debug(struct gk20a *g, u32 i); u32 gk20a_pmu_get_pmu_debug(struct gk20a *g, u32 i);
u32 gk20a_pmu_get_mutex_reg(struct gk20a *g, u32 i);
void gk20a_pmu_set_mutex_reg(struct gk20a *g, u32 i, u32 data);
u32 gk20a_pmu_get_mutex_id(struct gk20a *g);
u32 gk20a_pmu_get_mutex_id_release(struct gk20a *g);
void gk20a_pmu_set_mutex_id_release(struct gk20a *g, u32 data);
#ifdef CONFIG_NVGPU_LS_PMU #ifdef CONFIG_NVGPU_LS_PMU
void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu); void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);

View File

@@ -123,6 +123,31 @@ u32 gk20a_pmu_get_pmu_debug(struct gk20a *g, u32 i)
return nvgpu_readl(g, pwr_pmu_debug_r(i)); return nvgpu_readl(g, pwr_pmu_debug_r(i));
} }
/* Read the PMU mutex register for mutex index @i via MMIO. */
u32 gk20a_pmu_get_mutex_reg(struct gk20a *g, u32 i)
{
return nvgpu_readl(g, pwr_pmu_mutex_r(i));
}
/* Write @data to the PMU mutex register for mutex index @i via MMIO. */
void gk20a_pmu_set_mutex_reg(struct gk20a *g, u32 i, u32 data)
{
nvgpu_writel(g, pwr_pmu_mutex_r(i), data);
}
/* Read the PMU mutex-id register (used to obtain a mutex owner token). */
u32 gk20a_pmu_get_mutex_id(struct gk20a *g)
{
return nvgpu_readl(g, pwr_pmu_mutex_id_r());
}
/* Read the PMU mutex-id-release register (read-modify-write by callers). */
u32 gk20a_pmu_get_mutex_id_release(struct gk20a *g)
{
return nvgpu_readl(g, pwr_pmu_mutex_id_release_r());
}
/* Write @data to the PMU mutex-id-release register to return a token. */
void gk20a_pmu_set_mutex_id_release(struct gk20a *g, u32 data)
{
nvgpu_writel(g, pwr_pmu_mutex_id_release_r(), data);
}
void gk20a_pmu_isr(struct gk20a *g) void gk20a_pmu_isr(struct gk20a *g)
{ {
struct nvgpu_pmu *pmu = g->pmu; struct nvgpu_pmu *pmu = g->pmu;

View File

@@ -493,6 +493,11 @@ struct gops_pmu {
void (*set_bar0_fecs_error)(struct gk20a *g, u32 val); void (*set_bar0_fecs_error)(struct gk20a *g, u32 val);
u32 (*get_mailbox)(struct gk20a *g, u32 i); u32 (*get_mailbox)(struct gk20a *g, u32 i);
u32 (*get_pmu_debug)(struct gk20a *g, u32 i); u32 (*get_pmu_debug)(struct gk20a *g, u32 i);
u32 (*pmu_get_mutex_reg)(struct gk20a *g, u32 i);
void (*pmu_set_mutex_reg)(struct gk20a *g, u32 i, u32 data);
u32 (*pmu_get_mutex_id)(struct gk20a *g);
u32 (*pmu_get_mutex_id_release)(struct gk20a *g);
void (*pmu_set_mutex_id_release)(struct gk20a *g, u32 data);
#ifdef CONFIG_NVGPU_LS_PMU #ifdef CONFIG_NVGPU_LS_PMU
u32 (*get_inst_block_config)(struct gk20a *g); u32 (*get_inst_block_config)(struct gk20a *g);

View File

@@ -216,6 +216,14 @@ struct nvgpu_clk_pmupstate;
#define NVGPU_PWRCLK_RATE 204000000UL #define NVGPU_PWRCLK_RATE 204000000UL
#define IDLE_COUNTER_0 0
#define IDLE_COUNTER_1 1
#define IDLE_COUNTER_2 2
#define IDLE_COUNTER_3 3
#define IDLE_COUNTER_4 4
#define IDLE_COUNTER_6 6
#define PMU_IDLE_THRESHOLD_V 0x7FFFFFFF
#ifdef CONFIG_NVGPU_LS_PMU #ifdef CONFIG_NVGPU_LS_PMU
struct rpc_handler_payload { struct rpc_handler_payload {
void *rpc_buff; void *rpc_buff;