gpu: nvgpu: add pmu HALs

Add the following PMU HALs (a brief usage sketch follows the list):

- get_pmu_msgq_head
- set_pmu_msgq_head
- set_pmu_new_instblk
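
A minimal usage sketch (not part of this change; the wrapper names below are
hypothetical) of how common code is expected to go through the new HALs
instead of touching the pwr_pmu_* registers directly:

    /* Hypothetical helpers, sketched against the new gops_pmu entries. */
    static u32 pmu_msgq_head_get(struct gk20a *g)
    {
            /* raw pwr_pmu_msgq_head register value -> head index */
            return pwr_pmu_msgq_head_val_v(g->ops.pmu.get_pmu_msgq_head(g));
    }

    static void pmu_msgq_head_set(struct gk20a *g, u32 head)
    {
            /* head index -> raw register value written by the chip HAL */
            g->ops.pmu.set_pmu_msgq_head(g, pwr_pmu_msgq_head_val_f(head));
    }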

JIRA NVGPU-9758

Change-Id: Iba1e37a299309e0e31970a8fbdf248d662bd759b
Signed-off-by: Divya <dsinghatwari@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2906872
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Rajesh Devaraj <rdevaraj@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>

commit 943eb77b20 (parent ad320f60b9)
Author: Divya
Date:   2023-05-18 17:01:23 +00:00
Committed by: mobile promotions

14 changed files with 47 additions and 11 deletions

@@ -1377,6 +1377,9 @@ static const struct gops_pmu ga100_ops_pmu = {
 	.set_bar0_fecs_error = gk20a_pmu_set_bar0_fecs_error,
 	.get_mailbox = gk20a_pmu_get_mailbox,
 	.get_pmu_debug = gk20a_pmu_get_pmu_debug,
+	.get_pmu_msgq_head = gk20a_pmu_get_pmu_msgq_head,
+	.set_pmu_msgq_head = gk20a_pmu_set_pmu_msgq_head,
+	.set_pmu_new_instblk = gk20a_pmu_set_new_instblk,
 	.pmu_isr = gk20a_pmu_isr,
 	.pmu_init_perfmon_counter = ga10b_pmu_init_perfmon_counter,
 	.pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config,

@@ -1435,6 +1435,10 @@ static const struct gops_pmu ga10b_ops_pmu = {
 	.set_bar0_fecs_error = gk20a_pmu_set_bar0_fecs_error,
 	.get_mailbox = gk20a_pmu_get_mailbox,
 	.get_pmu_debug = gk20a_pmu_get_pmu_debug,
+	.get_pmu_msgq_head = gk20a_pmu_get_pmu_msgq_head,
+	.set_pmu_msgq_head = gk20a_pmu_set_pmu_msgq_head,
+	.set_pmu_new_instblk = gk20a_pmu_set_new_instblk,
 #ifdef CONFIG_NVGPU_LS_PMU
 	.pmu_seq_cleanup = nvgpu_pmu_seq_free_release,
 	.get_inst_block_config = ga10b_pmu_get_inst_block_config,

@@ -882,6 +882,9 @@ static const struct gops_pmu gm20b_ops_pmu = {
 	.set_bar0_fecs_error = gk20a_pmu_set_bar0_fecs_error,
 	.get_mailbox = gk20a_pmu_get_mailbox,
 	.get_pmu_debug = gk20a_pmu_get_pmu_debug,
+	.get_pmu_msgq_head = gk20a_pmu_get_pmu_msgq_head,
+	.set_pmu_msgq_head = gk20a_pmu_set_pmu_msgq_head,
+	.set_pmu_new_instblk = gk20a_pmu_set_new_instblk,
 	.pmu_isr = gk20a_pmu_isr,
 	.pmu_init_perfmon_counter = gk20a_pmu_init_perfmon_counter,
 	.pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config,

@@ -1194,6 +1194,9 @@ static const struct gops_pmu gv11b_ops_pmu = {
 	.set_bar0_fecs_error = gk20a_pmu_set_bar0_fecs_error,
 	.get_mailbox = gk20a_pmu_get_mailbox,
 	.get_pmu_debug = gk20a_pmu_get_pmu_debug,
+	.get_pmu_msgq_head = gk20a_pmu_get_pmu_msgq_head,
+	.set_pmu_msgq_head = gk20a_pmu_set_pmu_msgq_head,
+	.set_pmu_new_instblk = gk20a_pmu_set_new_instblk,
 	.pmu_isr = gk20a_pmu_isr,
 	.handle_ext_irq = gv11b_pmu_handle_ext_irq,
 #ifdef CONFIG_NVGPU_LS_PMU

@@ -1257,6 +1257,9 @@ static const struct gops_pmu tu104_ops_pmu = {
 	.set_bar0_fecs_error = gk20a_pmu_set_bar0_fecs_error,
 	.get_mailbox = gk20a_pmu_get_mailbox,
 	.get_pmu_debug = gk20a_pmu_get_pmu_debug,
+	.get_pmu_msgq_head = gk20a_pmu_get_pmu_msgq_head,
+	.set_pmu_msgq_head = gk20a_pmu_set_pmu_msgq_head,
+	.set_pmu_new_instblk = gk20a_pmu_set_new_instblk,
 	.pmu_isr = gk20a_pmu_isr,
 	.pmu_init_perfmon_counter = gk20a_pmu_init_perfmon_counter,
 	.pmu_pg_idle_counter_config = gk20a_pmu_pg_idle_counter_config,

@@ -101,7 +101,7 @@ static int ga10b_pmu_ns_falcon_bootstrap(struct gk20a *g, struct nvgpu_pmu *pmu,
 		pwr_falcon_itfen_ctxen_enable_f());
 	inst_block_ptr = nvgpu_inst_block_ptr(g, &mm->pmu.inst_block);
-	nvgpu_writel(g, pwr_pmu_new_instblk_r(),
+	g->ops.pmu.set_pmu_new_instblk(g,
 		pwr_pmu_new_instblk_ptr_f(inst_block_ptr) |
 		pwr_pmu_new_instblk_valid_f(1) |
 		(nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ?

@@ -341,10 +341,9 @@ int gk20a_pmu_queue_head(struct gk20a *g, u32 queue_id, u32 queue_index,
 	} else {
 		if (!set) {
 			*head = pwr_pmu_msgq_head_val_v(
-				gk20a_readl(g, pwr_pmu_msgq_head_r()));
+				g->ops.pmu.get_pmu_msgq_head(g));
 		} else {
-			gk20a_writel(g,
-				pwr_pmu_msgq_head_r(),
+			g->ops.pmu.set_pmu_msgq_head(g,
 				pwr_pmu_msgq_head_val_f(*head));
 		}
 	}
@@ -645,7 +644,7 @@ int gk20a_pmu_ns_bootstrap(struct gk20a *g, struct nvgpu_pmu *pmu,
 		pwr_falcon_itfen_ctxen_enable_f());
 	tmp_addr = nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12;
 	nvgpu_assert(u64_hi32(tmp_addr) == 0U);
-	gk20a_writel(g, pwr_pmu_new_instblk_r(),
+	g->ops.pmu.set_pmu_new_instblk(g,
 		pwr_pmu_new_instblk_ptr_f((u32)tmp_addr) |
 		pwr_pmu_new_instblk_valid_f(1) |
 		pwr_pmu_new_instblk_target_sys_coh_f());

@@ -56,6 +56,9 @@ void gk20a_pmu_set_mutex_reg(struct gk20a *g, u32 i, u32 data);
 u32 gk20a_pmu_get_mutex_id(struct gk20a *g);
 u32 gk20a_pmu_get_mutex_id_release(struct gk20a *g);
 void gk20a_pmu_set_mutex_id_release(struct gk20a *g, u32 data);
+u32 gk20a_pmu_get_pmu_msgq_head(struct gk20a *g);
+void gk20a_pmu_set_pmu_msgq_head(struct gk20a *g, u32 data);
+void gk20a_pmu_set_new_instblk(struct gk20a *g, u32 data);
 #ifdef CONFIG_NVGPU_LS_PMU
 void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);

@@ -148,6 +148,21 @@ void gk20a_pmu_set_mutex_id_release(struct gk20a *g, u32 data)
 	nvgpu_writel(g, pwr_pmu_mutex_id_release_r(), data);
 }
 
+u32 gk20a_pmu_get_pmu_msgq_head(struct gk20a *g)
+{
+	return nvgpu_readl(g, pwr_pmu_msgq_head_r());
+}
+
+void gk20a_pmu_set_pmu_msgq_head(struct gk20a *g, u32 data)
+{
+	nvgpu_writel(g, pwr_pmu_msgq_head_r(), data);
+}
+
+void gk20a_pmu_set_new_instblk(struct gk20a *g, u32 data)
+{
+	nvgpu_writel(g, pwr_pmu_new_instblk_r(), data);
+}
+
 void gk20a_pmu_isr(struct gk20a *g)
 {
 	struct nvgpu_pmu *pmu = g->pmu;

@@ -207,7 +207,7 @@ void gm20b_pmu_flcn_setup_boot_config(struct gk20a *g)
 	 */
 	inst_block_ptr = nvgpu_inst_block_ptr(g, &mm->pmu.inst_block);
-	gk20a_writel(g, pwr_pmu_new_instblk_r(),
+	g->ops.pmu.set_pmu_new_instblk(g,
 		pwr_pmu_new_instblk_ptr_f(inst_block_ptr) |
 		pwr_pmu_new_instblk_valid_f(1U) |
 		(nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ?

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -166,7 +166,7 @@ int gv11b_pmu_bootstrap(struct gk20a *g, struct nvgpu_pmu *pmu,
 		pwr_falcon_itfen_ctxen_enable_f());
 	inst_block_ptr = nvgpu_inst_block_ptr(g, &mm->pmu.inst_block);
-	nvgpu_writel(g, pwr_pmu_new_instblk_r(),
+	g->ops.pmu.set_pmu_new_instblk(g,
 		pwr_pmu_new_instblk_ptr_f(inst_block_ptr) |
 		pwr_pmu_new_instblk_valid_f(1) |
 		(nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ?

@@ -223,7 +223,7 @@ void gv11b_pmu_flcn_setup_boot_config(struct gk20a *g)
 	 */
 	inst_block_ptr = nvgpu_inst_block_ptr(g, &mm->pmu.inst_block);
-	nvgpu_writel(g, pwr_pmu_new_instblk_r(),
+	g->ops.pmu.set_pmu_new_instblk(g,
 		pwr_pmu_new_instblk_ptr_f(inst_block_ptr) |
 		pwr_pmu_new_instblk_valid_f(1U) |
 		(nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ?

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -99,7 +99,7 @@ void tu104_pmu_setup_apertures(struct gk20a *g)
 		gk20a_readl(g, pwr_falcon_itfen_r()) |
 		pwr_falcon_itfen_ctxen_enable_f());
 	inst_block_ptr = nvgpu_inst_block_ptr(g, &mm->pmu.inst_block);
-	gk20a_writel(g, pwr_pmu_new_instblk_r(),
+	g->ops.pmu.set_pmu_new_instblk(g,
 		pwr_pmu_new_instblk_ptr_f(inst_block_ptr) |
 		pwr_pmu_new_instblk_valid_f(1) |
 		nvgpu_aperture_mask(g, &mm->pmu.inst_block,

@@ -498,6 +498,9 @@ struct gops_pmu {
 	u32 (*pmu_get_mutex_id)(struct gk20a *g);
 	u32 (*pmu_get_mutex_id_release)(struct gk20a *g);
 	void (*pmu_set_mutex_id_release)(struct gk20a *g, u32 data);
+	u32 (*get_pmu_msgq_head)(struct gk20a *g);
+	void (*set_pmu_msgq_head)(struct gk20a *g, u32 data);
+	void (*set_pmu_new_instblk)(struct gk20a *g, u32 data);
 #ifdef CONFIG_NVGPU_LS_PMU
 	u32 (*get_inst_block_config)(struct gk20a *g);