Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git, synced 2025-12-25 11:04:51 +03:00
gpu: nvgpu: create PMU super surface unit
Created the PMU super surface unit: moved super-surface structs and functions under the unit, split the super surface structs into private/public headers based on their usage/access, and updated super-surface-dependent files to reflect the new unit.

JIRA NVGPU-3045

Change-Id: I6ac426052eb60f00b432d9533460aa0afd939fe3
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2088405
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: cff9f19941
Commit: a22a9c2bcf
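For context, a minimal caller-side sketch (not part of the diff below) of the accessor pattern this change introduces: instead of touching pmu->super_surface_buf directly, code asks the new super surface unit for its backing nvgpu_mem via nvgpu_pmu_super_surface_mem() and keeps using the usual nvgpu_mem_rd_n()/nvgpu_mem_wr_n() helpers. The wrapper function name below is hypothetical.

/* Hypothetical helper illustrating the accessor pattern used in this change. */
#include <nvgpu/gk20a.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/pmu/super_surface.h>

static void example_copy_to_super_surface(struct gk20a *g,
		struct nvgpu_pmu *pmu, u32 offset, void *buf, u32 size)
{
	/* Resolve the nvgpu_mem that backs the super surface via the new unit. */
	struct nvgpu_mem *ss_mem = nvgpu_pmu_super_surface_mem(g, pmu,
			pmu->super_surface);

	/* Copy the constructed data into the super surface residing in FB. */
	nvgpu_mem_wr_n(g, ss_mem, offset, buf, size);
}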
@@ -95,7 +95,6 @@ nvgpu-y += \
common/pmu/pg/pmu_pg.o \
common/pmu/pg/pmu_aelpg.o \
common/pmu/pmu_perfmon.o \
common/pmu/pmu_super_surface.o \
common/pmu/pmu_debug.o \
common/pmu/pmu_gk20a.o \
common/pmu/pmu_gm20b.o \
@@ -121,6 +120,7 @@ nvgpu-y += \
common/acr/acr_sw_gv100.o \
common/acr/acr_sw_gv11b.o \
common/acr/acr_sw_tu104.o \
common/pmu/super_surface/super_surface.o \
common/pmu/lsfm/lsfm.o \
common/pmu/lsfm/lsfm_sw_gm20b.o \
common/pmu/lsfm/lsfm_sw_gp10b.o \

@@ -134,7 +134,6 @@ srcs += common/sim.c \
common/pmu/pg/pmu_pg.c \
common/pmu/pg/pmu_aelpg.c \
common/pmu/pmu_perfmon.c \
common/pmu/pmu_super_surface.c \
common/pmu/pmu_debug.c \
common/pmu/pmu_gk20a.c \
common/pmu/pmu_gm20b.c \
@@ -207,6 +206,7 @@ srcs += common/sim.c \
common/regops/regops_gv100.c \
common/regops/regops_gv11b.c \
common/regops/regops_tu104.c \
common/pmu/super_surface/super_surface.c \
common/pmu/lsfm/lsfm.c \
common/pmu/lsfm/lsfm_sw_gm20b.c \
common/pmu/lsfm/lsfm_sw_gp10b.c \
@@ -25,6 +25,8 @@
#include <nvgpu/boardobj.h>
#include <nvgpu/pmuif/ctrlboardobj.h>
#include <nvgpu/pmu/cmd.h>
#include <nvgpu/pmu/super_surface.h>

/*
* Inserts a previously constructed Board Object into a Board Object Group for
* tracking. Objects are inserted in the array based on the given index.
@@ -598,7 +600,8 @@ int boardobjgrp_pmuset_impl_v1(struct gk20a *g,
* copy constructed pmu boardobjgrp data from
* sysmem to pmu super surface present in FB
*/
nvgpu_mem_wr_n(g, &pmu->super_surface_buf,
nvgpu_mem_wr_n(g, nvgpu_pmu_super_surface_mem(g,
pmu, pmu->super_surface),
pcmd->super_surface_offset, pcmd->buf,
pcmd->fbsize);

@@ -733,7 +736,9 @@ boardobjgrp_pmugetstatus_impl_v1(struct gk20a *g, struct boardobjgrp *pboardobjg
* copy constructed pmu boardobjgrp data from
* sysmem to pmu super surface present in FB
*/
nvgpu_mem_wr_n(g, &pmu->super_surface_buf, pcmd->super_surface_offset,
nvgpu_mem_wr_n(g, nvgpu_pmu_super_surface_mem(g,
pmu, pmu->super_surface),
pcmd->super_surface_offset,
pcmd->buf, pcmd->fbsize);
/* Send the GET_STATUS PMU CMD to the PMU */
status = boardobjgrp_pmucmdsend_rpc(g, pboardobjgrp,
@@ -744,7 +749,9 @@ boardobjgrp_pmugetstatus_impl_v1(struct gk20a *g, struct boardobjgrp *pboardobjg
}

/*copy the data back to sysmem buffer that belongs to command*/
nvgpu_mem_rd_n(g, &pmu->super_surface_buf,pcmd->super_surface_offset,
nvgpu_mem_rd_n(g, nvgpu_pmu_super_surface_mem(g,
pmu, pmu->super_surface),
pcmd->super_surface_offset,
pcmd->buf, pcmd->fbsize);

boardobjgrp_pmugetstatus_exit:
@@ -26,7 +26,6 @@
#include <nvgpu/timers.h>
#include <nvgpu/bug.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/pmuif/gpmu_super_surf_if.h>
#include <nvgpu/falcon.h>
#include <nvgpu/engine_fb_queue.h>
#include <nvgpu/gk20a.h>

@@ -29,6 +29,7 @@
#include <nvgpu/pmu/volt.h>
#include <nvgpu/pmu/therm.h>
#include <nvgpu/pmu/lsfm.h>
#include <nvgpu/pmu/super_surface.h>

static int pmu_payload_extract(struct nvgpu_pmu *pmu, struct pmu_sequence *seq)
{
@@ -48,7 +49,8 @@ static int pmu_payload_extract(struct nvgpu_pmu *pmu, struct pmu_sequence *seq)
(nvgpu_pmu_seq_get_fbq_element_index(seq) *
nvgpu_engine_fb_queue_get_element_size(fb_queue));

nvgpu_mem_rd_n(g, &pmu->super_surface_buf, fbq_payload_offset,
nvgpu_mem_rd_n(g, nvgpu_pmu_super_surface_mem(g,
pmu, pmu->super_surface), fbq_payload_offset,
nvgpu_pmu_seq_get_out_payload(seq),
pv->pmu_allocation_get_dmem_size(pmu,
pv->get_pmu_seq_out_a_ptr(seq)));
@@ -312,11 +314,12 @@ static void pmu_read_init_msg_fb(struct gk20a *g, struct nvgpu_pmu *pmu,
{
u32 fbq_msg_queue_ss_offset = 0U;

fbq_msg_queue_ss_offset = (u32)offsetof(
struct nv_pmu_super_surface,
fbq.msg_queue.element[element_index]);
fbq_msg_queue_ss_offset =
nvgpu_pmu_get_ss_msg_fbq_element_offset(g, pmu,
pmu->super_surface, element_index);

nvgpu_mem_rd_n(g, &pmu->super_surface_buf, fbq_msg_queue_ss_offset,
nvgpu_mem_rd_n(g, nvgpu_pmu_super_surface_mem(g,
pmu, pmu->super_surface), fbq_msg_queue_ss_offset,
buffer, size);
}

@@ -455,15 +458,17 @@ static int pmu_process_init_msg(struct nvgpu_pmu *pmu,

err = nvgpu_pmu_queues_init(g, init, &pmu->queues,
&pmu->super_surface_buf);
nvgpu_pmu_super_surface_mem(g, pmu,
pmu->super_surface));
if (err != 0) {
return err;
}

nvgpu_pmu_dmem_allocator_init(g, &pmu->dmem, init);

if (g->ops.pmu.create_ssmd_lookup_table != NULL) {
g->ops.pmu.create_ssmd_lookup_table(pmu);
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PMU_SUPER_SURFACE)) {
nvgpu_pmu_ss_create_ssmd_lookup_table(g,
pmu, pmu->super_surface);
}

pmu->pmu_ready = true;
@@ -26,12 +26,14 @@
#include <nvgpu/pmu/cmd.h>
#include <nvgpu/pmu/queue.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/pmu/super_surface.h>

/* FB queue init */
static int pmu_fb_queue_init(struct gk20a *g, struct pmu_queues *queues,
u32 id, union pmu_init_msg_pmu *init,
struct nvgpu_mem *super_surface_buf)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct nvgpu_engine_fb_queue_params params = {0};
u32 oflag = 0;
int err = 0;
@@ -55,9 +57,9 @@ static int pmu_fb_queue_init(struct gk20a *g, struct pmu_queues *queues,
oflag = OFLAG_WRITE;

params.super_surface_mem = super_surface_buf;
params.fbq_offset = (u32)offsetof(
struct nv_pmu_super_surface,
fbq.cmd_queues.queue[id]);
params.fbq_offset =
nvgpu_pmu_get_ss_cmd_fbq_offset(g, pmu,
pmu->super_surface, id);
params.size = NV_PMU_FBQ_CMD_NUM_ELEMENTS;
params.fbq_element_size = NV_PMU_FBQ_CMD_ELEMENT_SIZE;
} else if (PMU_IS_MESSAGE_QUEUE(id)) {
@@ -69,9 +71,9 @@ static int pmu_fb_queue_init(struct gk20a *g, struct pmu_queues *queues,
oflag = OFLAG_READ;

params.super_surface_mem = super_surface_buf;
params.fbq_offset = (u32)offsetof(
struct nv_pmu_super_surface,
fbq.msg_queue);
params.fbq_offset =
nvgpu_pmu_get_ss_msg_fbq_offset(g, pmu,
pmu->super_surface);
params.size = NV_PMU_FBQ_MSG_NUM_ELEMENTS;
params.fbq_element_size = NV_PMU_FBQ_MSG_ELEMENT_SIZE;
} else {
@@ -22,7 +22,6 @@

#include <nvgpu/pmu.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/pmuif/gpmu_super_surf_if.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/timers.h>
#include <nvgpu/pmuif/ctrlclk.h>
@@ -31,6 +30,7 @@
#include <nvgpu/pmu/clk/clk_domain.h>
#include <nvgpu/pmu/perf.h>
#include <nvgpu/pmu/cmd.h>
#include <nvgpu/pmu/super_surface.h>

#include "pmu_perf.h"

@@ -117,12 +117,13 @@ static void build_change_seq_boot (struct gk20a *g)
nvgpu_log_fn(g, " ");

script_last->super_surface_offset =
nvgpu_pmu_get_ss_member_set_offset(pmu,
nvgpu_pmu_get_ss_member_set_offset(g, pmu,
NV_PMU_SUPER_SURFACE_MEMBER_CHANGE_SEQ_GRP) +
(u32)(sizeof(struct perf_change_seq_pmu_script) *
SEQ_SCRIPT_LAST);

nvgpu_mem_rd_n(g, &pmu->super_surface_buf,
nvgpu_mem_rd_n(g, nvgpu_pmu_super_surface_mem(g,
pmu, pmu->super_surface),
script_last->super_surface_offset,
&script_last->buf,
(u32) sizeof(struct perf_change_seq_pmu_script));
@@ -158,7 +159,8 @@ static void build_change_seq_boot (struct gk20a *g)
/* Assume everything is P0 - Need to find the index for P0 */
script_last->buf.change.data.pstate_index = 0;

nvgpu_mem_wr_n(g, &pmu->super_surface_buf,
nvgpu_mem_wr_n(g, nvgpu_pmu_super_surface_mem(g,
pmu, pmu->super_surface),
script_last->super_surface_offset,
&script_last->buf,
(u32) sizeof(struct perf_change_seq_pmu_script));
@@ -219,12 +221,13 @@ int nvgpu_perf_change_seq_pmu_setup(struct gk20a *g)
perf_change_seq_pmu->b_lock;

perf_change_seq_pmu->script_last.super_surface_offset =
nvgpu_pmu_get_ss_member_set_offset(pmu,
nvgpu_pmu_get_ss_member_set_offset(g, pmu,
NV_PMU_SUPER_SURFACE_MEMBER_CHANGE_SEQ_GRP) +
(u32)(sizeof(struct perf_change_seq_pmu_script) *
SEQ_SCRIPT_LAST);

nvgpu_mem_rd_n(g, &pmu->super_surface_buf,
nvgpu_mem_rd_n(g, nvgpu_pmu_super_surface_mem(g,
pmu, pmu->super_surface),
perf_change_seq_pmu->script_last.super_surface_offset,
&perf_change_seq_pmu->script_last.buf,
(u32) sizeof(struct perf_change_seq_pmu_script));
@@ -232,7 +235,8 @@ int nvgpu_perf_change_seq_pmu_setup(struct gk20a *g)
/* Assume everything is P0 - Need to find the index for P0 */
perf_change_seq_pmu->script_last.buf.change.data.pstate_index = 0;

nvgpu_mem_wr_n(g, &pmu->super_surface_buf,
nvgpu_mem_wr_n(g, nvgpu_pmu_super_surface_mem(g,
pmu, pmu->super_surface),
perf_change_seq_pmu->script_last.super_surface_offset,
&perf_change_seq_pmu->script_last.buf,
(u32) sizeof(struct perf_change_seq_pmu_script));
@@ -25,7 +25,6 @@
#include <nvgpu/dma.h>
#include <nvgpu/log.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/pmuif/gpmu_super_surf_if.h>
#include <nvgpu/enabled.h>
#include <nvgpu/engine_queue.h>
#include <nvgpu/barrier.h>
@@ -37,6 +36,7 @@
#include <nvgpu/power_features/cg.h>
#include <nvgpu/nvgpu_err.h>
#include <nvgpu/pmu/lsfm.h>
#include <nvgpu/pmu/super_surface.h>

static void pmu_report_error(struct gk20a *g, u32 err_type,
u32 status, u32 pmu_err_type)
@@ -197,10 +197,9 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)

pmu->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE;

if (g->ops.pmu.alloc_super_surface != NULL) {
err = g->ops.pmu.alloc_super_surface(g,
&pmu->super_surface_buf,
sizeof(struct nv_pmu_super_surface));
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PMU_SUPER_SURFACE)) {
err = nvgpu_pmu_super_surface_buf_alloc(g,
pmu, pmu->super_surface);
if (err != 0) {
goto err_free_seq_buf;
}
@@ -219,8 +218,9 @@ skip_init:
nvgpu_log_fn(g, "done");
return 0;
err_free_super_surface:
if (g->ops.pmu.alloc_super_surface != NULL) {
nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf);
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PMU_SUPER_SURFACE)) {
nvgpu_dma_unmap_free(vm, nvgpu_pmu_super_surface_mem(g,
pmu, pmu->super_surface));
}
err_free_seq_buf:
nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
@@ -487,3 +487,10 @@ int nvgpu_pmu_lock_release(struct gk20a *g, struct nvgpu_pmu *pmu,

return nvgpu_pmu_mutex_release(g, &pmu->mutexes, id, token);
}

void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
{
nvgpu_log_fn(g, " ");

nvgpu_dma_free(g, mem);
}

@@ -36,6 +36,7 @@
#include <nvgpu/pmu/clk/clk.h>
#include <nvgpu/pmu/allocator.h>
#include <nvgpu/pmu/lsfm.h>
#include <nvgpu/pmu/super_surface.h>

/* PMU NS UCODE IMG */
#define NVGPU_PMU_NS_UCODE_IMAGE "gpmu_ucode.bin"
@@ -156,8 +157,9 @@ static void config_pmu_cmdline_args_super_surface_v6(struct nvgpu_pmu *pmu)
{
struct gk20a *g = gk20a_from_pmu(pmu);

if (g->ops.pmu.alloc_super_surface != NULL) {
nvgpu_pmu_surface_describe(g, &pmu->super_surface_buf,
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PMU_SUPER_SURFACE)) {
nvgpu_pmu_surface_describe(g,
nvgpu_pmu_super_surface_mem(g, pmu, pmu->super_surface),
&pmu->args_v6.super_surface);
}
}
@@ -1651,8 +1653,8 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
}

if (nvgpu_mem_is_valid(&pmu->super_surface_buf)) {
nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf);
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PMU_SUPER_SURFACE)) {
nvgpu_pmu_super_surface_deinit(g, pmu, pmu->super_surface);
}

nvgpu_pmu_lsfm_deinit(g, pmu, pmu->lsfm);
@@ -1787,6 +1789,14 @@ int nvgpu_early_init_pmu_sw(struct gk20a *g, struct nvgpu_pmu *pmu)
goto init_failed;
}

if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PMU_SUPER_SURFACE)) {
err = nvgpu_pmu_super_surface_init(g, pmu,
&pmu->super_surface);
if (err != 0) {
goto init_failed;
}
}

pmu->remove_support = nvgpu_remove_pmu_support;

goto exit;
@@ -23,17 +23,24 @@
#include <nvgpu/pmu.h>
#include <nvgpu/dma.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/pmuif/gpmu_super_surf_if.h>
#include <nvgpu/pmu/super_surface.h>

int nvgpu_pmu_super_surface_alloc(struct gk20a *g,
struct nvgpu_mem *mem_surface, u32 size)
#include "super_surface_priv.h"

int nvgpu_pmu_super_surface_buf_alloc(struct gk20a *g, struct nvgpu_pmu *pmu,
struct pmu_super_surface *ss)
{
struct vm_gk20a *vm = g->mm.pmu.vm;
int err = 0;

nvgpu_log_fn(g, " ");

err = nvgpu_dma_alloc_map(vm, size, mem_surface);
if (ss == NULL) {
return 0;
}

err = nvgpu_dma_alloc_map(vm, sizeof(struct nv_pmu_super_surface),
&ss->super_surface_buf);
if (err != 0) {
nvgpu_err(g, "failed to allocate pmu suffer surface\n");
}
@@ -41,11 +48,10 @@ int nvgpu_pmu_super_surface_alloc(struct gk20a *g,
return err;
}

void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
struct nvgpu_mem *nvgpu_pmu_super_surface_mem(struct gk20a *g,
struct nvgpu_pmu *pmu, struct pmu_super_surface *ss)
{
nvgpu_log_fn(g, " ");

nvgpu_dma_free(g, mem);
return &ss->super_surface_buf;
}

/*
@@ -55,21 +61,25 @@ void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
* table, i.e one table is for SET ID TYPE & second table for
* GET_STATUS ID_TYPE.
*/
void nvgpu_pmu_create_ssmd_lookup_table(struct nvgpu_pmu *pmu)
void nvgpu_pmu_ss_create_ssmd_lookup_table(struct gk20a *g,
struct nvgpu_pmu *pmu, struct pmu_super_surface *ss)
{
struct gk20a *g = pmu->g;
struct nv_pmu_super_surface_member_descriptor ssmd;
u32 ssmd_size = (u32)sizeof(
struct nv_pmu_super_surface_member_descriptor);
u32 ssmd_size = (u32)
sizeof(struct nv_pmu_super_surface_member_descriptor);
u32 idx = 0U;

nvgpu_log_fn(g, " ");

if (ss == NULL) {
return;
}

for (idx = 0U; idx < NV_PMU_SUPER_SURFACE_MEMBER_DESCRIPTOR_COUNT;
idx++) {
(void) memset(&ssmd, 0x0, ssmd_size);

nvgpu_mem_rd_n(g, &pmu->super_surface_buf, idx * ssmd_size,
nvgpu_mem_rd_n(g, &ss->super_surface_buf, idx * ssmd_size,
&ssmd, ssmd_size);

nvgpu_pmu_dbg(g, "ssmd: id-0x%x offset-0x%x size-%x rsvd-0x%x",
@@ -85,10 +95,11 @@ void nvgpu_pmu_create_ssmd_lookup_table(struct nvgpu_pmu *pmu)
*/
ssmd.id &= 0xFFFFU;
/*use member ID as index for lookup table too*/
(void) memcpy(&pmu->ssmd_set[ssmd.id], &ssmd,
(void) memcpy(&ss->ssmd_set[ssmd.id], &ssmd,
ssmd_size);
} else if ((ssmd.id &
NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_TYPE_GET_STATUS) != 0U) {
NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_TYPE_GET_STATUS)
!= 0U) {
/*
* clear member type from member ID as we create
* different table for each type & use ID as index
@@ -96,7 +107,7 @@ void nvgpu_pmu_create_ssmd_lookup_table(struct nvgpu_pmu *pmu)
*/
ssmd.id &= 0xFFFFU;
/*use member ID as index for lookup table too*/
(void) memcpy(&pmu->ssmd_get_status[ssmd.id], &ssmd,
(void) memcpy(&ss->ssmd_get_status[ssmd.id], &ssmd,
ssmd_size);
} else {
continue;
@@ -104,25 +115,77 @@ void nvgpu_pmu_create_ssmd_lookup_table(struct nvgpu_pmu *pmu)
}
}

u32 nvgpu_pmu_get_ss_member_set_offset(struct nvgpu_pmu *pmu, u32 member_id)
u32 nvgpu_pmu_get_ss_member_set_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, u32 member_id)
{
return pmu->ssmd_set[member_id].offset;
return pmu->super_surface->ssmd_set[member_id].offset;
}

u32 nvgpu_pmu_get_ss_member_set_size(struct nvgpu_pmu *pmu, u32 member_id)
u32 nvgpu_pmu_get_ss_member_set_size(struct gk20a *g,
struct nvgpu_pmu *pmu, u32 member_id)
{
return pmu->ssmd_set[member_id].size;
return pmu->super_surface->ssmd_set[member_id].size;
}

u32 nvgpu_pmu_get_ss_member_get_status_offset(struct nvgpu_pmu *pmu,
u32 member_id)
u32 nvgpu_pmu_get_ss_member_get_status_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, u32 member_id)
{
return pmu->ssmd_get_status[member_id].offset;
return pmu->super_surface->ssmd_get_status[member_id].offset;
}

u32 nvgpu_pmu_get_ss_member_get_status_size(struct nvgpu_pmu *pmu,
u32 member_id)
u32 nvgpu_pmu_get_ss_member_get_status_size(struct gk20a *g,
struct nvgpu_pmu *pmu, u32 member_id)
{
return pmu->ssmd_get_status[member_id].size;
return pmu->super_surface->ssmd_get_status[member_id].size;
}

u32 nvgpu_pmu_get_ss_cmd_fbq_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, struct pmu_super_surface *ss, u32 id)
{
return (u32)offsetof(struct nv_pmu_super_surface,
fbq.cmd_queues.queue[id]);
}

u32 nvgpu_pmu_get_ss_msg_fbq_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, struct pmu_super_surface *ss)
{
return (u32)offsetof(struct nv_pmu_super_surface,
fbq.msg_queue);
}

u32 nvgpu_pmu_get_ss_msg_fbq_element_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, struct pmu_super_surface *ss, u32 idx)
{
return (u32)offsetof(struct nv_pmu_super_surface,
fbq.msg_queue.element[idx]);
}

void nvgpu_pmu_super_surface_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
struct pmu_super_surface *ss)
{
nvgpu_log_fn(g, " ");

if (ss == NULL) {
return;
}

if (nvgpu_mem_is_valid(&ss->super_surface_buf)) {
nvgpu_dma_free(g, &ss->super_surface_buf);
}

nvgpu_kfree(g, ss);
}

int nvgpu_pmu_super_surface_init(struct gk20a *g, struct nvgpu_pmu *pmu,
struct pmu_super_surface **super_surface)
{
int err = 0;

*super_surface = (struct pmu_super_surface *) nvgpu_kzalloc(g,
sizeof(struct pmu_super_surface));
if (*super_surface == NULL) {
err = -ENOMEM;
}

return err;
}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,14 +20,19 @@
* DEALINGS IN THE SOFTWARE.
*/

#ifndef NVGPU_PMUIF_GPMU_SUPER_SURF_IF_H
#define NVGPU_PMUIF_GPMU_SUPER_SURF_IF_H
#ifndef SUPER_SURFACE_PRIV_H
#define SUPER_SURFACE_PRIV_H

#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/pmuif/gpmuifboardobj.h>
#include <nvgpu/pmuif/gpmuifvolt.h>
#include <nvgpu/pmuif/gpmuifclk.h>
#include <nvgpu/pmuif/gpmuifperf.h>
#include <nvgpu/pmuif/gpmuif_cmn.h>
#include <nvgpu/flcnif_cmn.h>

struct nvgpu_mem;
struct nv_pmu_super_surface_member_descriptor;

/* PMU super surface */
/* 1MB Bytes for SUPER_SURFACE_SIZE */
@@ -43,33 +48,18 @@
#define SS_UNMAPPED_MEMBERS_SIZE (SUPER_SURFACE_SIZE - \
(FBQ_CMD_QUEUES_SIZE + FBQ_MSG_QUEUE_SIZE + SSMD_SIZE + SS_HDR_SIZE))

/*
* Super surface member BIT identification used in member_mask indicating
* which members in the super surface are valid.
*
* The ordering here is very important because it defines the order of
* processing in the PMU and takes dependencies into consideration.
*/
#define NV_PMU_SUPER_SURFACE_MEMBER_THERM_DEVICE_GRP 0x00U
#define NV_PMU_SUPER_SURFACE_MEMBER_THERM_CHANNEL_GRP 0x01U
#define NV_PMU_SUPER_SURFACE_MEMBER_VFE_VAR_GRP 0x03U
#define NV_PMU_SUPER_SURFACE_MEMBER_VFE_EQU_GRP 0x04U
#define NV_PMU_SUPER_SURFACE_MEMBER_VOLT_DEVICE_GRP 0x0BU
#define NV_PMU_SUPER_SURFACE_MEMBER_VOLT_RAIL_GRP 0x0CU
#define NV_PMU_SUPER_SURFACE_MEMBER_VOLT_POLICY_GRP 0x0DU
#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_DOMAIN_GRP 0x12U
#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_PROG_GRP 0x13U
#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_VIN_DEVICE_GRP 0x15U
#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_FLL_DEVICE_GRP 0x16U
#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_VF_POINT_GRP 0x17U
#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_FREQ_CONTROLLER_GRP 0x18U
#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_FREQ_DOMAIN_GRP 0x19U
#define NV_PMU_SUPER_SURFACE_MEMBER_CHANGE_SEQ_GRP 0x1EU

#define NV_PMU_SUPER_SURFACE_MEMBER_COUNT 0x1FU

/* SSMD */
#define NV_PMU_SUPER_SURFACE_MEMBER_DESCRIPTOR_COUNT 32U

/*
* Defines the structure of the @ nv_pmu_super_surface_member_descriptor::id
*/
#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_GROUP 0x0000U
#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_GROUP_INVALID 0xFFFFU
#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_TYPE_SET BIT(16)
#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_TYPE_GET_STATUS BIT(17)
#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_RSVD (0x00UL << 20U)

struct nv_pmu_super_surface_member_descriptor {
/* The member ID (@see NV_PMU_SUPER_SURFACE_MEMBER_ID_<xyz>). */
u32 id;
@@ -84,15 +74,7 @@ struct nv_pmu_super_surface_member_descriptor {
u32 rsvd;
};

/*
* Defines the structure of the @ nv_pmu_super_surface_member_descriptor::id
*/
#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_GROUP 0x0000U
#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_GROUP_INVALID 0xFFFFU
#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_TYPE_SET BIT(16)
#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_TYPE_GET_STATUS BIT(17)
#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_RSVD (0x00UL << 20U)

/* PMU super surface */
struct nv_pmu_super_surface_hdr {
struct falc_u64 address;
u32 member_mask;
@@ -127,35 +109,55 @@ struct nv_pmu_super_surface {
* in nvgpu using these members, instead use ssmd
* member present above to know the offset of
* required boardobj from super surface in nvgpu
* */
*/
struct {
struct nv_pmu_volt_volt_device_boardobj_grp_set volt_device_grp_set;
struct nv_pmu_volt_volt_policy_boardobj_grp_set volt_policy_grp_set;
struct nv_pmu_volt_volt_rail_boardobj_grp_set volt_rail_grp_set;
struct nv_pmu_volt_volt_device_boardobj_grp_set
volt_device_grp_set;
struct nv_pmu_volt_volt_policy_boardobj_grp_set
volt_policy_grp_set;
struct nv_pmu_volt_volt_rail_boardobj_grp_set
volt_rail_grp_set;

struct nv_pmu_volt_volt_policy_boardobj_grp_get_status volt_policy_grp_get_status;
struct nv_pmu_volt_volt_rail_boardobj_grp_get_status volt_rail_grp_get_status;
struct nv_pmu_volt_volt_policy_boardobj_grp_get_status
volt_policy_grp_get_status;
struct nv_pmu_volt_volt_rail_boardobj_grp_get_status
volt_rail_grp_get_status;
} volt;
struct {
struct nv_pmu_clk_clk_vin_device_boardobj_grp_set clk_vin_device_grp_set;
struct nv_pmu_clk_clk_domain_boardobj_grp_set clk_domain_grp_set;
struct nv_pmu_clk_clk_freq_controller_boardobj_grp_set clk_freq_controller_grp_set;
struct nv_pmu_clk_clk_fll_device_boardobj_grp_set clk_fll_device_grp_set;
struct nv_pmu_clk_clk_prog_boardobj_grp_set clk_prog_grp_set;
struct nv_pmu_clk_clk_vf_point_boardobj_grp_set clk_vf_point_grp_set;
struct nv_pmu_clk_clk_vin_device_boardobj_grp_get_status clk_vin_device_grp_get_status;
struct nv_pmu_clk_clk_fll_device_boardobj_grp_get_status clk_fll_device_grp_get_status;
struct nv_pmu_clk_clk_vf_point_boardobj_grp_get_status clk_vf_point_grp_get_status;
struct nv_pmu_clk_clk_freq_domain_boardobj_grp_set clk_freq_domain_grp_set;
struct nv_pmu_clk_clk_vin_device_boardobj_grp_set
clk_vin_device_grp_set;
struct nv_pmu_clk_clk_domain_boardobj_grp_set
clk_domain_grp_set;
struct nv_pmu_clk_clk_freq_controller_boardobj_grp_set
clk_freq_controller_grp_set;
struct nv_pmu_clk_clk_fll_device_boardobj_grp_set
clk_fll_device_grp_set;
struct nv_pmu_clk_clk_prog_boardobj_grp_set
clk_prog_grp_set;
struct nv_pmu_clk_clk_vf_point_boardobj_grp_set
clk_vf_point_grp_set;
struct nv_pmu_clk_clk_vin_device_boardobj_grp_get_status
clk_vin_device_grp_get_status;
struct nv_pmu_clk_clk_fll_device_boardobj_grp_get_status
clk_fll_device_grp_get_status;
struct nv_pmu_clk_clk_vf_point_boardobj_grp_get_status
clk_vf_point_grp_get_status;
struct nv_pmu_clk_clk_freq_domain_boardobj_grp_set
clk_freq_domain_grp_set;
} clk;
struct {
struct nv_pmu_perf_vfe_equ_boardobj_grp_set_pack vfe_equ_grp_set;
struct nv_pmu_perf_vfe_var_boardobj_grp_set_pack vfe_var_grp_set;
struct nv_pmu_perf_vfe_var_boardobj_grp_get_status_pack vfe_var_grp_get_status;
struct nv_pmu_perf_vfe_equ_boardobj_grp_set_pack
vfe_equ_grp_set;
struct nv_pmu_perf_vfe_var_boardobj_grp_set_pack
vfe_var_grp_set;
struct nv_pmu_perf_vfe_var_boardobj_grp_get_status_pack
vfe_var_grp_get_status;
} perf;
struct {
struct nv_pmu_therm_therm_channel_boardobj_grp_set therm_channel_grp_set;
struct nv_pmu_therm_therm_device_boardobj_grp_set therm_device_grp_set;
struct nv_pmu_therm_therm_channel_boardobj_grp_set
therm_channel_grp_set;
struct nv_pmu_therm_therm_device_boardobj_grp_set
therm_device_grp_set;
} therm;
struct {
struct perf_change_seq_pmu_script script_curr;
@@ -163,10 +165,24 @@ struct nv_pmu_super_surface {
struct perf_change_seq_pmu_script script_query;
} change_seq;
struct {
struct nv_pmu_clk_clk_vf_point_boardobj_grp_set clk_vf_point_grp_set;
struct nv_pmu_clk_clk_vf_point_boardobj_grp_get_status clk_vf_point_grp_get_status;
}clk_35;
struct nv_pmu_clk_clk_vf_point_boardobj_grp_set
clk_vf_point_grp_set;
struct nv_pmu_clk_clk_vf_point_boardobj_grp_get_status
clk_vf_point_grp_get_status;
} clk_35;
};
};

#endif /* NVGPU_PMUIF_GPMU_SUPER_SURF_IF_H */
/* nvgpu super surface */
struct pmu_super_surface {
/* super surface members */
struct nvgpu_mem super_surface_buf;

struct nv_pmu_super_surface_member_descriptor
ssmd_set[NV_PMU_SUPER_SURFACE_MEMBER_DESCRIPTOR_COUNT];

struct nv_pmu_super_surface_member_descriptor
ssmd_get_status[NV_PMU_SUPER_SURFACE_MEMBER_DESCRIPTOR_COUNT];
};

#endif /* SUPER_SURFACE_PRIV_H */
@@ -1121,11 +1121,9 @@ static const struct gpu_ops gv100_ops = {
.is_engine_in_reset = gp106_pmu_is_engine_in_reset,
.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
.get_irqdest = gk20a_pmu_get_irqdest,
.alloc_super_surface = nvgpu_pmu_super_surface_alloc,
.is_debug_mode_enabled = gm20b_pmu_is_debug_mode_en,
.setup_apertures = gp106_pmu_setup_apertures,
.secured_pmu_start = gm20b_secured_pmu_start,
.create_ssmd_lookup_table = nvgpu_pmu_create_ssmd_lookup_table,
.save_zbc = gk20a_pmu_save_zbc,
.pmu_clear_bar0_host_err_status =
gm20b_clear_pmu_bar0_host_err_status,
@@ -1472,6 +1470,7 @@ int gv100_init_hal(struct gk20a *g)
nvgpu_set_enabled(g, NVGPU_SUPPORT_ZBC_STENCIL, true);
nvgpu_set_enabled(g, NVGPU_SUPPORT_PREEMPTION_GFXP, true);
nvgpu_set_enabled(g, NVGPU_SUPPORT_SEC2_VM, true);
nvgpu_set_enabled(g, NVGPU_SUPPORT_PMU_SUPER_SURFACE, true);

/*
* gv100 bypasses the IOMMU since it uses the nvlink path memory.
@@ -34,7 +34,7 @@ struct pmu_surface;
#include <nvgpu/boardobjgrpmask.h>
#include <nvgpu/list.h>
#include <nvgpu/pmu.h>
#include <nvgpu/pmuif/gpmu_super_surf_if.h>
#include <nvgpu/pmu/super_surface.h>

/*
* Board Object Group destructor.
@@ -336,9 +336,9 @@ do { \
NV_PMU_##ENG##_MSG_ID_BOARDOBJ_GRP_SET, /* msgid */ \
(u32)sizeof(union nv_pmu_##eng##_##class##_boardobjgrp_set_header_aligned), \
(u32)sizeof(union nv_pmu_##eng##_##class##_boardobj_set_union_aligned), \
(u32)nvgpu_pmu_get_ss_member_set_size(&g->pmu, \
(u32)nvgpu_pmu_get_ss_member_set_size(g, &g->pmu, \
NV_PMU_SUPER_SURFACE_MEMBER_##CLASS##_GRP), \
(u32)nvgpu_pmu_get_ss_member_set_offset(&g->pmu, \
(u32)nvgpu_pmu_get_ss_member_set_offset(g, &g->pmu, \
NV_PMU_SUPER_SURFACE_MEMBER_##CLASS##_GRP), \
NV_PMU_RPC_ID_##ENG##_BOARD_OBJ_GRP_CMD))

@@ -352,9 +352,9 @@ do { \
NV_PMU_##ENG##_MSG_ID_BOARDOBJ_GRP_GET_STATUS, /* msgid */ \
(u32)sizeof(union nv_pmu_##eng##_##class##_boardobjgrp_get_status_header_aligned), \
(u32)sizeof(union nv_pmu_##eng##_##class##_boardobj_get_status_union_aligned), \
(u32)nvgpu_pmu_get_ss_member_get_status_size(&g->pmu, \
(u32)nvgpu_pmu_get_ss_member_get_status_size(g, &g->pmu, \
NV_PMU_SUPER_SURFACE_MEMBER_##CLASS##_GRP), \
(u32)nvgpu_pmu_get_ss_member_get_status_offset(&g->pmu, \
(u32)nvgpu_pmu_get_ss_member_get_status_offset(g, &g->pmu, \
NV_PMU_SUPER_SURFACE_MEMBER_##CLASS##_GRP), \
NV_PMU_RPC_ID_##ENG##_BOARD_OBJ_GRP_CMD))
@@ -194,10 +194,12 @@ struct gk20a;
/* GFXP preemption support */
#define NVGPU_SUPPORT_PREEMPTION_GFXP 75

/* PMU Super surface */
#define NVGPU_SUPPORT_PMU_SUPER_SURFACE 76
/*
* Must be greater than the largest bit offset in the above list.
*/
#define NVGPU_MAX_ENABLED_BITS 76U
#define NVGPU_MAX_ENABLED_BITS 77U

/**
* nvgpu_is_enabled - Check if the passed flag is enabled.
@@ -1436,8 +1436,6 @@ struct gpu_ops {
void (*set_irqmask)(struct gk20a *g);
void (*setup_apertures)(struct gk20a *g);
u32 (*get_irqdest)(struct gk20a *g);
int (*alloc_super_surface)(struct gk20a *g,
struct nvgpu_mem *super_surface, u32 size);
bool (*is_debug_mode_enabled)(struct gk20a *g);
void (*secured_pmu_start)(struct gk20a *g);
struct {
@@ -1449,7 +1447,6 @@ struct gpu_ops {
u32 hw_id, u32 err_id, u32 status,
u32 pmu_err_type);
} err_ops;
void (*create_ssmd_lookup_table)(struct nvgpu_pmu *pmu);
void (*save_zbc)(struct gk20a *g, u32 entries);
void (*pmu_clear_bar0_host_err_status)(struct gk20a *g);
} pmu;
@@ -29,7 +29,6 @@
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/flcnif_cmn.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/pmuif/gpmu_super_surf_if.h>
#include <nvgpu/falcon.h>
#include <nvgpu/timers.h>
#include <nvgpu/pmu/pmu_pg.h>
@@ -219,12 +218,7 @@ struct nvgpu_pmu {
struct nvgpu_mem seq_buf;
struct nvgpu_mem trace_buf;

/* super surface members */
struct nvgpu_mem super_surface_buf;
struct nv_pmu_super_surface_member_descriptor
ssmd_set[NV_PMU_SUPER_SURFACE_MEMBER_DESCRIPTOR_COUNT];
struct nv_pmu_super_surface_member_descriptor
ssmd_get_status[NV_PMU_SUPER_SURFACE_MEMBER_DESCRIPTOR_COUNT];
struct pmu_super_surface *super_surface;

struct pmu_sha1_gid gid_info;

@@ -355,15 +349,6 @@ void nvgpu_pmu_get_cmd_line_args_offset(struct gk20a *g,

struct gk20a *gk20a_from_pmu(struct nvgpu_pmu *pmu);

/* super surface */
void nvgpu_pmu_create_ssmd_lookup_table(struct nvgpu_pmu *pmu);
u32 nvgpu_pmu_get_ss_member_set_offset(struct nvgpu_pmu *pmu, u32 member_id);
u32 nvgpu_pmu_get_ss_member_get_status_offset(struct nvgpu_pmu *pmu,
u32 member_id);
u32 nvgpu_pmu_get_ss_member_set_size(struct nvgpu_pmu *pmu, u32 member_id);
u32 nvgpu_pmu_get_ss_member_get_status_size(struct nvgpu_pmu *pmu,
u32 member_id);

void nvgpu_pmu_report_bar0_pri_err_status(struct gk20a *g, u32 bar0_status,
u32 error_type);
int gk20a_pmu_bar0_error_status(struct gk20a *g, u32 *bar0_status,

@@ -29,7 +29,6 @@
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/flcnif_cmn.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/pmuif/gpmu_super_surf_if.h>
#include <nvgpu/pmuif/gpmuif_pg.h>
#include <nvgpu/timers.h>
#include <nvgpu/nvgpu_mem.h>
85 drivers/gpu/nvgpu/include/nvgpu/pmu/super_surface.h Normal file
@@ -0,0 +1,85 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/

#ifndef SUPER_SURFACE_H
#define SUPER_SURFACE_H

#include <nvgpu/types.h>

struct nvgpu_mem;
struct nvgpu_pmu;
struct pmu_super_surface;

/*
* Super surface member BIT identification used in member_mask indicating
* which members in the super surface are valid.
*
* The ordering here is very important because it defines the order of
* processing in the PMU and takes dependencies into consideration.
*/
#define NV_PMU_SUPER_SURFACE_MEMBER_THERM_DEVICE_GRP 0x00U
#define NV_PMU_SUPER_SURFACE_MEMBER_THERM_CHANNEL_GRP 0x01U
#define NV_PMU_SUPER_SURFACE_MEMBER_VFE_VAR_GRP 0x03U
#define NV_PMU_SUPER_SURFACE_MEMBER_VFE_EQU_GRP 0x04U
#define NV_PMU_SUPER_SURFACE_MEMBER_VOLT_DEVICE_GRP 0x0BU
#define NV_PMU_SUPER_SURFACE_MEMBER_VOLT_RAIL_GRP 0x0CU
#define NV_PMU_SUPER_SURFACE_MEMBER_VOLT_POLICY_GRP 0x0DU
#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_DOMAIN_GRP 0x12U
#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_PROG_GRP 0x13U
#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_VIN_DEVICE_GRP 0x15U
#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_FLL_DEVICE_GRP 0x16U
#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_VF_POINT_GRP 0x17U
#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_FREQ_CONTROLLER_GRP 0x18U
#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_FREQ_DOMAIN_GRP 0x19U
#define NV_PMU_SUPER_SURFACE_MEMBER_CHANGE_SEQ_GRP 0x1EU

#define NV_PMU_SUPER_SURFACE_MEMBER_COUNT 0x1FU

u32 nvgpu_pmu_get_ss_member_set_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, u32 member_id);
u32 nvgpu_pmu_get_ss_member_set_size(struct gk20a *g,
struct nvgpu_pmu *pmu, u32 member_id);
u32 nvgpu_pmu_get_ss_member_get_status_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, u32 member_id);
u32 nvgpu_pmu_get_ss_member_get_status_size(struct gk20a *g,
struct nvgpu_pmu *pmu, u32 member_id);
void nvgpu_pmu_ss_create_ssmd_lookup_table(struct gk20a *g,
struct nvgpu_pmu *pmu, struct pmu_super_surface *ss);
struct nvgpu_mem *nvgpu_pmu_super_surface_mem(struct gk20a *g,
struct nvgpu_pmu *pmu, struct pmu_super_surface *ss);
int nvgpu_pmu_super_surface_buf_alloc(struct gk20a *g, struct nvgpu_pmu *pmu,
struct pmu_super_surface *ss);

u32 nvgpu_pmu_get_ss_cmd_fbq_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, struct pmu_super_surface *ss, u32 id);
u32 nvgpu_pmu_get_ss_msg_fbq_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, struct pmu_super_surface *ss);
u32 nvgpu_pmu_get_ss_msg_fbq_element_offset(struct gk20a *g,
struct nvgpu_pmu *pmu, struct pmu_super_surface *ss, u32 idx);

void nvgpu_pmu_super_surface_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
struct pmu_super_surface *ss);

int nvgpu_pmu_super_surface_init(struct gk20a *g, struct nvgpu_pmu *pmu,
struct pmu_super_surface **super_suface);

#endif /* SUPER_SURFACE_H */
|
||||
.is_engine_in_reset = gp106_pmu_is_engine_in_reset,
|
||||
.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
|
||||
.get_irqdest = gk20a_pmu_get_irqdest,
|
||||
.alloc_super_surface = nvgpu_pmu_super_surface_alloc,
|
||||
.handle_ext_irq = gv11b_pmu_handle_ext_irq,
|
||||
.is_debug_mode_enabled = gm20b_pmu_is_debug_mode_en,
|
||||
.setup_apertures = gp106_pmu_setup_apertures,
|
||||
.secured_pmu_start = gm20b_secured_pmu_start,
|
||||
.create_ssmd_lookup_table = nvgpu_pmu_create_ssmd_lookup_table,
|
||||
.save_zbc = gk20a_pmu_save_zbc,
|
||||
.pmu_clear_bar0_host_err_status =
|
||||
gm20b_clear_pmu_bar0_host_err_status,
|
||||
@@ -1520,6 +1518,7 @@ int tu104_init_hal(struct gk20a *g)
|
||||
nvgpu_set_enabled(g, NVGPU_SUPPORT_PLATFORM_ATOMIC, true);
|
||||
nvgpu_set_enabled(g, NVGPU_SUPPORT_SEC2_VM, true);
|
||||
nvgpu_set_enabled(g, NVGPU_SUPPORT_GSP_VM, true);
|
||||
nvgpu_set_enabled(g, NVGPU_SUPPORT_PMU_SUPER_SURFACE, true);
|
||||
|
||||
/* for now */
|
||||
gops->clk.support_clk_freq_controller = false;
|
||||
|
||||