Files
linux-nvgpu/drivers/gpu/nvgpu/common/pmu/pmu_super_surface.c
Mahantesh Kumbar 80342778b3 gpu: nvgpu: PMU super surface SSMD support
-SSMD - super surface member descriptor
-created new file pmu_super_surface.c for super surface
 related functions.
-Modified macros BOARDOBJGRP_PMU_CMD_GRP_SET_CONSTRUCT and
 BOARDOBJGRP_PMU_CMD_GRP_GET_STATUS_CONSTRUCT to fetch
 offset/size using super surface related functions
-Moved functions nvgpu_pmu_super_surface_alloc() &
 nvgpu_pmu_surface_free from pmu.c to pmu_super_surface.c
-Created op create_ssmd_lookup_table under pmu
 to support chip-specific functions

Currently, NVGPU must modify the RM/PMU-defined common super surface
data struct to match offsets in the NVGPU super surface data struct,
since NVGPU cannot directly include the RM/PMU-defined struct due to
the number of boardobjs supported by NVGPU. This adds extra work
whenever a boardobj changes or support for a new boardobj must be
added. So, to fix this issue, the SSMD feature is introduced.

With SSMD support, NVGPU required boardobjs offset will be part of
SSMD lookup table which is part of PMU super surface buffer & is
always first member of PMU super surface data struct for easy access,
SSMD lookup table will be copied to PMU super surface SSMD offset by
PMU RTOS ucode at init stage as per predefined SSMD lookup table.

JIRA NVGPU-1874

Change-Id: Ida1edae707ddded300f9a629710b53a6606ac0ee
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1761338
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
2019-02-07 04:15:18 -08:00

129 lines
3.8 KiB
C

/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/pmu.h>
#include <nvgpu/dma.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/pmuif/gpmu_super_surf_if.h>
/*
 * Allocate and map the PMU super surface DMA buffer.
 *
 * @g		- GPU device context.
 * @mem_surface	- output: the allocated/mapped buffer descriptor.
 * @size	- requested buffer size in bytes.
 *
 * The buffer is mapped into the PMU's VM. Returns 0 on success or a
 * negative error code from nvgpu_dma_alloc_map() on failure; on
 * failure *mem_surface is left unmapped and must not be used.
 */
int nvgpu_pmu_super_surface_alloc(struct gk20a *g,
	struct nvgpu_mem *mem_surface, u32 size)
{
	struct vm_gk20a *vm = g->mm.pmu.vm;
	int err = 0;

	nvgpu_log_fn(g, " ");

	err = nvgpu_dma_alloc_map(vm, size, mem_surface);
	if (err != 0) {
		/*
		 * Fixed typo ("suffer surface") and dropped the trailing
		 * '\n' — the nvgpu logging helpers terminate the line.
		 */
		nvgpu_err(g, "failed to allocate pmu super surface");
	}

	return err;
}
/*
 * Release a PMU surface buffer previously obtained via
 * nvgpu_pmu_super_surface_alloc() (unmaps and frees the DMA memory).
 *
 * @g	- GPU device context.
 * @mem	- buffer descriptor to free.
 */
void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
{
	nvgpu_log_fn(g, " ");

	nvgpu_dma_free(g, mem);
}
/*
* Lookup table to hold info about super surface member,
* here member ID from nv_pmu_super_surface_member_descriptor
* used as a index to store the member info in two different
* table, i.e one table is for SET ID TYPE & second table for
* GET_STATUS ID_TYPE.
*/
void nvgpu_pmu_create_ssmd_lookup_table(struct nvgpu_pmu *pmu)
{
struct gk20a *g = pmu->g;
struct nv_pmu_super_surface_member_descriptor ssmd;
u32 ssmd_size = (u32)sizeof(
struct nv_pmu_super_surface_member_descriptor);
u32 idx = 0U;
nvgpu_log_fn(g, " ");
for (idx = 0U; idx < NV_PMU_SUPER_SURFACE_MEMBER_DESCRIPTOR_COUNT;
idx++) {
(void) memset(&ssmd, 0x0, ssmd_size);
nvgpu_mem_rd_n(g, &pmu->super_surface_buf, idx * ssmd_size,
&ssmd, ssmd_size);
nvgpu_pmu_dbg(g, "ssmd: id-0x%x offset-0x%x size-%x rsvd-0x%x",
ssmd.id, ssmd.offset, ssmd.size, ssmd.rsvd);
/* Check member type from ID member & update respective table*/
if ((ssmd.id &
NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_TYPE_SET) != 0U) {
/*
* clear member type from member ID as we create
* different table for each type & use ID as index
* during member info fetch.
*/
ssmd.id &= 0xFFFFU;
/*use member ID as index for lookup table too*/
(void) memcpy(&pmu->ssmd_set[ssmd.id], &ssmd,
ssmd_size);
} else if ((ssmd.id &
NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_TYPE_GET_STATUS) != 0U) {
/*
* clear member type from member ID as we create
* different table for each type & use ID as index
* during member info fetch.
*/
ssmd.id &= 0xFFFFU;
/*use member ID as index for lookup table too*/
(void) memcpy(&pmu->ssmd_get_status[ssmd.id], &ssmd,
ssmd_size);
} else {
continue;
}
}
}
/*
 * Fetch the super surface offset of a SET-type member by its member ID
 * (index into the SSMD SET lookup table built at init).
 */
u32 nvgpu_pmu_get_ss_member_set_offset(struct nvgpu_pmu *pmu, u32 member_id)
{
	const struct nv_pmu_super_surface_member_descriptor *desc =
			&pmu->ssmd_set[member_id];

	return desc->offset;
}
/*
 * Fetch the size of a SET-type super surface member by its member ID
 * (index into the SSMD SET lookup table built at init).
 */
u32 nvgpu_pmu_get_ss_member_set_size(struct nvgpu_pmu *pmu, u32 member_id)
{
	const struct nv_pmu_super_surface_member_descriptor *desc =
			&pmu->ssmd_set[member_id];

	return desc->size;
}
/*
 * Fetch the super surface offset of a GET_STATUS-type member by its
 * member ID (index into the SSMD GET_STATUS lookup table built at init).
 */
u32 nvgpu_pmu_get_ss_member_get_status_offset(struct nvgpu_pmu *pmu,
	u32 member_id)
{
	const struct nv_pmu_super_surface_member_descriptor *desc =
			&pmu->ssmd_get_status[member_id];

	return desc->offset;
}
/*
 * Fetch the size of a GET_STATUS-type super surface member by its
 * member ID (index into the SSMD GET_STATUS lookup table built at init).
 */
u32 nvgpu_pmu_get_ss_member_get_status_size(struct nvgpu_pmu *pmu,
	u32 member_id)
{
	const struct nv_pmu_super_surface_member_descriptor *desc =
			&pmu->ssmd_get_status[member_id];

	return desc->size;
}