gpu: nvgpu: PMU super surface support

- Added ops "pmu.alloc_super_surface" to allocate
  memory for the PMU super surface.
- Defined nvgpu_pmu_super_surface_alloc() to allocate
  PMU super surface memory and assigned it to
  "pmu.alloc_super_surface" for gv100 (a simplified
  sketch of the resulting flow follows this list).
- Set "pmu.alloc_super_surface" to NULL for gp106.
- Memory of sizeof(struct nv_pmu_super_surface) bytes
  is allocated during PMU SW init setup if
  "pmu.alloc_super_surface" is not NULL, and freed
  again if a later setup step fails.
- Added ops "pmu_ver.config_pmu_cmdline_args_super_surface"
  to describe the PMU super surface to the PMU ucode as
  part of the PMU command line args command, used only if
  "pmu.alloc_super_surface" is not NULL.
- Updated pmu_cmdline_args_v6 to include the member
  "struct flcn_mem_desc_v0 super_surface".
- Freed the PMU super surface memory in
  nvgpu_remove_pmu_support().
- Added "struct nvgpu_mem super_surface_buf" to struct nvgpu_pmu.
- Created the header "gpmu_super_surf_if.h" for the PMU super
  surface interface: "struct nv_pmu_super_surface" holds the
  super surface members, with rsvd[x] padding to keep member
  offsets in sync with the PMU ucode's super surface layout.
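
The bullets above add an optional HAL hook: chips that support the super
surface (gv100) install an allocator, chips that do not (gp106) leave the
hook NULL and every caller checks it first. Below is a minimal,
self-contained C sketch of that control flow; struct gpu, struct mem and
super_surface_alloc() are simplified stand-ins for gk20a, nvgpu_mem and
the real DMA-mapped allocation, not nvgpu definitions.

#include <stdio.h>
#include <stdlib.h>

struct mem { void *cpu_va; size_t size; };      /* stand-in for nvgpu_mem */
struct gpu {                                    /* stand-in for gk20a + gpu_ops */
	int (*alloc_super_surface)(struct gpu *g, struct mem *m, size_t size);
};

/* Models nvgpu_pmu_super_surface_alloc(): the real one DMA-maps sysmem. */
static int super_surface_alloc(struct gpu *g, struct mem *m, size_t size)
{
	(void)g;
	m->cpu_va = calloc(1, size);
	m->size = size;
	return m->cpu_va ? 0 : -1;              /* -ENOMEM in the kernel */
}

/* Models the nvgpu_init_pmu_setup_sw() change: allocate only if hooked. */
static int pmu_setup_sw(struct gpu *g, struct mem *surface, size_t size)
{
	if (g->alloc_super_surface)
		return g->alloc_super_surface(g, surface, size);
	return 0;                               /* gp106-style: nothing to do */
}

int main(void)
{
	struct gpu gv100 = { .alloc_super_surface = super_surface_alloc };
	struct gpu gp106 = { 0 };               /* hook left NULL */
	struct mem surf = { 0 };

	if (pmu_setup_sw(&gv100, &surf, 0x1000) == 0)
		printf("gv100: allocated %zu bytes\n", surf.size);
	pmu_setup_sw(&gp106, &surf, 0x1000);    /* no-op, hook is NULL */
	free(surf.cpu_va);
	return 0;
}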

Change-Id: I2b28912bf4d86a8cc72884e3b023f21c73fb3503
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1656571
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -241,11 +241,19 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
 	pmu->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE;
 
+	if (g->ops.pmu.alloc_super_surface) {
+		err = g->ops.pmu.alloc_super_surface(g,
+			&pmu->super_surface_buf,
+			sizeof(struct nv_pmu_super_surface));
+		if (err)
+			goto err_free_seq_buf;
+	}
+
 	err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
 		&pmu->trace_buf);
 	if (err) {
 		nvgpu_err(g, "failed to allocate pmu trace buffer\n");
-		goto err_free_seq_buf;
+		goto err_free_super_surface;
 	}
 
 	pmu->sw_ready = true;
@@ -253,6 +261,9 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
 skip_init:
 	nvgpu_log_fn(g, "done");
 	return 0;
+err_free_super_surface:
+	if (g->ops.pmu.alloc_super_surface)
+		nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf);
 err_free_seq_buf:
 	nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
 err_free_seq:
@@ -560,6 +571,23 @@ int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 	return 0;
 }
 
+int nvgpu_pmu_super_surface_alloc(struct gk20a *g,
+	struct nvgpu_mem *mem_surface, u32 size)
+{
+	struct vm_gk20a *vm = g->mm.pmu.vm;
+	int err = 0;
+
+	nvgpu_log_fn(g, " ");
+
+	err = nvgpu_dma_alloc_map(vm, size, mem_surface);
+	if (err) {
+		nvgpu_err(g, "failed to allocate pmu super surface\n");
+		err = -ENOMEM;
+	}
+
+	return err;
+}
+
 void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	nvgpu_dma_free(g, mem);


@@ -142,6 +142,16 @@ static void set_pmu_cmdline_args_falctracedmabase_v5(struct nvgpu_pmu *pmu)
 	nvgpu_pmu_surface_describe(g, &pmu->trace_buf, &pmu->args_v5.trace_buf);
 }
 
+static void config_pmu_cmdline_args_super_surface_v6(struct nvgpu_pmu *pmu)
+{
+	struct gk20a *g = gk20a_from_pmu(pmu);
+
+	if (g->ops.pmu.alloc_super_surface) {
+		nvgpu_pmu_surface_describe(g, &pmu->super_surface_buf,
+			&pmu->args_v6.super_surface);
+	}
+}
+
 static void set_pmu_cmdline_args_falctracedmaidx_v5(
 	struct nvgpu_pmu *pmu, u32 idx)
 {
@@ -1250,6 +1260,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
 			set_pmu_cmdline_args_falctracedmabase_v5;
 		g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx =
 			set_pmu_cmdline_args_falctracedmaidx_v5;
+		g->ops.pmu_ver.config_pmu_cmdline_args_super_surface =
+			config_pmu_cmdline_args_super_surface_v6;
 		g->ops.pmu_ver.get_pmu_cmdline_args_ptr =
 			get_pmu_cmdline_args_ptr_v5;
 		g->ops.pmu_ver.get_pmu_allocation_struct_size =
@@ -1587,6 +1599,8 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
 	nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
+	nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf);
+
 	nvgpu_mutex_destroy(&pmu->elpg_mutex);
 	nvgpu_mutex_destroy(&pmu->pg_mutex);
 	nvgpu_mutex_destroy(&pmu->isr_mutex);


@@ -641,6 +641,8 @@ struct gpu_ops {
 			u32 size);
 		void (*set_pmu_cmdline_args_trace_dma_base)(
 			struct nvgpu_pmu *pmu);
+		void (*config_pmu_cmdline_args_super_surface)(
+			struct nvgpu_pmu *pmu);
 		void (*set_pmu_cmdline_args_trace_dma_idx)(
 			struct nvgpu_pmu *pmu, u32 idx);
 		void * (*get_pmu_cmdline_args_ptr)(struct nvgpu_pmu *pmu);
@@ -914,6 +916,8 @@ struct gpu_ops {
 		void (*update_lspmu_cmdline_args)(struct gk20a *g);
 		void (*setup_apertures)(struct gk20a *g);
 		u32 (*get_irqdest)(struct gk20a *g);
+		int (*alloc_super_surface)(struct gk20a *g,
+			struct nvgpu_mem *super_surface, u32 size);
 	} pmu;
 	struct {
 		int (*init_debugfs)(struct gk20a *g);


@@ -611,6 +611,7 @@ static const struct gpu_ops gp106_ops = {
 		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
 		.pmu_lpwr_enable_pg = nvgpu_lpwr_enable_pg,
 		.get_irqdest = gk20a_pmu_get_irqdest,
+		.alloc_super_surface = NULL,
 	},
 	.clk = {
 		.init_clk_support = gp106_init_clk_support,


@@ -178,6 +178,8 @@ void init_pmu_setup_hw1(struct gk20a *g)
 	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
 	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
 		pmu, GK20A_PMU_DMAIDX_VIRT);
+	if (g->ops.pmu_ver.config_pmu_cmdline_args_super_surface)
+		g->ops.pmu_ver.config_pmu_cmdline_args_super_surface(pmu);
 	nvgpu_flcn_copy_to_dmem(pmu->flcn, g->acr.pmu_args,
 		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),

@@ -620,6 +620,7 @@ static const struct gpu_ops gv100_ops = {
 		.is_engine_in_reset = gp106_pmu_is_engine_in_reset,
 		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
 		.get_irqdest = gk20a_pmu_get_irqdest,
+		.alloc_super_surface = nvgpu_pmu_super_surface_alloc,
 	},
 	.clk = {
 		.init_clk_support = gp106_init_clk_support,


@@ -306,6 +306,8 @@ struct nvgpu_pmu {
 	/* TBD: remove this if ZBC seq is fixed */
 	struct nvgpu_mem seq_buf;
 	struct nvgpu_mem trace_buf;
+	struct nvgpu_mem super_surface_buf;
+
 	bool buf_loaded;
 
 	struct pmu_sha1_gid gid_info;
@@ -449,6 +451,8 @@ int nvgpu_init_pmu_support(struct gk20a *g);
 int nvgpu_pmu_destroy(struct gk20a *g);
 int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
 	struct pmu_msg *msg);
+int nvgpu_pmu_super_surface_alloc(struct gk20a *g,
+	struct nvgpu_mem *mem_surface, u32 size);
 void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state,
 	bool post_change_event);


@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __GPMU_SUPER_SURF_IF_H__
+#define __GPMU_SUPER_SURF_IF_H__
+
+struct nv_pmu_super_surface_hdr {
+	u32 memberMask;
+	u16 dmemBufferSizeMax;
+};
+
+NV_PMU_MAKE_ALIGNED_STRUCT(nv_pmu_super_surface_hdr,
+	sizeof(struct nv_pmu_super_surface_hdr));
+
+/*
+ * Global Super Surface structure for combined INIT data required by PMU.
+ * NOTE: Any new substructures or entries must be aligned.
+ */
+struct nv_pmu_super_surface {
+	union nv_pmu_super_surface_hdr_aligned hdr;
+
+	struct {
+		struct nv_pmu_volt_volt_device_boardobj_grp_set volt_device_grp_set;
+		struct nv_pmu_volt_volt_policy_boardobj_grp_set volt_policy_grp_set;
+		struct nv_pmu_volt_volt_rail_boardobj_grp_set volt_rail_grp_set;
+
+		struct nv_pmu_volt_volt_policy_boardobj_grp_get_status volt_policy_grp_get_status;
+		struct nv_pmu_volt_volt_rail_boardobj_grp_get_status volt_rail_grp_get_status;
+		struct nv_pmu_volt_volt_device_boardobj_grp_get_status volt_device_grp_get_status;
+	} volt;
+	struct {
+		struct nv_pmu_clk_clk_vin_device_boardobj_grp_set clk_vin_device_grp_set;
+		struct nv_pmu_clk_clk_domain_boardobj_grp_set clk_domain_grp_set;
+		struct nv_pmu_clk_clk_freq_controller_boardobj_grp_set clk_freq_controller_grp_set;
+		struct nv_pmu_clk_clk_fll_device_boardobj_grp_set clk_fll_device_grp_set;
+		struct nv_pmu_clk_clk_prog_boardobj_grp_set clk_prog_grp_set;
+		struct nv_pmu_clk_clk_vf_point_boardobj_grp_set clk_vf_point_grp_set;
+
+		struct nv_pmu_clk_clk_vin_device_boardobj_grp_get_status clk_vin_device_grp_get_status;
+		struct nv_pmu_clk_clk_fll_device_boardobj_grp_get_status clk_fll_device_grp_get_status;
+		struct nv_pmu_clk_clk_vf_point_boardobj_grp_get_status clk_vf_point_grp_get_status;
+
+		u8 clk_rsvd[0x4660];
+	} clk;
+	struct {
+		struct nv_pmu_perf_vfe_equ_boardobj_grp_set vfe_equ_grp_set;
+		struct nv_pmu_perf_vfe_var_boardobj_grp_set vfe_var_grp_set;
+
+		struct nv_pmu_perf_vfe_var_boardobj_grp_get_status vfe_var_grp_get_status;
+
+		u8 perf_rsvd[0x40790];
+		u8 perfcf_rsvd[0x1eb0];
+	} perf;
+	struct {
+		struct nv_pmu_therm_therm_channel_boardobj_grp_set therm_channel_grp_set;
+		struct nv_pmu_therm_therm_device_boardobj_grp_set therm_device_grp_set;
+
+		u8 therm_rsvd[0x1460];
+	} therm;
+};
+
+#endif /* __GPMU_SUPER_SURF_IF_H__ */
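
The rsvd[] arrays above are the mechanism that keeps driver-side offsets
in sync with the ucode: members nvgpu never touches still occupy their
bytes, so everything after them lands at the offset the PMU expects. A
self-contained illustration with deliberately simplified, hypothetical
types and sizes:

#include <stddef.h>
#include <stdio.h>

struct grp_set { unsigned char bytes[0x100]; }; /* stand-in for a boardobjgrp blob */

/* Hypothetical two-member surface: the reserved block pads out entries
 * the driver skips, so therm_channel_grp_set keeps the ucode's offset. */
struct super_surface_sketch {
	struct grp_set clk_vin_device_grp_set;
	unsigned char clk_rsvd[0x4660];
	struct grp_set therm_channel_grp_set;
};

int main(void)
{
	printf("therm offset: 0x%zx\n",
		offsetof(struct super_surface_sketch, therm_channel_grp_set));
	return 0;
}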


@@ -66,8 +66,8 @@ struct pmu_cmdline_args_v6 {
 	u8 raise_priv_sec;
 	struct flcn_mem_desc_v0 gc6_ctx;
 	struct flcn_mem_desc_v0 gc6_bsod_ctx;
-	struct flcn_mem_desc_v0 init_data_dma_info;
-	u32 dummy;
+	struct flcn_mem_desc_v0 super_surface;
+	u32 flags;
 };
 
 /* GPU ID */

@@ -38,6 +38,7 @@
#include "gpmuiftherm.h"
#include "gpmuifthermsensor.h"
#include "gpmuifseq.h"
#include "gpmu_super_surf_if.h"
/*
* Command requesting execution of the RPC (Remote Procedure Call)