mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 10:34:43 +03:00
gpu: nvgpu: use API to get hwpm_map size
Add new API nvgpu_gr_hwpm_map_get_size() in gr.hwpm_map unit to get size of hwpm_map. Use this API to get the size and allocate each pm_ctx. Move the nvgpu_gr_hwpm_map_init() call to the gr.gr unit in gr_init_setup_sw() instead of calling it from the gr.falcon unit. Add nvgpu_gr_hwpm_map_init() to vGPU initialization to initialize the hwpm_map size on vGPU. Jira NVGPU-3112 Change-Id: Ifc669dcc9ecae273cea6978f5639f312cd451019 Signed-off-by: Deepak Nibade <dnibade@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/2096160 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
3c4d6c95df
commit
6f0455a1c7
@@ -399,6 +399,13 @@ static int gr_init_setup_sw(struct gk20a *g)
|
||||
goto clean_up;
|
||||
}
|
||||
|
||||
err = nvgpu_gr_hwpm_map_init(g, &g->gr.hwpm_map,
|
||||
g->gr.ctx_vars.pm_ctxsw_image_size);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "hwpm_map init failed");
|
||||
goto clean_up;
|
||||
}
|
||||
|
||||
err = nvgpu_gr_config_init_map_tiles(g, gr->config);
|
||||
if (err != 0) {
|
||||
goto clean_up;
|
||||
|
||||
@@ -171,15 +171,6 @@ int nvgpu_gr_falcon_init_ctx_state(struct gk20a *g)
|
||||
if (err != 0) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (g->gr.ctx_vars.pm_ctxsw_image_size != 0U) {
|
||||
err = nvgpu_gr_hwpm_map_init(g, &g->gr.hwpm_map,
|
||||
g->gr.ctx_vars.pm_ctxsw_image_size);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "hwpm_map init failed");
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
|
||||
@@ -42,6 +42,10 @@ int nvgpu_gr_hwpm_map_init(struct gk20a *g, struct nvgpu_gr_hwpm_map **hwpm_map,
|
||||
{
|
||||
struct nvgpu_gr_hwpm_map *tmp_map;
|
||||
|
||||
if (size == 0U) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
tmp_map = nvgpu_kzalloc(g, sizeof(*tmp_map));
|
||||
if (tmp_map == NULL) {
|
||||
return -ENOMEM;
|
||||
@@ -65,6 +69,11 @@ void nvgpu_gr_hwpm_map_deinit(struct gk20a *g,
|
||||
nvgpu_kfree(g, hwpm_map);
|
||||
}
|
||||
|
||||
u32 nvgpu_gr_hwpm_map_get_size(struct nvgpu_gr_hwpm_map *hwpm_map)
|
||||
{
|
||||
return hwpm_map->pm_ctxsw_image_size;
|
||||
}
|
||||
|
||||
static int map_cmp(const void *a, const void *b)
|
||||
{
|
||||
const struct ctxsw_buf_offset_map_entry *e1;
|
||||
|
||||
@@ -31,6 +31,7 @@
|
||||
#include <nvgpu/gk20a.h>
|
||||
#include <nvgpu/gr/global_ctx.h>
|
||||
#include <nvgpu/gr/ctx.h>
|
||||
#include <nvgpu/gr/hwpm_map.h>
|
||||
|
||||
#include "common/gr/ctx_priv.h"
|
||||
|
||||
@@ -173,7 +174,7 @@ int vgpu_gr_alloc_pm_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
|
||||
}
|
||||
|
||||
pm_ctx->mem.gpu_va = nvgpu_vm_alloc_va(vm,
|
||||
g->gr.ctx_vars.pm_ctxsw_image_size,
|
||||
nvgpu_gr_hwpm_map_get_size(g->gr.hwpm_map),
|
||||
GMMU_PAGE_SIZE_KERNEL);
|
||||
|
||||
if (!pm_ctx->mem.gpu_va) {
|
||||
@@ -181,7 +182,7 @@ int vgpu_gr_alloc_pm_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pm_ctx->mem.size = g->gr.ctx_vars.pm_ctxsw_image_size;
|
||||
pm_ctx->mem.size = nvgpu_gr_hwpm_map_get_size(g->gr.hwpm_map);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -40,6 +40,7 @@
|
||||
#include <nvgpu/gr/zbc.h>
|
||||
#include <nvgpu/gr/zcull.h>
|
||||
#include <nvgpu/gr/fecs_trace.h>
|
||||
#include <nvgpu/gr/hwpm_map.h>
|
||||
#include <nvgpu/cyclestats_snapshot.h>
|
||||
#include <nvgpu/power_features/pg.h>
|
||||
|
||||
@@ -710,6 +711,13 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
|
||||
goto clean_up;
|
||||
}
|
||||
|
||||
err = nvgpu_gr_hwpm_map_init(g, &g->gr.hwpm_map,
|
||||
g->gr.ctx_vars.pm_ctxsw_image_size);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "hwpm_map init failed");
|
||||
goto clean_up;
|
||||
}
|
||||
|
||||
err = vgpu_gr_init_gr_zcull(g, gr);
|
||||
if (err) {
|
||||
goto clean_up;
|
||||
@@ -920,8 +928,6 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
|
||||
"failed to allocate pm ctxt buffer");
|
||||
return err;
|
||||
}
|
||||
nvgpu_gr_ctx_get_pm_ctx_mem(gr_ctx)->size =
|
||||
g->gr.ctx_vars.pm_ctxsw_image_size;
|
||||
}
|
||||
|
||||
msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE;
|
||||
|
||||
@@ -228,7 +228,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
|
||||
if (mode != NVGPU_GR_CTX_HWPM_CTXSW_MODE_NO_CTXSW) {
|
||||
nvgpu_gr_ctx_set_size(g->gr.gr_ctx_desc,
|
||||
NVGPU_GR_CTX_PM_CTX,
|
||||
g->gr.ctx_vars.pm_ctxsw_image_size);
|
||||
nvgpu_gr_hwpm_map_get_size(g->gr.hwpm_map));
|
||||
|
||||
ret = nvgpu_gr_ctx_alloc_pm_ctx(g, gr_ctx,
|
||||
g->gr.gr_ctx_desc, c->vm,
|
||||
|
||||
@@ -42,6 +42,8 @@ int nvgpu_gr_hwpm_map_init(struct gk20a *g, struct nvgpu_gr_hwpm_map **hwpm_map,
|
||||
void nvgpu_gr_hwpm_map_deinit(struct gk20a *g,
|
||||
struct nvgpu_gr_hwpm_map *hwpm_map);
|
||||
|
||||
u32 nvgpu_gr_hwpm_map_get_size(struct nvgpu_gr_hwpm_map *hwpm_map);
|
||||
|
||||
int nvgpu_gr_hwmp_map_find_priv_offset(struct gk20a *g,
|
||||
struct nvgpu_gr_hwpm_map *hwpm_map,
|
||||
u32 addr, u32 *priv_offset);
|
||||
|
||||
Reference in New Issue
Block a user