gpu: nvgpu: move zcull size initialization to falcon unit
Move the zcull size initialization out of the hal.gr.zcull unit and into the falcon unit. This removes the zcull dependency on the falcon unit.

Add a new variable zcull_image_size to the gr_gk20a.ctx_vars struct, and pass the size to nvgpu_gr_zcull_init()/vgpu_gr_init_gr_zcull() as a parameter to initialize the zcull info.

Jira NVGPU-3112

Change-Id: I54d966073dad658b4aad3a529f44c0478208b10c
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2098507
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Commit d8ec4e4e12, parent 0c297ce752, committed by mobile promotions.
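The overall flow after the change, condensed into one sketch from the hunks below (illustrative only, not literal source; only identifiers that appear in the diff are used): the falcon unit discovers the zcull context-switch image size and caches it in gr_gk20a.ctx_vars, and graphics setup then hands that cached value to the zcull unit.

	/* hal.gr.falcon (gm20b_gr_falcon_init_ctx_state): query the size once
	 * from the ctxsw firmware and cache it in ctx_vars */
	ret = gm20b_gr_falcon_ctrl_ctxsw(g,
			NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_ZCULL_IMAGE_SIZE,
			0, &g->gr.ctx_vars.zcull_image_size);

	/* common gr setup (gr_init_setup_sw): pass the cached size down, so
	 * the zcull unit never talks to the falcon unit itself */
	err = nvgpu_gr_zcull_init(g, &gr->zcull, gr->ctx_vars.zcull_image_size);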
@@ -411,7 +411,7 @@ static int gr_init_setup_sw(struct gk20a *g)
 		goto clean_up;
 	}
 
-	err = nvgpu_gr_zcull_init(g, &gr->zcull);
+	err = nvgpu_gr_zcull_init(g, &gr->zcull, gr->ctx_vars.zcull_image_size);
 	if (err != 0) {
 		goto clean_up;
 	}
@@ -30,7 +30,8 @@
 
 #include "zcull_priv.h"
 
-int nvgpu_gr_zcull_init(struct gk20a *g, struct nvgpu_gr_zcull **gr_zcull)
+int nvgpu_gr_zcull_init(struct gk20a *g, struct nvgpu_gr_zcull **gr_zcull,
+			u32 size)
 {
 	struct nvgpu_gr_config *gr_config = g->gr.config;
 	struct nvgpu_gr_zcull *zcull;
@@ -44,6 +45,8 @@ int nvgpu_gr_zcull_init(struct gk20a *g, struct nvgpu_gr_zcull **gr_zcull)
 
 	zcull->g = g;
 
+	zcull->zcull_ctxsw_image_size = size;
+
 	zcull->aliquot_width = nvgpu_gr_config_get_tpc_count(gr_config) * 16U;
 	zcull->aliquot_height = 16;
 
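Assembled from the two hunks above, the updated initializer in the common zcull unit now looks roughly as follows. This is a sketch, not the literal source: the allocation and the setup between and after the visible lines are elided and marked with comments.

	int nvgpu_gr_zcull_init(struct gk20a *g, struct nvgpu_gr_zcull **gr_zcull,
				u32 size)
	{
		struct nvgpu_gr_config *gr_config = g->gr.config;
		struct nvgpu_gr_zcull *zcull;

		/* allocation of zcull and its error handling elided in this view */

		zcull->g = g;

		/* the size is now supplied by the caller instead of being
		 * queried from the ctxsw falcon inside the zcull unit */
		zcull->zcull_ctxsw_image_size = size;

		zcull->aliquot_width = nvgpu_gr_config_get_tpc_count(gr_config) * 16U;
		zcull->aliquot_height = 16;

		/* remaining aliquot setup and *gr_zcull assignment elided */
		return 0;
	}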
@@ -146,8 +146,8 @@ int vgpu_gr_init_ctx_state(struct gk20a *g)
 		return -ENXIO;
 	}
 
-	g->gr.zcull->zcull_ctxsw_image_size = priv->constants.zcull_ctx_size;
-	if (g->gr.zcull->zcull_ctxsw_image_size == 0U) {
+	g->gr.ctx_vars.zcull_image_size = priv->constants.zcull_ctx_size;
+	if (g->gr.ctx_vars.zcull_image_size == 0U) {
 		return -ENXIO;
 	}
 
@@ -467,7 +467,8 @@ cleanup:
 	return err;
 }
 
-static int vgpu_gr_init_gr_zcull(struct gk20a *g, struct gr_gk20a *gr)
+static int vgpu_gr_init_gr_zcull(struct gk20a *g, struct gr_gk20a *gr,
+			u32 size)
 {
 	nvgpu_log_fn(g, " ");
 
@@ -476,6 +477,8 @@ static int vgpu_gr_init_gr_zcull(struct gk20a *g, struct gr_gk20a *gr)
 		return -ENOMEM;
 	}
 
+	gr->zcull->zcull_ctxsw_image_size = size;
+
 	return 0;
 }
 
 int vgpu_gr_bind_ctxsw_zcull(struct gk20a *g, struct channel_gk20a *c,
@@ -730,7 +733,7 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 		goto clean_up;
 	}
 
-	err = vgpu_gr_init_gr_zcull(g, gr);
+	err = vgpu_gr_init_gr_zcull(g, gr, gr->ctx_vars.zcull_image_size);
 	if (err) {
 		goto clean_up;
 	}
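On the vGPU path the flow is the same in miniature: vgpu_gr_init_ctx_state() caches the size reported by the host in ctx_vars, and vgpu_gr_init_gr_setup_sw() forwards it when allocating the zcull state. A condensed sketch using only lines visible in the hunks above:

	/* vgpu_gr_init_ctx_state(): cache the host-reported zcull ctxsw size */
	g->gr.ctx_vars.zcull_image_size = priv->constants.zcull_ctx_size;
	if (g->gr.ctx_vars.zcull_image_size == 0U) {
		return -ENXIO;
	}

	/* vgpu_gr_init_gr_setup_sw(): forward the cached size */
	err = vgpu_gr_init_gr_zcull(g, gr, gr->ctx_vars.zcull_image_size);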
@@ -104,8 +104,12 @@ struct gr_gk20a {
 		u32 pm_ctxsw_image_size;
 
 		u32 preempt_image_size;
 
+		u32 zcull_image_size;
+
 		bool force_preemption_gfxp;
 		bool force_preemption_cilp;
 
 		bool dump_ctxsw_stats_on_channel_close;
 	} ctx_vars;
 
@@ -684,6 +684,14 @@ int gm20b_gr_falcon_init_ctx_state(struct gk20a *g)
 				"query pm ctx image size failed");
 			return ret;
 		}
+		ret = gm20b_gr_falcon_ctrl_ctxsw(g,
+			NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_ZCULL_IMAGE_SIZE,
+			0, &g->gr.ctx_vars.zcull_image_size);
+		if (ret != 0) {
+			nvgpu_err(g,
+				"query zcull ctx image size failed");
+			return ret;
+		}
 	}
 
 	nvgpu_log_fn(g, "done");
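With this hunk the falcon unit discovers the zcull context-switch image size in gm20b_gr_falcon_init_ctx_state(), right next to the existing image-size queries (the "query pm ctx image size failed" context line above belongs to the preceding pm query), and stores the result in g->gr.ctx_vars.zcull_image_size. By the time gr_init_setup_sw() and vgpu_gr_init_gr_setup_sw() run, the value is already cached and only needs to be passed down.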
@@ -26,7 +26,6 @@
 #include <nvgpu/gr/subctx.h>
 #include <nvgpu/gr/ctx.h>
 #include <nvgpu/gr/zcull.h>
-#include <nvgpu/gr/gr_falcon.h>
 
 #include "common/gr/zcull_priv.h"
 
@@ -34,24 +33,6 @@
 
 #include <nvgpu/hw/gm20b/hw_gr_gm20b.h>
 
-static int gm20b_gr_init_zcull_ctxsw_image_size(struct gk20a *g,
-					struct nvgpu_gr_zcull *gr_zcull)
-{
-	int ret = 0;
-
-	if (!g->gr.ctx_vars.golden_image_initialized) {
-		ret = g->ops.gr.falcon.ctrl_ctxsw(g,
-			NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_ZCULL_IMAGE_SIZE,
-			0, &gr_zcull->zcull_ctxsw_image_size);
-		if (ret != 0) {
-			nvgpu_err(g,
-				"query zcull ctx image size failed");
-			return ret;
-		}
-	}
-	return ret;
-}
-
 int gm20b_gr_init_zcull_hw(struct gk20a *g,
 			struct nvgpu_gr_zcull *gr_zcull,
 			struct nvgpu_gr_config *gr_config)
@@ -61,17 +42,11 @@ int gm20b_gr_init_zcull_hw(struct gk20a *g,
 	bool floorsweep = false;
 	u32 rcp_conserv;
 	u32 offset;
-	int ret;
 
 	gr_zcull->total_aliquots =
 		gr_gpc0_zcull_total_ram_size_num_aliquots_f(
			nvgpu_readl(g, gr_gpc0_zcull_total_ram_size_r()));
 
-	ret = gm20b_gr_init_zcull_ctxsw_image_size(g, gr_zcull);
-	if (ret != 0) {
-		return ret;
-	}
-
 	for (gpc_index = 0;
 	     gpc_index < nvgpu_gr_config_get_gpc_count(gr_config);
 	     gpc_index++) {
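This is the other half of the move: the gm20b zcull HAL drops its private gm20b_gr_init_zcull_ctxsw_image_size() helper, the call to it from gm20b_gr_init_zcull_hw(), and the <nvgpu/gr/gr_falcon.h> include, so the zcull unit no longer reaches into the falcon unit at all.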
@@ -44,7 +44,8 @@ struct nvgpu_gr_zcull_info {
 	u32 subregion_count;
 };
 
-int nvgpu_gr_zcull_init(struct gk20a *g, struct nvgpu_gr_zcull **gr_zcull);
+int nvgpu_gr_zcull_init(struct gk20a *g, struct nvgpu_gr_zcull **gr_zcull,
+	u32 size);
 void nvgpu_gr_zcull_deinit(struct gk20a *g, struct nvgpu_gr_zcull *gr_zcull);
 
 u32 nvgpu_gr_get_ctxsw_zcull_size(struct gk20a *g,