gpu: nvgpu: remove gr_gk20a.ctx_vars struct
The gr_gk20a.ctx_vars struct currently stores sizes for golden_image, zcull,
pm_ctxsw, and gfxp_preemption_buffer.
But these sizes should really be owned by the respective units, and should be
assigned to those units as soon as they are queried from FECS.

Add a new structure to nvgpu_gr_falcon to hold the sizes queried from FECS:
struct nvgpu_gr_falcon_query_sizes {
	u32 golden_image_size;
	u32 pm_ctxsw_image_size;
	u32 preempt_image_size;
	u32 zcull_image_size;
};
The gr.falcon unit now queries the sizes from FECS and fills this structure.
The gr.falcon unit also exposes the APIs below to query these sizes:
u32 nvgpu_gr_falcon_get_golden_image_size(struct nvgpu_gr_falcon *falcon);
u32 nvgpu_gr_falcon_get_pm_ctxsw_image_size(struct nvgpu_gr_falcon *falcon);
u32 nvgpu_gr_falcon_get_preempt_image_size(struct nvgpu_gr_falcon *falcon);
u32 nvgpu_gr_falcon_get_zcull_image_size(struct nvgpu_gr_falcon *falcon);
The gr.gr unit now calls into the gr.falcon unit to initialize the sizes, and
then uses the exposed APIs to hand the sizes to the respective units, as
sketched below.
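
For reference, a condensed sketch of the new native flow, abridged from the
gr_init_setup_sw() hunks in the diff below (error handling trimmed):

	/* Query all sizes from FECS once; they land in falcon->sizes. */
	err = nvgpu_gr_falcon_init_ctx_state(g, g->gr->falcon);

	/* Hand each size to the unit that owns the corresponding buffer. */
	err = nvgpu_gr_obj_ctx_init(g, &gr->golden_image,
			nvgpu_gr_falcon_get_golden_image_size(g->gr->falcon));
	err = nvgpu_gr_hwpm_map_init(g, &g->gr->hwpm_map,
			nvgpu_gr_falcon_get_pm_ctxsw_image_size(g->gr->falcon));
	err = nvgpu_gr_zcull_init(g, &gr->zcull,
			nvgpu_gr_falcon_get_zcull_image_size(g->gr->falcon),
			g->gr->config);
	nvgpu_gr_ctx_set_size(g->gr->gr_ctx_desc, NVGPU_GR_CTX_PREEMPT_CTXSW,
			nvgpu_gr_falcon_get_preempt_image_size(g->gr->falcon));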
vGPU likewise fills struct nvgpu_gr_falcon_query_sizes with all the sizes, and
then the same APIs are used to set the sizes in the respective units; see the
sketch below.
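
A similarly condensed sketch of the vGPU path, abridged from
vgpu_gr_init_ctx_state() in the diff below (error checks trimmed):

	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);

	/* On vGPU the sizes come from constants handed over by the
	 * vGPU server, not from FECS. */
	sizes->golden_image_size = priv->constants.golden_ctx_size;
	sizes->pm_ctxsw_image_size = priv->constants.hwpm_ctx_size;
	sizes->zcull_image_size = priv->constants.zcull_ctx_size;
	sizes->preempt_image_size = priv->constants.preempt_ctx_size;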
All of the above means the size variables in the gr_gk20a.ctx_vars struct are
no longer referenced. Delete them.
Jira NVGPU-3112
Change-Id: I8b8e64ee0840c3bdefabc8ee739e53a30791f2b3
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2103478
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 45c56fd633
commit f8b3d50360
@@ -374,7 +374,7 @@ static int nvgpu_gr_init_ctx_state(struct gk20a *g)
 		return 0;
 	}
 
-	return nvgpu_gr_falcon_init_ctx_state(g);
+	return nvgpu_gr_falcon_init_ctx_state(g, g->gr->falcon);
 }
 
 static int gr_init_setup_sw(struct gk20a *g)
@@ -407,7 +407,7 @@ static int gr_init_setup_sw(struct gk20a *g)
 #endif
 
 	err = nvgpu_gr_obj_ctx_init(g, &gr->golden_image,
-			g->gr->ctx_vars.golden_image_size);
+			nvgpu_gr_falcon_get_golden_image_size(g->gr->falcon));
 	if (err != 0) {
 		goto clean_up;
 	}
@@ -418,7 +418,7 @@ static int gr_init_setup_sw(struct gk20a *g)
 	}
 
 	err = nvgpu_gr_hwpm_map_init(g, &g->gr->hwpm_map,
-			g->gr->ctx_vars.pm_ctxsw_image_size);
+			nvgpu_gr_falcon_get_pm_ctxsw_image_size(g->gr->falcon));
 	if (err != 0) {
 		nvgpu_err(g, "hwpm_map init failed");
 		goto clean_up;
@@ -430,7 +430,8 @@ static int gr_init_setup_sw(struct gk20a *g)
 	}
 
 	err = nvgpu_gr_zcull_init(g, &gr->zcull,
-			gr->ctx_vars.zcull_image_size, gr->config);
+			nvgpu_gr_falcon_get_zcull_image_size(g->gr->falcon),
+			g->gr->config);
 	if (err != 0) {
 		goto clean_up;
 	}
@@ -440,6 +441,9 @@ static int gr_init_setup_sw(struct gk20a *g)
 		goto clean_up;
 	}
 
+	nvgpu_gr_ctx_set_size(g->gr->gr_ctx_desc, NVGPU_GR_CTX_PREEMPT_CTXSW,
+		nvgpu_gr_falcon_get_preempt_image_size(g->gr->falcon));
+
 	gr->global_ctx_buffer = nvgpu_gr_global_ctx_desc_alloc(g);
 	if (gr->global_ctx_buffer == NULL) {
 		goto clean_up;
@@ -151,15 +151,16 @@ out:
 	return err;
 }
 
-int nvgpu_gr_falcon_init_ctx_state(struct gk20a *g)
+int nvgpu_gr_falcon_init_ctx_state(struct gk20a *g,
+		struct nvgpu_gr_falcon *falcon)
 {
+	struct nvgpu_gr_falcon_query_sizes *sizes = &falcon->sizes;
 	int err = 0;
 
 	nvgpu_log_fn(g, " ");
 
 	/* fecs init ramchain */
-	err = g->ops.gr.falcon.init_ctx_state(g);
+	err = g->ops.gr.falcon.init_ctx_state(g, sizes);
 	if (err != 0) {
 		goto out;
 	}
@@ -174,6 +175,26 @@ out:
 	return err;
 }
 
+u32 nvgpu_gr_falcon_get_golden_image_size(struct nvgpu_gr_falcon *falcon)
+{
+	return falcon->sizes.golden_image_size;
+}
+
+u32 nvgpu_gr_falcon_get_pm_ctxsw_image_size(struct nvgpu_gr_falcon *falcon)
+{
+	return falcon->sizes.pm_ctxsw_image_size;
+}
+
+u32 nvgpu_gr_falcon_get_preempt_image_size(struct nvgpu_gr_falcon *falcon)
+{
+	return falcon->sizes.preempt_image_size;
+}
+
+u32 nvgpu_gr_falcon_get_zcull_image_size(struct nvgpu_gr_falcon *falcon)
+{
+	return falcon->sizes.zcull_image_size;
+}
+
 static int nvgpu_gr_falcon_init_ctxsw_ucode_vaspace(struct gk20a *g,
 		struct nvgpu_gr_falcon *falcon)
 {
@@ -65,10 +65,19 @@ struct nvgpu_ctxsw_ucode_info {
 	struct nvgpu_ctxsw_ucode_segments gpccs;
 };
 
+struct nvgpu_gr_falcon_query_sizes {
+	u32 golden_image_size;
+	u32 pm_ctxsw_image_size;
+	u32 preempt_image_size;
+	u32 zcull_image_size;
+};
+
 struct nvgpu_gr_falcon {
 	struct nvgpu_ctxsw_ucode_info ctxsw_ucode_info;
 	struct nvgpu_mutex fecs_mutex; /* protect fecs method */
 	bool skip_ucode_init;
+
+	struct nvgpu_gr_falcon_query_sizes sizes;
 };
 
 enum wait_ucode_status {
@@ -43,15 +43,6 @@ struct gr_channel_map_tlb_entry {
 
 struct nvgpu_gr {
 	struct gk20a *g;
-	struct {
-		u32 golden_image_size;
-
-		u32 pm_ctxsw_image_size;
-
-		u32 preempt_image_size;
-
-		u32 zcull_image_size;
-	} ctx_vars;
 
 	struct nvgpu_cond init_wq;
 	bool initialized;
@@ -149,9 +149,6 @@ int nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(struct gk20a *g,
 	nvgpu_log_info(g, "gfxp context attrib_cb_size=%d",
 		attrib_cb_size);
 
-	nvgpu_gr_ctx_set_size(gr_ctx_desc,
-		NVGPU_GR_CTX_PREEMPT_CTXSW,
-		g->gr->ctx_vars.preempt_image_size);
 	nvgpu_gr_ctx_set_size(gr_ctx_desc,
 		NVGPU_GR_CTX_SPILL_CTXSW, spill_size);
 	nvgpu_gr_ctx_set_size(gr_ctx_desc,
@@ -37,6 +37,7 @@
 #include <nvgpu/gr/global_ctx.h>
 #include <nvgpu/gr/ctx.h>
 #include <nvgpu/gr/config.h>
+#include <nvgpu/gr/gr_falcon.h>
 #include <nvgpu/gr/zbc.h>
 #include <nvgpu/gr/zcull.h>
 #include <nvgpu/gr/fecs_trace.h>
@@ -55,6 +56,7 @@
 #include "common/vgpu/ivc/comm_vgpu.h"
 
 #include "common/gr/gr_config_priv.h"
+#include "common/gr/gr_falcon_priv.h"
 #include "common/gr/ctx_priv.h"
 #include "common/gr/zcull_priv.h"
 #include "common/gr/zbc_priv.h"
@@ -135,27 +137,28 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
 	return (err || msg.ret) ? -1 : 0;
 }
 
-int vgpu_gr_init_ctx_state(struct gk20a *g)
+int vgpu_gr_init_ctx_state(struct gk20a *g,
+		struct nvgpu_gr_falcon_query_sizes *sizes)
 {
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
 
 	nvgpu_log_fn(g, " ");
 
-	g->gr->ctx_vars.golden_image_size = priv->constants.golden_ctx_size;
-	g->gr->ctx_vars.pm_ctxsw_image_size = priv->constants.hwpm_ctx_size;
-	if (!g->gr->ctx_vars.golden_image_size ||
-		!g->gr->ctx_vars.pm_ctxsw_image_size) {
+	sizes->golden_image_size = priv->constants.golden_ctx_size;
+	sizes->pm_ctxsw_image_size = priv->constants.hwpm_ctx_size;
+	if (!sizes->golden_image_size ||
+		!sizes->pm_ctxsw_image_size) {
 		return -ENXIO;
 	}
 
-	g->gr->ctx_vars.zcull_image_size = priv->constants.zcull_ctx_size;
-	if (g->gr->ctx_vars.zcull_image_size == 0U) {
+	sizes->zcull_image_size = priv->constants.zcull_ctx_size;
+	if (sizes->zcull_image_size == 0U) {
 		return -ENXIO;
 	}
 
-	g->gr->ctx_vars.preempt_image_size =
+	sizes->preempt_image_size =
 		priv->constants.preempt_ctx_size;
-	if (!g->gr->ctx_vars.preempt_image_size) {
+	if (!sizes->preempt_image_size) {
 		return -EINVAL;
 	}
@@ -714,7 +717,15 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 	nvgpu_mutex_init(&g->gr->cs_lock);
 #endif
 
-	err = g->ops.gr.falcon.init_ctx_state(g);
+	if (gr->falcon == NULL) {
+		gr->falcon = nvgpu_gr_falcon_init_support(g);
+		if (gr->falcon == NULL) {
+			err = -ENOMEM;
+			goto clean_up;
+		}
+	}
+
+	err = g->ops.gr.falcon.init_ctx_state(g, &gr->falcon->sizes);
 	if (err) {
 		goto clean_up;
 	}
@@ -725,19 +736,20 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 	}
 
 	err = nvgpu_gr_obj_ctx_init(g, &gr->golden_image,
-			g->gr->ctx_vars.golden_image_size);
+			nvgpu_gr_falcon_get_golden_image_size(g->gr->falcon));
 	if (err != 0) {
 		goto clean_up;
 	}
 
 	err = nvgpu_gr_hwpm_map_init(g, &g->gr->hwpm_map,
-			g->gr->ctx_vars.pm_ctxsw_image_size);
+			nvgpu_gr_falcon_get_pm_ctxsw_image_size(g->gr->falcon));
 	if (err != 0) {
 		nvgpu_err(g, "hwpm_map init failed");
 		goto clean_up;
 	}
 
-	err = vgpu_gr_init_gr_zcull(g, gr, gr->ctx_vars.zcull_image_size);
+	err = vgpu_gr_init_gr_zcull(g, gr,
+			nvgpu_gr_falcon_get_zcull_image_size(g->gr->falcon));
 	if (err) {
 		goto clean_up;
 	}
@@ -752,6 +764,9 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 		goto clean_up;
 	}
 
+	nvgpu_gr_ctx_set_size(gr->gr_ctx_desc, NVGPU_GR_CTX_PREEMPT_CTXSW,
+		nvgpu_gr_falcon_get_preempt_image_size(g->gr->falcon));
+
 	nvgpu_spinlock_init(&gr->ch_tlb_lock);
 
 	gr->remove_support = vgpu_remove_gr_support;
@@ -1326,15 +1341,12 @@ static int vgpu_gr_set_ctxsw_preemption_mode(struct gk20a *g,
 	struct nvgpu_mem *desc;
 
 	nvgpu_log_info(g, "gfxp context preempt size=%d",
-		g->gr->ctx_vars.preempt_image_size);
+		g->gr->falcon->sizes.preempt_image_size);
 	nvgpu_log_info(g, "gfxp context spill size=%d", spill_size);
 	nvgpu_log_info(g, "gfxp context pagepool size=%d", pagepool_size);
 	nvgpu_log_info(g, "gfxp context attrib cb size=%d",
 		attrib_cb_size);
 
-	nvgpu_gr_ctx_set_size(g->gr->gr_ctx_desc,
-		NVGPU_GR_CTX_PREEMPT_CTXSW,
-		g->gr->ctx_vars.preempt_image_size);
 	nvgpu_gr_ctx_set_size(g->gr->gr_ctx_desc,
 		NVGPU_GR_CTX_SPILL_CTXSW, spill_size);
 	nvgpu_gr_ctx_set_size(g->gr->gr_ctx_desc,
@@ -39,9 +39,11 @@ struct nvgpu_gr_ctx;
 struct nvgpu_gr_zcull;
 struct tegra_vgpu_gr_intr_info;
 struct tegra_vgpu_sm_esr_info;
+struct nvgpu_gr_falcon_query_sizes;
 
 void vgpu_gr_detect_sm_arch(struct gk20a *g);
-int vgpu_gr_init_ctx_state(struct gk20a *g);
+int vgpu_gr_init_ctx_state(struct gk20a *g,
+		struct nvgpu_gr_falcon_query_sizes *sizes);
 int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g);
 void vgpu_gr_free_channel_ctx(struct channel_gk20a *c, bool is_tsg);
 void vgpu_gr_free_tsg_ctx(struct tsg_gk20a *tsg);
@@ -661,7 +661,8 @@ int gm20b_gr_falcon_wait_ctxsw_ready(struct gk20a *g)
 	return 0;
 }
 
-int gm20b_gr_falcon_init_ctx_state(struct gk20a *g)
+int gm20b_gr_falcon_init_ctx_state(struct gk20a *g,
+		struct nvgpu_gr_falcon_query_sizes *sizes)
 {
 	int ret;
 
@@ -669,7 +670,7 @@ int gm20b_gr_falcon_init_ctx_state(struct gk20a *g)
 
 	ret = gm20b_gr_falcon_ctrl_ctxsw(g,
 		NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_IMAGE_SIZE,
-		0, &g->gr->ctx_vars.golden_image_size);
+		0, &sizes->golden_image_size);
 	if (ret != 0) {
 		nvgpu_err(g,
 			"query golden image size failed");
@@ -677,7 +678,7 @@ int gm20b_gr_falcon_init_ctx_state(struct gk20a *g)
 	}
 	ret = gm20b_gr_falcon_ctrl_ctxsw(g,
 		NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_PM_IMAGE_SIZE,
-		0, &g->gr->ctx_vars.pm_ctxsw_image_size);
+		0, &sizes->pm_ctxsw_image_size);
 	if (ret != 0) {
 		nvgpu_err(g,
 			"query pm ctx image size failed");
@@ -685,7 +686,7 @@ int gm20b_gr_falcon_init_ctx_state(struct gk20a *g)
 	}
 	ret = gm20b_gr_falcon_ctrl_ctxsw(g,
 		NVGPU_GR_FALCON_METHOD_CTXSW_DISCOVER_ZCULL_IMAGE_SIZE,
-		0, &g->gr->ctx_vars.zcull_image_size);
+		0, &sizes->zcull_image_size);
 	if (ret != 0) {
 		nvgpu_err(g,
 			"query zcull ctx image size failed");
@@ -28,6 +28,7 @@
 struct gk20a;
 struct nvgpu_fecs_method_op;
 struct nvgpu_fecs_host_intr_status;
+struct nvgpu_gr_falcon_query_sizes;
 
 u32 gm20b_gr_falcon_read_fecs_ctxsw_mailbox(struct gk20a *g, u32 reg_index);
 void gm20b_gr_falcon_fecs_host_clear_intr(struct gk20a *g, u32 fecs_intr);
@@ -72,7 +73,8 @@ u32 gm20b_gr_falcon_get_current_ctx(struct gk20a *g);
 u32 gm20b_gr_falcon_get_ctx_ptr(u32 ctx);
 u32 gm20b_gr_falcon_get_fecs_current_ctx_data(struct gk20a *g,
 		struct nvgpu_mem *inst_block);
-int gm20b_gr_falcon_init_ctx_state(struct gk20a *g);
+int gm20b_gr_falcon_init_ctx_state(struct gk20a *g,
+		struct nvgpu_gr_falcon_query_sizes *sizes);
 void gm20b_gr_falcon_fecs_host_int_enable(struct gk20a *g);
 u32 gm20b_gr_falcon_read_fecs_ctxsw_status0(struct gk20a *g);
 u32 gm20b_gr_falcon_read_fecs_ctxsw_status1(struct gk20a *g);
@@ -30,29 +30,27 @@
 
 #include <nvgpu/hw/gp10b/hw_gr_gp10b.h>
 
-int gp10b_gr_falcon_init_ctx_state(struct gk20a *g)
+int gp10b_gr_falcon_init_ctx_state(struct gk20a *g,
+		struct nvgpu_gr_falcon_query_sizes *sizes)
 {
 	int err;
 
 	nvgpu_log_fn(g, " ");
 
-	err = gm20b_gr_falcon_init_ctx_state(g);
+	err = gm20b_gr_falcon_init_ctx_state(g, sizes);
 	if (err != 0) {
 		return err;
 	}
 
-	if (g->gr->ctx_vars.preempt_image_size == 0U) {
-		err = g->ops.gr.falcon.ctrl_ctxsw(g,
-			NVGPU_GR_FALCON_METHOD_PREEMPT_IMAGE_SIZE, 0U,
-			&g->gr->ctx_vars.preempt_image_size);
-		if (err != 0) {
-			nvgpu_err(g, "query preempt image size failed");
-			return err;
-		}
-	}
+	err = g->ops.gr.falcon.ctrl_ctxsw(g,
+		NVGPU_GR_FALCON_METHOD_PREEMPT_IMAGE_SIZE, 0U,
+		&sizes->preempt_image_size);
+	if (err != 0) {
+		nvgpu_err(g, "query preempt image size failed");
+		return err;
+	}
 
-	nvgpu_log_info(g, "preempt image size: %u",
-		g->gr->ctx_vars.preempt_image_size);
+	nvgpu_log_info(g, "preempt image size: %u", sizes->preempt_image_size);
 
 	nvgpu_log_fn(g, "done");
@@ -26,8 +26,10 @@
 #include <nvgpu/types.h>
 
 struct gk20a;
+struct nvgpu_gr_falcon_query_sizes;
 
 int gp10b_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
 	u32 data, u32 *ret_val);
-int gp10b_gr_falcon_init_ctx_state(struct gk20a *g);
+int gp10b_gr_falcon_init_ctx_state(struct gk20a *g,
+		struct nvgpu_gr_falcon_query_sizes *sizes);
 #endif /* NVGPU_GR_FALCON_GP10B_H */
@@ -57,6 +57,7 @@ struct boardobjgrp;
 struct boardobjgrp_pmu_cmd;
 struct boardobjgrpmask;
 struct nvgpu_gr_falcon;
+struct nvgpu_gr_falcon_query_sizes;
 struct nvgpu_sgt;
 struct nvgpu_sgl;
 struct nvgpu_device_info;
@@ -586,7 +587,8 @@ struct gpu_ops {
 		u32 (*get_ctx_ptr)(u32 ctx);
 		u32 (*get_fecs_current_ctx_data)(struct gk20a *g,
 			struct nvgpu_mem *inst_block);
-		int (*init_ctx_state)(struct gk20a *g);
+		int (*init_ctx_state)(struct gk20a *g,
+			struct nvgpu_gr_falcon_query_sizes *sizes);
 		void (*fecs_host_int_enable)(struct gk20a *g);
 		u32 (*read_fecs_ctxsw_status0)(struct gk20a *g);
 		u32 (*read_fecs_ctxsw_status1)(struct gk20a *g);
@@ -87,7 +87,8 @@ void nvgpu_gr_falcon_remove_support(struct gk20a *g,
 		struct nvgpu_gr_falcon *falcon);
 int nvgpu_gr_falcon_bind_fecs_elpg(struct gk20a *g);
 int nvgpu_gr_falcon_init_ctxsw(struct gk20a *g, struct nvgpu_gr_falcon *falcon);
-int nvgpu_gr_falcon_init_ctx_state(struct gk20a *g);
+int nvgpu_gr_falcon_init_ctx_state(struct gk20a *g,
+		struct nvgpu_gr_falcon *falcon);
 int nvgpu_gr_falcon_init_ctxsw_ucode(struct gk20a *g,
 		struct nvgpu_gr_falcon *falcon);
 int nvgpu_gr_falcon_load_ctxsw_ucode(struct gk20a *g,
@@ -104,4 +105,9 @@ struct nvgpu_ctxsw_ucode_segments *nvgpu_gr_falcon_get_gpccs_ucode_segments(
 void *nvgpu_gr_falcon_get_surface_desc_cpu_va(
 		struct nvgpu_gr_falcon *falcon);
 
+u32 nvgpu_gr_falcon_get_golden_image_size(struct nvgpu_gr_falcon *falcon);
+u32 nvgpu_gr_falcon_get_pm_ctxsw_image_size(struct nvgpu_gr_falcon *falcon);
+u32 nvgpu_gr_falcon_get_preempt_image_size(struct nvgpu_gr_falcon *falcon);
+u32 nvgpu_gr_falcon_get_zcull_image_size(struct nvgpu_gr_falcon *falcon);
+
 #endif /* NVGPU_GR_FALCON_H */
@@ -25,6 +25,7 @@
 #include <nvgpu/gr/global_ctx.h>
 #include <nvgpu/gr/config.h>
 #include <nvgpu/gr/obj_ctx.h>
+#include <nvgpu/gr/gr_falcon.h>
 #include <nvgpu/power_features/cg.h>
 #include <nvgpu/power_features/pg.h>
 #include <nvgpu/pmu/pmu_perfmon.h>
@@ -894,6 +895,9 @@ static ssize_t tpc_fs_mask_store(struct device *dev,
 	nvgpu_gr_obj_ctx_deinit(g, g->gr->golden_image);
 	g->gr->golden_image = NULL;
 
+	nvgpu_gr_falcon_remove_support(g, g->gr->falcon);
+	g->gr->falcon = NULL;
+
 	nvgpu_gr_config_deinit(g, g->gr->config);
 	/* Cause next poweron to reinit just gr */
 	g->gr->sw_ready = false;