mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 10:34:43 +03:00
gpu: nvgpu: compile out unused code in gr unit
Compile out code not used in safety for gr subunit. The code is used only with dgpu support. Jira NVGPU-3968 Change-Id: I7be5b06c6eed5a6d382016f1ccb5dbec63928294 Signed-off-by: vinodg <vinodg@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/2247146 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
@@ -365,6 +365,7 @@ int nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
|
||||
|
||||
g->ops.gr.init.commit_global_cb_manager(g, config, gr_ctx, patch);
|
||||
|
||||
#ifdef CONFIG_NVGPU_DGPU
|
||||
if (g->ops.gr.init.commit_rtv_cb != NULL) {
|
||||
/* RTV circular buffer */
|
||||
addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
|
||||
@@ -372,6 +373,7 @@ int nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
|
||||
|
||||
g->ops.gr.init.commit_rtv_cb(g, addr, gr_ctx, patch);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (patch) {
|
||||
nvgpu_gr_ctx_patch_write_end(g, gr_ctx, false);
|
||||
@@ -382,13 +384,15 @@ int nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
|
||||
|
||||
static int nvgpu_gr_obj_ctx_alloc_sw_bundle(struct gk20a *g)
|
||||
{
|
||||
int err = 0;
|
||||
struct netlist_av_list *sw_bundle_init =
|
||||
nvgpu_netlist_get_sw_bundle_init_av_list(g);
|
||||
struct netlist_av_list *sw_veid_bundle_init =
|
||||
nvgpu_netlist_get_sw_veid_bundle_init_av_list(g);
|
||||
#ifdef CONFIG_NVGPU_DGPU
|
||||
struct netlist_av64_list *sw_bundle64_init =
|
||||
nvgpu_netlist_get_sw_bundle64_init_av64_list(g);
|
||||
int err = 0;
|
||||
#endif
|
||||
|
||||
/* enable pipe mode override */
|
||||
g->ops.gr.init.pipe_mode_override(g, true);
|
||||
@@ -407,12 +411,14 @@ static int nvgpu_gr_obj_ctx_alloc_sw_bundle(struct gk20a *g)
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NVGPU_DGPU
|
||||
if (g->ops.gr.init.load_sw_bundle64 != NULL) {
|
||||
err = g->ops.gr.init.load_sw_bundle64(g, sw_bundle64_init);
|
||||
if (err != 0) {
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* disable pipe mode override */
|
||||
g->ops.gr.init.pipe_mode_override(g, false);
|
||||
|
||||
@@ -116,17 +116,19 @@ void nvgpu_gr_subctx_load_ctx_header(struct gk20a *g,
|
||||
g->ops.gr.ctxsw_prog.set_type_per_veid_header(g, ctxheader);
|
||||
}
|
||||
|
||||
/**
 * @brief Get pointer to the subcontext header memory struct.
 *
 * @param subctx [in] Pointer to graphics subcontext struct. Must be non-NULL
 *                    (dereferenced unconditionally; no validation here).
 *
 * Plain accessor: returns the address of the #nvgpu_mem embedded in
 * #nvgpu_gr_subctx. No copy is made — the returned pointer aliases
 * subctx->ctx_header and is valid only for the lifetime of @a subctx.
 *
 * @return pointer to subcontext header memory struct.
 */
struct nvgpu_mem *nvgpu_gr_subctx_get_ctx_header(struct nvgpu_gr_subctx *subctx)
|
||||
{
|
||||
return &subctx->ctx_header;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
|
||||
/**
 * @brief Program the patch-context buffer address into the subcontext header.
 *
 * @param g      [in] The GPU driver struct.
 * @param subctx [in] Graphics subcontext whose context header is written.
 * @param gr_ctx [in] Graphics context supplying the patch buffer.
 *
 * Reads the GPU virtual address of the patch context buffer
 * (nvgpu_gr_ctx_get_patch_ctx_mem(gr_ctx)->gpu_va) and writes it into
 * subctx->ctx_header via the chip-specific ctxsw_prog.set_patch_addr HAL.
 * NOTE(review): no NULL check on the HAL pointer or on the patch ctx mem —
 * presumably callers guarantee both are initialized; confirm against callers.
 */
void nvgpu_gr_subctx_set_patch_ctx(struct gk20a *g,
|
||||
struct nvgpu_gr_subctx *subctx, struct nvgpu_gr_ctx *gr_ctx)
|
||||
{
|
||||
g->ops.gr.ctxsw_prog.set_patch_addr(g, &subctx->ctx_header,
|
||||
nvgpu_gr_ctx_get_patch_ctx_mem(gr_ctx)->gpu_va);
|
||||
}
|
||||
|
||||
/*
 * NOTE(review): this is a second, textually identical definition of
 * nvgpu_gr_subctx_get_ctx_header — the same function already appears
 * unconditionally earlier in this file. This copy sits inside the
 * CONFIG_NVGPU_HAL_NON_FUSA block, so with that config enabled the
 * translation unit would contain a duplicate definition. This page renders
 * a diff with +/- markers stripped, so this copy is most likely the
 * pre-move (removed) instance of a relocated function — verify against the
 * actual commit (Change-Id: I7be5b06c6eed5a6d382016f1ccb5dbec63928294)
 * before treating it as live code.
 */
struct nvgpu_mem *nvgpu_gr_subctx_get_ctx_header(struct nvgpu_gr_subctx *subctx)
|
||||
{
|
||||
return &subctx->ctx_header;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_NVGPU_GRAPHICS
|
||||
void nvgpu_gr_subctx_zcull_setup(struct gk20a *g, struct nvgpu_gr_subctx *subctx,
|
||||
|
||||
@@ -645,12 +645,8 @@ struct gops_gr_init {
|
||||
struct netlist_av_list *sw_method_init);
|
||||
int (*load_sw_veid_bundle)(struct gk20a *g,
|
||||
struct netlist_av_list *sw_method_init);
|
||||
int (*load_sw_bundle64)(struct gk20a *g,
|
||||
struct netlist_av64_list *sw_bundle64_init);
|
||||
void (*commit_global_timeslice)(struct gk20a *g);
|
||||
u32 (*get_rtv_cb_size)(struct gk20a *g);
|
||||
void (*commit_rtv_cb)(struct gk20a *g, u64 addr,
|
||||
struct nvgpu_gr_ctx *gr_ctx, bool patch);
|
||||
u32 (*get_bundle_cb_default_size)(struct gk20a *g);
|
||||
u32 (*get_min_gpm_fifo_depth)(struct gk20a *g);
|
||||
u32 (*get_bundle_cb_token_limit)(struct gk20a *g);
|
||||
@@ -690,7 +686,12 @@ struct gops_gr_init {
|
||||
bool patch);
|
||||
u32 (*get_patch_slots)(struct gk20a *g,
|
||||
struct nvgpu_gr_config *config);
|
||||
|
||||
#ifdef CONFIG_NVGPU_DGPU
|
||||
int (*load_sw_bundle64)(struct gk20a *g,
|
||||
struct netlist_av64_list *sw_bundle64_init);
|
||||
void (*commit_rtv_cb)(struct gk20a *g, u64 addr,
|
||||
struct nvgpu_gr_ctx *gr_ctx, bool patch);
|
||||
#endif
|
||||
#ifdef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
|
||||
void (*restore_stats_counter_bundle_data)(struct gk20a *g,
|
||||
struct netlist_av_list *sw_bundle_init);
|
||||
|
||||
@@ -86,6 +86,19 @@ void nvgpu_gr_subctx_load_ctx_header(struct gk20a *g,
|
||||
struct nvgpu_gr_subctx *subctx,
|
||||
struct nvgpu_gr_ctx *gr_ctx, u64 gpu_va);
|
||||
|
||||
/**
|
||||
* @brief Get pointer of subcontext header memory struct.
|
||||
*
|
||||
* @param subctx [in] Pointer to graphics subcontext struct.
|
||||
*
|
||||
* This function returns #nvgpu_mem pointer of subcontext header stored
|
||||
* in #nvgpu_gr_subctx.
|
||||
*
|
||||
* @return pointer to subcontext header memory struct.
|
||||
*/
|
||||
struct nvgpu_mem *nvgpu_gr_subctx_get_ctx_header(struct nvgpu_gr_subctx *subctx);
|
||||
|
||||
#ifdef CONFIG_NVGPU_HAL_NON_FUSA
|
||||
/**
|
||||
* @brief Set patch context buffer address in subcontext header.
|
||||
*
|
||||
@@ -98,18 +111,7 @@ void nvgpu_gr_subctx_load_ctx_header(struct gk20a *g,
|
||||
*/
|
||||
void nvgpu_gr_subctx_set_patch_ctx(struct gk20a *g,
|
||||
struct nvgpu_gr_subctx *subctx, struct nvgpu_gr_ctx *gr_ctx);
|
||||
|
||||
/**
|
||||
* @brief Get pointer of subcontext header memory struct.
|
||||
*
|
||||
* @param subctx [in] Pointer to graphics subcontext struct.
|
||||
*
|
||||
* This function returns #nvgpu_mem pointer of subcontext header stored
|
||||
* in #nvgpu_gr_subctx.
|
||||
*
|
||||
* @return pointer to subcontext header memory struct.
|
||||
*/
|
||||
struct nvgpu_mem *nvgpu_gr_subctx_get_ctx_header(struct nvgpu_gr_subctx *subctx);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_NVGPU_GRAPHICS
|
||||
void nvgpu_gr_subctx_zcull_setup(struct gk20a *g, struct nvgpu_gr_subctx *subctx,
|
||||
|
||||
Reference in New Issue
Block a user