gpu: nvgpu: update gr code for safety build

Move code that is used only for graphics under the
CONFIG_NVGPU_GRAPHICS check.
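
A minimal sketch of the pattern (the hook names are taken from the hunks
below; the surrounding struct layout is abridged and illustrative):

struct gops_gr_init {
	/* ... hooks needed by every build ... */
	u32 (*get_patch_slots)(struct gk20a *g,
			struct nvgpu_gr_config *config);
#ifdef CONFIG_NVGPU_GRAPHICS
	/* graphics-only hooks: compiled out of the safety build */
	u32 (*get_ctx_attrib_cb_size)(struct gk20a *g, u32 betacb_size,
			u32 tpc_count, u32 max_tpc);
	void (*commit_cbes_reserve)(struct gk20a *g,
			struct nvgpu_gr_ctx *gr_ctx, bool patch);
#endif
	/* ... */
};

The per-chip gpu_ops tables (gp10b_ops, gv11b_ops, tu104_ops below) get a
matching #ifdef CONFIG_NVGPU_GRAPHICS block around the corresponding
assignments, so a safety build neither declares nor references these hooks.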

The gm20b_gr_init_load_sw_bundle_init HAL gets called
without a CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION check,
so build it only under #ifndef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION.
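
A sketch of the resulting split, assuming header declarations that mirror
the definitions in the hunks below (the gm20b variant serves the path
without golden-context verification, the gv11b variant the verified path):

#ifndef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
int gm20b_gr_init_load_sw_bundle_init(struct gk20a *g,
		struct netlist_av_list *sw_bundle_init);
#endif

#ifdef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
int gv11b_gr_init_load_sw_bundle_init(struct gk20a *g,
		struct netlist_av_list *sw_bundle_init);
#endif

The load_sw_bundle_init HAL pointer can then be wired to whichever variant
is compiled in for a given configuration.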

Remove dead code from the
nvgpu_gr_ctx_check_valid_preemption_mode() function.
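
A before/after sketch of the affected block, assuming the enclosing guard
is #ifndef CONFIG_NVGPU_GRAPHICS (only its closing #endif is visible in
the first hunk below) and that the removed lines are the
compute_preempt_mode test shown there:

	/* before */
#ifndef CONFIG_NVGPU_GRAPHICS
	if (graphics_preempt_mode != 0U) {
		return false;
	}
	if (compute_preempt_mode == 0U) {	/* identified as dead code */
		return false;
	}
#endif

	/* after */
#ifndef CONFIG_NVGPU_GRAPHICS
	if (graphics_preempt_mode != 0U) {
		return false;
	}
#endif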

Jira NVGPU-3968

Change-Id: I399126123006ae44dba29b3c08378d11fe82e543
Signed-off-by: vinodg <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2247346
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

Author: vinodg
Date: 2019-11-25 15:45:00 -08:00
Committed by: Alex Waterman
Parent: c50de751dd
Commit: b5ab4342fd
10 changed files with 89 additions and 87 deletions

@@ -597,10 +597,6 @@ bool nvgpu_gr_ctx_check_valid_preemption_mode(struct nvgpu_gr_ctx *gr_ctx,
	if (graphics_preempt_mode != 0U) {
		return false;
	}
	if (compute_preempt_mode == 0U) {
		return false;
	}
#endif
#ifndef CONFIG_NVGPU_CILP

@@ -105,7 +105,7 @@ static int nvgpu_gr_setup_validate_channel_and_class(struct gk20a *g,
	int err = 0;
	/* an address space needs to have been bound at this point.*/
	if (!nvgpu_channel_as_bound(c) && (c->vm == NULL)) {
	if (!nvgpu_channel_as_bound(c)) {
		nvgpu_err(g,
			"not bound to address space at time"
			" of grctx allocation");

@@ -397,40 +397,6 @@ void gm20b_gr_init_load_method_init(struct gk20a *g,
	}
}

int gm20b_gr_init_load_sw_bundle_init(struct gk20a *g,
		struct netlist_av_list *sw_bundle_init)
{
	u32 i;
	int err = 0;
	u32 last_bundle_data = 0U;

	for (i = 0U; i < sw_bundle_init->count; i++) {
		if (i == 0U || last_bundle_data != sw_bundle_init->l[i].value) {
			nvgpu_writel(g, gr_pipe_bundle_data_r(),
				sw_bundle_init->l[i].value);
			last_bundle_data = sw_bundle_init->l[i].value;
		}
		nvgpu_writel(g, gr_pipe_bundle_address_r(),
			sw_bundle_init->l[i].addr);
		if (gr_pipe_bundle_address_value_v(sw_bundle_init->l[i].addr) ==
				GR_GO_IDLE_BUNDLE) {
			err = g->ops.gr.init.wait_idle(g);
			if (err != 0) {
				return err;
			}
		}
		err = g->ops.gr.init.wait_fe_idle(g);
		if (err != 0) {
			return err;
		}
	}

	return err;
}

u32 gm20b_gr_init_get_global_ctx_cb_buffer_size(struct gk20a *g)
{
	return nvgpu_safe_mult_u32(
@@ -475,3 +441,39 @@ u32 gm20b_gr_init_get_patch_slots(struct gk20a *g,
{
	return PATCH_CTX_SLOTS_PER_PAGE;
}

#ifndef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
int gm20b_gr_init_load_sw_bundle_init(struct gk20a *g,
		struct netlist_av_list *sw_bundle_init)
{
	u32 i;
	int err = 0;
	u32 last_bundle_data = 0U;

	for (i = 0U; i < sw_bundle_init->count; i++) {
		if (i == 0U || last_bundle_data != sw_bundle_init->l[i].value) {
			nvgpu_writel(g, gr_pipe_bundle_data_r(),
				sw_bundle_init->l[i].value);
			last_bundle_data = sw_bundle_init->l[i].value;
		}
		nvgpu_writel(g, gr_pipe_bundle_address_r(),
			sw_bundle_init->l[i].addr);
		if (gr_pipe_bundle_address_value_v(sw_bundle_init->l[i].addr) ==
				GR_GO_IDLE_BUNDLE) {
			err = g->ops.gr.init.wait_idle(g);
			if (err != 0) {
				return err;
			}
		}
		err = g->ops.gr.init.wait_fe_idle(g);
		if (err != 0) {
			return err;
		}
	}

	return err;
}
#endif

@@ -264,6 +264,7 @@ void gp10b_gr_init_commit_global_attrib_cb(struct gk20a *g,
		gr_gpcs_tpcs_tex_rm_cb_1_valid_true_f(), patch);
}

#ifdef CONFIG_NVGPU_GRAPHICS
void gp10b_gr_init_commit_cbes_reserve(struct gk20a *g,
		struct nvgpu_gr_ctx *gr_ctx, bool patch)
{
@@ -279,7 +280,6 @@ void gp10b_gr_init_commit_cbes_reserve(struct gk20a *g,
		patch);
}

#ifdef CONFIG_NVGPU_GRAPHICS
u32 gp10b_gr_init_get_attrib_cb_gfxp_default_size(struct gk20a *g)
{
	return nvgpu_safe_add_u32(

@@ -327,21 +327,6 @@ void gp10b_gr_init_commit_global_cb_manager(struct gk20a *g,
	}
}

u32 gp10b_gr_init_get_ctx_attrib_cb_size(struct gk20a *g, u32 betacb_size,
		u32 tpc_count, u32 max_tpc)
{
	u32 alpha_cb_size = g->ops.gr.init.get_alpha_cb_size(g, tpc_count);
	u32 size;

	size = nvgpu_safe_mult_u32(
		nvgpu_safe_add_u32(betacb_size, alpha_cb_size),
		nvgpu_safe_mult_u32(
			gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v(),
			max_tpc));

	return ALIGN(size, 128U);
}

void gp10b_gr_init_get_supported_preemption_modes(
		u32 *graphics_preemption_mode_flags, u32 *compute_preemption_mode_flags)
{
@@ -365,3 +350,20 @@ void gp10b_gr_init_get_default_preemption_modes(
	*default_graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_WFI;
	*default_compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_WFI;
}

#ifdef CONFIG_NVGPU_GRAPHICS
u32 gp10b_gr_init_get_ctx_attrib_cb_size(struct gk20a *g, u32 betacb_size,
		u32 tpc_count, u32 max_tpc)
{
	u32 alpha_cb_size = g->ops.gr.init.get_alpha_cb_size(g, tpc_count);
	u32 size;

	size = nvgpu_safe_mult_u32(
		nvgpu_safe_add_u32(betacb_size, alpha_cb_size),
		nvgpu_safe_mult_u32(
			gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v(),
			max_tpc));

	return ALIGN(size, 128U);
}
#endif

@@ -743,21 +743,6 @@ int gv11b_gr_init_load_sw_veid_bundle(struct gk20a *g,
	return err;
}

void gv11b_gr_init_commit_cbes_reserve(struct gk20a *g,
		struct nvgpu_gr_ctx *gr_ctx, bool patch)
{
	u32 cbes_reserve = gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_gfxp_v();

	nvgpu_gr_ctx_patch_write(g, gr_ctx,
		gr_gpcs_swdx_beta_cb_ctrl_r(),
		gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_f(cbes_reserve),
		patch);
	nvgpu_gr_ctx_patch_write(g, gr_ctx,
		gr_gpcs_ppcs_cbm_beta_cb_ctrl_r(),
		gr_gpcs_ppcs_cbm_beta_cb_ctrl_cbes_reserve_f(cbes_reserve),
		patch);
}

u32 gv11b_gr_init_get_max_subctx_count(void)
{
	return gr_pri_fe_chip_def_info_max_veid_count_init_v();
@@ -832,6 +817,23 @@ void gv11b_gr_init_detect_sm_arch(struct gk20a *g)
		gr_gpc0_tpc0_sm_arch_warp_count_v(v);
}

#ifdef CONFIG_NVGPU_GRAPHICS
void gv11b_gr_init_commit_cbes_reserve(struct gk20a *g,
		struct nvgpu_gr_ctx *gr_ctx, bool patch)
{
	u32 cbes_reserve = gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_gfxp_v();

	nvgpu_gr_ctx_patch_write(g, gr_ctx,
		gr_gpcs_swdx_beta_cb_ctrl_r(),
		gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_f(cbes_reserve),
		patch);
	nvgpu_gr_ctx_patch_write(g, gr_ctx,
		gr_gpcs_ppcs_cbm_beta_cb_ctrl_r(),
		gr_gpcs_ppcs_cbm_beta_cb_ctrl_cbes_reserve_f(cbes_reserve),
		patch);
}
#endif

#ifdef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
int gv11b_gr_init_load_sw_bundle_init(struct gk20a *g,
		struct netlist_av_list *sw_bundle_init)

@@ -499,10 +499,6 @@ static const struct gpu_ops gp10b_ops = {
			.pipe_mode_override = gm20b_gr_init_pipe_mode_override,
			.load_sw_bundle_init =
				gm20b_gr_init_load_sw_bundle_init,
			.get_ctx_attrib_cb_size =
				gp10b_gr_init_get_ctx_attrib_cb_size,
			.commit_cbes_reserve =
				gp10b_gr_init_commit_cbes_reserve,
			.get_patch_slots = gm20b_gr_init_get_patch_slots,
			.detect_sm_arch = gm20b_gr_init_detect_sm_arch,
			.get_supported__preemption_modes =
@@ -510,6 +506,10 @@ static const struct gpu_ops gp10b_ops = {
			.get_default_preemption_modes =
				gp10b_gr_init_get_default_preemption_modes,
#ifdef CONFIG_NVGPU_GRAPHICS
			.get_ctx_attrib_cb_size =
				gp10b_gr_init_get_ctx_attrib_cb_size,
			.commit_cbes_reserve =
				gp10b_gr_init_commit_cbes_reserve,
			.rop_mapping = gm20b_gr_init_rop_mapping,
			.get_attrib_cb_gfxp_default_size =
				gp10b_gr_init_get_attrib_cb_gfxp_default_size,

@@ -597,10 +597,6 @@ static const struct gpu_ops gv11b_ops = {
#endif
			.load_sw_veid_bundle =
				gv11b_gr_init_load_sw_veid_bundle,
			.get_ctx_attrib_cb_size =
				gp10b_gr_init_get_ctx_attrib_cb_size,
			.commit_cbes_reserve =
				gv11b_gr_init_commit_cbes_reserve,
			.get_max_subctx_count =
				gv11b_gr_init_get_max_subctx_count,
			.get_patch_slots = gv11b_gr_init_get_patch_slots,
@@ -610,6 +606,10 @@ static const struct gpu_ops gv11b_ops = {
			.get_default_preemption_modes =
				gp10b_gr_init_get_default_preemption_modes,
#ifdef CONFIG_NVGPU_GRAPHICS
			.get_ctx_attrib_cb_size =
				gp10b_gr_init_get_ctx_attrib_cb_size,
			.commit_cbes_reserve =
				gv11b_gr_init_commit_cbes_reserve,
			.rop_mapping = gv11b_gr_init_rop_mapping,
			.get_attrib_cb_gfxp_default_size =
				gv11b_gr_init_get_attrib_cb_gfxp_default_size,

@@ -628,10 +628,6 @@ static const struct gpu_ops tu104_ops = {
			.load_sw_veid_bundle =
				gv11b_gr_init_load_sw_veid_bundle,
			.load_sw_bundle64 = tu104_gr_init_load_sw_bundle64,
			.get_ctx_attrib_cb_size =
				gp10b_gr_init_get_ctx_attrib_cb_size,
			.commit_cbes_reserve =
				gv11b_gr_init_commit_cbes_reserve,
			.get_max_subctx_count =
				gv11b_gr_init_get_max_subctx_count,
			.get_patch_slots = gv11b_gr_init_get_patch_slots,
@@ -641,6 +637,10 @@ static const struct gpu_ops tu104_ops = {
			.get_default_preemption_modes =
				gp10b_gr_init_get_default_preemption_modes,
#ifdef CONFIG_NVGPU_GRAPHICS
			.get_ctx_attrib_cb_size =
				gp10b_gr_init_get_ctx_attrib_cb_size,
			.commit_cbes_reserve =
				gv11b_gr_init_commit_cbes_reserve,
			.rop_mapping = gv11b_gr_init_rop_mapping,
			.commit_gfxp_rtv_cb = tu104_gr_init_commit_gfxp_rtv_cb,
			.get_gfxp_rtv_cb_size = tu104_gr_init_get_gfxp_rtv_cb_size,

@@ -662,14 +662,9 @@ struct gops_gr_init {
			struct nvgpu_gr_ctx *gr_ctx,
			bool patch);
	void (*pipe_mode_override)(struct gk20a *g, bool enable);
	u32 (*get_ctx_attrib_cb_size)(struct gk20a *g, u32 betacb_size,
			u32 tpc_count, u32 max_tpc);
	void (*commit_ctxsw_spill)(struct gk20a *g,
			struct nvgpu_gr_ctx *gr_ctx,
			u64 addr, u32 size, bool patch);
	void (*commit_cbes_reserve)(struct gk20a *g,
			struct nvgpu_gr_ctx *gr_ctx,
			bool patch);
	u32 (*get_patch_slots)(struct gk20a *g,
			struct nvgpu_gr_config *config);
#ifdef CONFIG_NVGPU_DGPU
@@ -687,6 +682,11 @@ struct gops_gr_init {
			u32 **whitelist, u32 *num_entries);
#endif
#ifdef CONFIG_NVGPU_GRAPHICS
	u32 (*get_ctx_attrib_cb_size)(struct gk20a *g, u32 betacb_size,
			u32 tpc_count, u32 max_tpc);
	void (*commit_cbes_reserve)(struct gk20a *g,
			struct nvgpu_gr_ctx *gr_ctx,
			bool patch);
	void (*rop_mapping)(struct gk20a *g,
			struct nvgpu_gr_config *gr_config);
	void (*commit_gfxp_rtv_cb)(struct gk20a *g,