From cebefd7ea2e15e0fa1f0c23d3f61d59876dbe548 Mon Sep 17 00:00:00 2001
From: Deepak Nibade
Date: Wed, 5 May 2021 13:46:19 +0530
Subject: [PATCH] gpu: nvgpu: move RTV CB code to GRAPHICS config

Some of the RTV circular buffer programming is under GRAPHICS config
and some is under DGPU config.

For nvgpu-next, RTV circular buffer is required even for iGPU so
keeping the code under DGPU config does not make sense.

Move all the code from DGPU config to GRAPHICS config.

Bug 3159973

Change-Id: I8438cc0e25354d27701df2fe44762306a731d8cd
Signed-off-by: Deepak Nibade
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2524897
Tested-by: mobile promotions
Reviewed-by: mobile promotions
---
 drivers/gpu/nvgpu/common/gr/ctx.c             |  2 +-
 drivers/gpu/nvgpu/common/gr/global_ctx.c      |  2 +-
 drivers/gpu/nvgpu/common/gr/gr.c              |  2 +-
 drivers/gpu/nvgpu/common/gr/obj_ctx.c         |  2 +-
 drivers/gpu/nvgpu/hal/gr/init/gr_init_tu104.c | 78 +++++++++----------
 drivers/gpu/nvgpu/hal/gr/init/gr_init_tu104.h | 12 ++-
 drivers/gpu/nvgpu/hal/init/hal_tu104.c        |  2 +-
 drivers/gpu/nvgpu/include/nvgpu/gops/gr.h     |  6 +-
 8 files changed, 51 insertions(+), 55 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/gr/ctx.c b/drivers/gpu/nvgpu/common/gr/ctx.c
index cb9dbf303..f3a12d7be 100644
--- a/drivers/gpu/nvgpu/common/gr/ctx.c
+++ b/drivers/gpu/nvgpu/common/gr/ctx.c
@@ -385,7 +385,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 		nvgpu_err(g, "cannot map ctx pagepool buffer");
 		goto fail;
 	}
-#ifdef CONFIG_NVGPU_DGPU
+#ifdef CONFIG_NVGPU_GRAPHICS
 	/* RTV circular buffer */
 	if (nvgpu_gr_global_ctx_buffer_ready(global_ctx_buffer,
 			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER)) {
diff --git a/drivers/gpu/nvgpu/common/gr/global_ctx.c b/drivers/gpu/nvgpu/common/gr/global_ctx.c
index 4088999bf..9294bbc28 100644
--- a/drivers/gpu/nvgpu/common/gr/global_ctx.c
+++ b/drivers/gpu/nvgpu/common/gr/global_ctx.c
@@ -289,7 +289,7 @@ int nvgpu_gr_global_ctx_buffer_alloc(struct gk20a *g,
 	}
 #endif
 
-#ifdef CONFIG_NVGPU_DGPU
+#ifdef CONFIG_NVGPU_GRAPHICS
 	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 		if (desc[NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER].size != 0U) {
 			err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
diff --git a/drivers/gpu/nvgpu/common/gr/gr.c b/drivers/gpu/nvgpu/common/gr/gr.c
index 7637f52d9..8fd18cec0 100644
--- a/drivers/gpu/nvgpu/common/gr/gr.c
+++ b/drivers/gpu/nvgpu/common/gr/gr.c
@@ -100,7 +100,7 @@ static int gr_alloc_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr *gr)
 			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR, size);
 #endif
 
-#ifdef CONFIG_NVGPU_DGPU
+#ifdef CONFIG_NVGPU_GRAPHICS
 	if (g->ops.gr.init.get_rtv_cb_size != NULL) {
 		size = g->ops.gr.init.get_rtv_cb_size(g);
 		nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr,
diff --git a/drivers/gpu/nvgpu/common/gr/obj_ctx.c b/drivers/gpu/nvgpu/common/gr/obj_ctx.c
index d27788c0e..99c6f6a7d 100644
--- a/drivers/gpu/nvgpu/common/gr/obj_ctx.c
+++ b/drivers/gpu/nvgpu/common/gr/obj_ctx.c
@@ -372,7 +372,7 @@ void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 	g->ops.gr.init.commit_global_cb_manager(g, config, gr_ctx,
 		patch);
 
-#ifdef CONFIG_NVGPU_DGPU
+#ifdef CONFIG_NVGPU_GRAPHICS
 	if (g->ops.gr.init.commit_rtv_cb != NULL) {
 		/* RTV circular buffer */
 		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
diff --git a/drivers/gpu/nvgpu/hal/gr/init/gr_init_tu104.c b/drivers/gpu/nvgpu/hal/gr/init/gr_init_tu104.c
index d1431ca30..4893eb4c6 100644
--- a/drivers/gpu/nvgpu/hal/gr/init/gr_init_tu104.c
+++ b/drivers/gpu/nvgpu/hal/gr/init/gr_init_tu104.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,45 +32,6 @@
 #include
 
-#ifdef CONFIG_NVGPU_DGPU
-u32 tu104_gr_init_get_rtv_cb_size(struct gk20a *g)
-{
-	return nvgpu_safe_mult_u32(
-			nvgpu_safe_add_u32(
-				gr_scc_rm_rtv_cb_size_div_256b_default_f(),
-				gr_scc_rm_rtv_cb_size_div_256b_db_adder_f()),
-			gr_scc_bundle_cb_size_div_256b_byte_granularity_v());
-}
-
-static void tu104_gr_init_patch_rtv_cb(struct gk20a *g,
-	struct nvgpu_gr_ctx *gr_ctx,
-	u32 addr, u32 size, u32 gfxpAddSize, bool patch)
-{
-	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_rm_rtv_cb_base_r(),
-		gr_scc_rm_rtv_cb_base_addr_39_8_f(addr), patch);
-	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_rm_rtv_cb_size_r(),
-		gr_scc_rm_rtv_cb_size_div_256b_f(size), patch);
-	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_rm_rtv_cb_base_r(),
-		gr_gpcs_gcc_rm_rtv_cb_base_addr_39_8_f(addr), patch);
-	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_rm_gfxp_reserve_r(),
-		gr_scc_rm_gfxp_reserve_rtv_cb_size_div_256b_f(gfxpAddSize),
-		patch);
-}
-
-void tu104_gr_init_commit_rtv_cb(struct gk20a *g, u64 addr,
-	struct nvgpu_gr_ctx *gr_ctx, bool patch)
-{
-	u32 size = nvgpu_safe_add_u32(
-			gr_scc_rm_rtv_cb_size_div_256b_default_f(),
-			gr_scc_rm_rtv_cb_size_div_256b_db_adder_f());
-
-	addr = addr >> gr_scc_rm_rtv_cb_base_addr_39_8_align_bits_f();
-
-	nvgpu_assert(u64_hi32(addr) == 0U);
-	tu104_gr_init_patch_rtv_cb(g, gr_ctx, (u32)addr, size, 0, patch);
-}
-#endif
-
 u32 tu104_gr_init_get_bundle_cb_default_size(struct gk20a *g)
 {
 	return gr_scc_bundle_cb_size_div_256b__prod_v();
 }
@@ -187,6 +148,43 @@ int tu104_gr_init_load_sw_bundle64(struct gk20a *g,
 }
 
 #ifdef CONFIG_NVGPU_GRAPHICS
+u32 tu104_gr_init_get_rtv_cb_size(struct gk20a *g)
+{
+	return nvgpu_safe_mult_u32(
+			nvgpu_safe_add_u32(
+				gr_scc_rm_rtv_cb_size_div_256b_default_f(),
+				gr_scc_rm_rtv_cb_size_div_256b_db_adder_f()),
+			gr_scc_bundle_cb_size_div_256b_byte_granularity_v());
+}
+
+static void tu104_gr_init_patch_rtv_cb(struct gk20a *g,
+	struct nvgpu_gr_ctx *gr_ctx,
+	u32 addr, u32 size, u32 gfxpAddSize, bool patch)
+{
+	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_rm_rtv_cb_base_r(),
+		gr_scc_rm_rtv_cb_base_addr_39_8_f(addr), patch);
+	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_rm_rtv_cb_size_r(),
+		gr_scc_rm_rtv_cb_size_div_256b_f(size), patch);
+	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_rm_rtv_cb_base_r(),
+		gr_gpcs_gcc_rm_rtv_cb_base_addr_39_8_f(addr), patch);
+	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_rm_gfxp_reserve_r(),
+		gr_scc_rm_gfxp_reserve_rtv_cb_size_div_256b_f(gfxpAddSize),
+		patch);
+}
+
+void tu104_gr_init_commit_rtv_cb(struct gk20a *g, u64 addr,
+	struct nvgpu_gr_ctx *gr_ctx, bool patch)
+{
+	u32 size = nvgpu_safe_add_u32(
+			gr_scc_rm_rtv_cb_size_div_256b_default_f(),
+			gr_scc_rm_rtv_cb_size_div_256b_db_adder_f());
+
+	addr = addr >> gr_scc_rm_rtv_cb_base_addr_39_8_align_bits_f();
+
+	nvgpu_assert(u64_hi32(addr) == 0U);
+	tu104_gr_init_patch_rtv_cb(g, gr_ctx, (u32)addr, size, 0, patch);
+}
+
 void tu104_gr_init_commit_gfxp_rtv_cb(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, bool patch)
 {
diff --git a/drivers/gpu/nvgpu/hal/gr/init/gr_init_tu104.h b/drivers/gpu/nvgpu/hal/gr/init/gr_init_tu104.h
index d0cda1c10..9a8ce5802 100644
--- a/drivers/gpu/nvgpu/hal/gr/init/gr_init_tu104.h
+++ b/drivers/gpu/nvgpu/hal/gr/init/gr_init_tu104.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -29,12 +29,6 @@ struct gk20a;
 struct nvgpu_gr_ctx;
 struct netlist_av64_list;
 
-#ifdef CONFIG_NVGPU_DGPU
-u32 tu104_gr_init_get_rtv_cb_size(struct gk20a *g);
-void tu104_gr_init_commit_rtv_cb(struct gk20a *g, u64 addr,
-	struct nvgpu_gr_ctx *gr_ctx, bool patch);
-#endif
-
 u32 tu104_gr_init_get_bundle_cb_default_size(struct gk20a *g);
 u32 tu104_gr_init_get_min_gpm_fifo_depth(struct gk20a *g);
 u32 tu104_gr_init_get_bundle_cb_token_limit(struct gk20a *g);
@@ -45,6 +39,10 @@ int tu104_gr_init_load_sw_bundle64(struct gk20a *g,
 		struct netlist_av64_list *sw_bundle64_init);
 
 #ifdef CONFIG_NVGPU_GRAPHICS
+u32 tu104_gr_init_get_rtv_cb_size(struct gk20a *g);
+void tu104_gr_init_commit_rtv_cb(struct gk20a *g, u64 addr,
+	struct nvgpu_gr_ctx *gr_ctx, bool patch);
+
 void tu104_gr_init_commit_gfxp_rtv_cb(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, bool patch);
 
diff --git a/drivers/gpu/nvgpu/hal/init/hal_tu104.c b/drivers/gpu/nvgpu/hal/init/hal_tu104.c
index 9be1853be..658fec0c1 100644
--- a/drivers/gpu/nvgpu/hal/init/hal_tu104.c
+++ b/drivers/gpu/nvgpu/hal/init/hal_tu104.c
@@ -533,7 +533,7 @@ static const struct gops_gr_init tu104_ops_gr_init = {
 	.fe_go_idle_timeout = gv11b_gr_init_fe_go_idle_timeout,
 	.load_method_init = gm20b_gr_init_load_method_init,
 	.commit_global_timeslice = gv11b_gr_init_commit_global_timeslice,
-#ifdef CONFIG_NVGPU_DGPU
+#ifdef CONFIG_NVGPU_GRAPHICS
 	.get_rtv_cb_size = tu104_gr_init_get_rtv_cb_size,
 	.commit_rtv_cb = tu104_gr_init_commit_rtv_cb,
 #endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gops/gr.h b/drivers/gpu/nvgpu/include/nvgpu/gops/gr.h
index cf7a9d30e..063625287 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gops/gr.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gops/gr.h
@@ -779,9 +779,6 @@ struct gops_gr_init {
 	int (*load_sw_bundle64)(struct gk20a *g,
 			struct netlist_av64_list *sw_bundle64_init);
 #endif
-	u32 (*get_rtv_cb_size)(struct gk20a *g);
-	void (*commit_rtv_cb)(struct gk20a *g, u64 addr,
-			struct nvgpu_gr_ctx *gr_ctx, bool patch);
 #ifdef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
 	void (*restore_stats_counter_bundle_data)(struct gk20a *g,
 			struct netlist_av_list *sw_bundle_init);
@@ -804,6 +801,9 @@ struct gops_gr_init {
 			bool patch);
 	void (*rop_mapping)(struct gk20a *g,
 			struct nvgpu_gr_config *gr_config);
+	u32 (*get_rtv_cb_size)(struct gk20a *g);
+	void (*commit_rtv_cb)(struct gk20a *g, u64 addr,
+			struct nvgpu_gr_ctx *gr_ctx, bool patch);
 	void (*commit_gfxp_rtv_cb)(struct gk20a *g,
 			struct nvgpu_gr_ctx *gr_ctx,
 			bool patch);
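
Note (illustration only, not part of the patch to apply): the move is safe for configurations and chips that do not provide the RTV CB ops because nvgpu's common code calls these gops function pointers only after a NULL check, as seen in gr.c and obj_ctx.c above. The standalone C sketch below mimics that config-gated op pattern with hypothetical demo_* names; it is not nvgpu code and makes no claim about the real HAL layout.

/*
 * Standalone sketch (hypothetical demo_* names): a HAL op is populated
 * only when a build config is enabled, and the common caller NULL-checks
 * it, mirroring how gr.c guards g->ops.gr.init.get_rtv_cb_size.
 */
#include <stdio.h>
#include <stddef.h>

#define DEMO_CONFIG_GRAPHICS 1	/* stands in for CONFIG_NVGPU_GRAPHICS */

struct demo_gpu;

struct demo_gr_init_ops {
	/* Populated only when graphics support is compiled in. */
	unsigned int (*get_rtv_cb_size)(struct demo_gpu *g);
};

struct demo_gpu {
	struct demo_gr_init_ops gr_init;
};

#if DEMO_CONFIG_GRAPHICS
static unsigned int demo_get_rtv_cb_size(struct demo_gpu *g)
{
	(void)g;
	return 4096U;	/* placeholder size, not a real register value */
}
#endif

int main(void)
{
	struct demo_gpu g = { .gr_init = { .get_rtv_cb_size = NULL } };

#if DEMO_CONFIG_GRAPHICS
	/* The chip HAL table assigns the op only under the config. */
	g.gr_init.get_rtv_cb_size = demo_get_rtv_cb_size;
#endif

	/* Common code calls the op only if this chip/config provides it. */
	if (g.gr_init.get_rtv_cb_size != NULL) {
		printf("RTV CB size: %u\n", g.gr_init.get_rtv_cb_size(&g));
	} else {
		printf("RTV CB not supported in this configuration\n");
	}
	return 0;
}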