mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: move global ctx commit hal to common.gr.obj_ctx unit
gr_gk20a_commit_global_ctx_buffers() is h/w independent, hence move it to
the common unit common.gr.obj_ctx and rename it to
nvgpu_gr_obj_ctx_commit_global_ctx_buffers().

Delete the g->ops.gr.commit_global_ctx_buffers HAL.

Jira NVGPU-1887

Change-Id: If1c840237b8ba2c13bed40a4315810073756aeb9
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2088506
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit e3e8138404
parent 46aedec681
committed by mobile promotions
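
For readers skimming the diff below: call sites stop dispatching through the per-chip HAL pointer and call the new common.gr.obj_ctx function directly, while that function still commits the per-chip register state through the g->ops.gr.init.* HALs. A minimal before/after sketch of a call site, drawn from the hunks below (error handling abbreviated):

	/* Before: dispatch through the chip-specific HAL pointer. */
	err = g->ops.gr.commit_global_ctx_buffers(g, gr_ctx, false);

	/* After: call the chip-independent common.gr.obj_ctx helper directly. */
	err = nvgpu_gr_obj_ctx_commit_global_ctx_buffers(g, gr_ctx, false);
	if (err != 0) {
		goto clean_up;
	}
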
@@ -26,6 +26,7 @@
 #include <nvgpu/gr/ctx.h>
 #include <nvgpu/gr/global_ctx.h>
 #include <nvgpu/gr/obj_ctx.h>
+#include <nvgpu/gr/config.h>
 #include <nvgpu/power_features/cg.h>
 
 #include "obj_ctx_priv.h"

@@ -43,6 +44,62 @@
 #include <nvgpu/hw/gk20a/hw_gr_gk20a.h>
 #include <nvgpu/hw/gk20a/hw_ram_gk20a.h>
 
+int nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
+	struct nvgpu_gr_ctx *gr_ctx, bool patch)
+{
+	struct gr_gk20a *gr = &g->gr;
+	u64 addr;
+	u32 size;
+
+	nvgpu_log_fn(g, " ");
+
+	if (patch) {
+		int err;
+		err = nvgpu_gr_ctx_patch_write_begin(g, gr_ctx, false);
+		if (err != 0) {
+			return err;
+		}
+	}
+
+	/* global pagepool buffer */
+	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx, NVGPU_GR_CTX_PAGEPOOL_VA);
+	size = (u32)nvgpu_gr_global_ctx_get_size(gr->global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_PAGEPOOL);
+
+	g->ops.gr.init.commit_global_pagepool(g, gr_ctx, addr, size, patch,
+		true);
+
+	/* global bundle cb */
+	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx, NVGPU_GR_CTX_CIRCULAR_VA);
+	size = g->ops.gr.init.get_bundle_cb_default_size(g);
+
+	g->ops.gr.init.commit_global_bundle_cb(g, gr_ctx, addr, size, patch);
+
+	/* global attrib cb */
+	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
+			NVGPU_GR_CTX_ATTRIBUTE_VA);
+
+	g->ops.gr.init.commit_global_attrib_cb(g, gr_ctx,
+		nvgpu_gr_config_get_tpc_count(g->gr.config),
+		nvgpu_gr_config_get_max_tpc_count(g->gr.config), addr, patch);
+
+	g->ops.gr.init.commit_global_cb_manager(g, g->gr.config, gr_ctx, patch);
+
+	if (g->ops.gr.init.commit_rtv_cb != NULL) {
+		/* RTV circular buffer */
+		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
+			NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA);
+
+		g->ops.gr.init.commit_rtv_cb(g, addr, gr_ctx, patch);
+	}
+
+	if (patch) {
+		nvgpu_gr_ctx_patch_write_end(g, gr_ctx, false);
+	}
+
+	return 0;
+}
+
 static int nvgpu_gr_obj_ctx_alloc_sw_bundle(struct gk20a *g)
 {
 	struct netlist_av_list *sw_bundle_init =

@@ -219,7 +276,7 @@ int nvgpu_gr_obj_ctx_alloc_golden_ctx_image(struct gk20a *g,
 	/* disable fe_go_idle */
 	g->ops.gr.init.fe_go_idle_timeout(g, false);
 
-	err = g->ops.gr.commit_global_ctx_buffers(g, gr_ctx, false);
+	err = nvgpu_gr_obj_ctx_commit_global_ctx_buffers(g, gr_ctx, false);
 	if (err != 0) {
 		goto clean_up;
 	}

@@ -361,7 +418,10 @@ int nvgpu_gr_obj_ctx_alloc(struct gk20a *g,
 		goto out;
 	}
 
-	g->ops.gr.commit_global_ctx_buffers(g, gr_ctx, true);
+	err = nvgpu_gr_obj_ctx_commit_global_ctx_buffers(g, gr_ctx, true);
+	if (err != 0) {
+		goto out;
+	}
 
 	/* commit gr ctx buffer */
 	err = g->ops.gr.commit_inst(c, gr_ctx->mem.gpu_va);

@@ -196,7 +196,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 		.create_priv_addr_table = gr_gk20a_create_priv_addr_table,
 		.split_fbpa_broadcast_addr = gr_gk20a_split_fbpa_broadcast_addr,
 		.alloc_global_ctx_buffers = gr_gk20a_alloc_global_ctx_buffers,
-		.commit_global_ctx_buffers = gr_gk20a_commit_global_ctx_buffers,
 		.get_offset_in_gpccs_segment =
 			gr_gk20a_get_offset_in_gpccs_segment,
 		.set_debug_mode = gm20b_gr_set_debug_mode,

@@ -231,7 +231,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 		.create_priv_addr_table = gr_gv11b_create_priv_addr_table,
 		.split_fbpa_broadcast_addr = gr_gk20a_split_fbpa_broadcast_addr,
 		.alloc_global_ctx_buffers = gr_gk20a_alloc_global_ctx_buffers,
-		.commit_global_ctx_buffers = gr_gk20a_commit_global_ctx_buffers,
 		.get_nonpes_aware_tpc = gr_gv11b_get_nonpes_aware_tpc,
 		.get_offset_in_gpccs_segment =
 			gr_gk20a_get_offset_in_gpccs_segment,

@@ -200,62 +200,6 @@ u32 fecs_current_ctx_data(struct gk20a *g, struct nvgpu_mem *inst_block)
 		gr_fecs_current_ctx_valid_f(1);
 }
 
-int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
-	struct nvgpu_gr_ctx *gr_ctx, bool patch)
-{
-	struct gr_gk20a *gr = &g->gr;
-	u64 addr;
-	u32 size;
-
-	nvgpu_log_fn(g, " ");
-
-	if (patch) {
-		int err;
-		err = nvgpu_gr_ctx_patch_write_begin(g, gr_ctx, false);
-		if (err != 0) {
-			return err;
-		}
-	}
-
-	/* global pagepool buffer */
-	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx, NVGPU_GR_CTX_PAGEPOOL_VA);
-	size = (u32)nvgpu_gr_global_ctx_get_size(gr->global_ctx_buffer,
-			NVGPU_GR_GLOBAL_CTX_PAGEPOOL);
-
-	g->ops.gr.init.commit_global_pagepool(g, gr_ctx, addr, size, patch,
-		true);
-
-	/* global bundle cb */
-	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx, NVGPU_GR_CTX_CIRCULAR_VA);
-	size = g->ops.gr.init.get_bundle_cb_default_size(g);
-
-	g->ops.gr.init.commit_global_bundle_cb(g, gr_ctx, addr, size, patch);
-
-	/* global attrib cb */
-	addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-			NVGPU_GR_CTX_ATTRIBUTE_VA);
-
-	g->ops.gr.init.commit_global_attrib_cb(g, gr_ctx,
-		nvgpu_gr_config_get_tpc_count(g->gr.config),
-		nvgpu_gr_config_get_max_tpc_count(g->gr.config), addr, patch);
-
-	g->ops.gr.init.commit_global_cb_manager(g, g->gr.config, gr_ctx, patch);
-
-	if (g->ops.gr.init.commit_rtv_cb != NULL) {
-		/* RTV circular buffer */
-		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
-			NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA);
-
-		g->ops.gr.init.commit_rtv_cb(g, addr, gr_ctx, patch);
-	}
-
-	if (patch) {
-		nvgpu_gr_ctx_patch_write_end(g, gr_ctx, false);
-	}
-
-	return 0;
-}
-
 int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 			struct channel_gk20a *c,
 			bool enable_smpc_ctxsw)

@@ -423,8 +423,6 @@ int gk20a_gr_handle_notify_pending(struct gk20a *g,
 			struct gr_gk20a_isr_data *isr_data);
 
 int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g);
-int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
-	struct nvgpu_gr_ctx *gr_ctx, bool patch);
 
 u32 fecs_current_ctx_data(struct gk20a *g, struct nvgpu_mem *inst_block);
 

@@ -315,7 +315,6 @@ static const struct gpu_ops gm20b_ops = {
 		.create_priv_addr_table = gr_gk20a_create_priv_addr_table,
 		.split_fbpa_broadcast_addr = gr_gk20a_split_fbpa_broadcast_addr,
 		.alloc_global_ctx_buffers = gr_gk20a_alloc_global_ctx_buffers,
-		.commit_global_ctx_buffers = gr_gk20a_commit_global_ctx_buffers,
 		.get_offset_in_gpccs_segment =
 			gr_gk20a_get_offset_in_gpccs_segment,
 		.set_debug_mode = gm20b_gr_set_debug_mode,

@@ -352,7 +352,6 @@ static const struct gpu_ops gp10b_ops = {
 		.create_priv_addr_table = gr_gk20a_create_priv_addr_table,
 		.split_fbpa_broadcast_addr = gr_gk20a_split_fbpa_broadcast_addr,
 		.alloc_global_ctx_buffers = gr_gk20a_alloc_global_ctx_buffers,
-		.commit_global_ctx_buffers = gr_gk20a_commit_global_ctx_buffers,
 		.get_offset_in_gpccs_segment =
 			gr_gk20a_get_offset_in_gpccs_segment,
 		.set_debug_mode = gm20b_gr_set_debug_mode,

@@ -473,7 +473,6 @@ static const struct gpu_ops gv100_ops = {
 		.create_priv_addr_table = gr_gv11b_create_priv_addr_table,
 		.split_fbpa_broadcast_addr = gr_gv100_split_fbpa_broadcast_addr,
 		.alloc_global_ctx_buffers = gr_gk20a_alloc_global_ctx_buffers,
-		.commit_global_ctx_buffers = gr_gk20a_commit_global_ctx_buffers,
 		.get_nonpes_aware_tpc = gr_gv11b_get_nonpes_aware_tpc,
 		.get_offset_in_gpccs_segment =
 			gr_gk20a_get_offset_in_gpccs_segment,

@@ -432,7 +432,6 @@ static const struct gpu_ops gv11b_ops = {
 		.create_priv_addr_table = gr_gv11b_create_priv_addr_table,
 		.split_fbpa_broadcast_addr = gr_gk20a_split_fbpa_broadcast_addr,
 		.alloc_global_ctx_buffers = gr_gk20a_alloc_global_ctx_buffers,
-		.commit_global_ctx_buffers = gr_gk20a_commit_global_ctx_buffers,
 		.get_nonpes_aware_tpc = gr_gv11b_get_nonpes_aware_tpc,
 		.get_offset_in_gpccs_segment =
 			gr_gk20a_get_offset_in_gpccs_segment,

@@ -444,8 +444,6 @@ struct gpu_ops {
 				u32 *priv_addr_table,
 				u32 *priv_addr_table_index);
 		int (*alloc_global_ctx_buffers)(struct gk20a *g);
-		int (*commit_global_ctx_buffers)(struct gk20a *g,
-			struct nvgpu_gr_ctx *gr_ctx, bool patch);
 		u32 (*get_nonpes_aware_tpc)(struct gk20a *g, u32 gpc, u32 tpc);
 		int (*get_offset_in_gpccs_segment)(struct gk20a *g,
 			enum ctxsw_addr_type addr_type, u32 num_tpcs,

@@ -35,6 +35,9 @@ struct nvgpu_mem;
 struct channel_gk20a;
 struct nvgpu_gr_obj_ctx_golden_image;
 
+int nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
+	struct nvgpu_gr_ctx *gr_ctx, bool patch);
+
 int nvgpu_gr_obj_ctx_alloc_golden_ctx_image(struct gk20a *g,
 	struct nvgpu_gr_obj_ctx_golden_image *golden_image,
 	struct nvgpu_gr_ctx *gr_ctx,

@@ -501,7 +501,6 @@ static const struct gpu_ops tu104_ops = {
 		.create_priv_addr_table = gr_gv11b_create_priv_addr_table,
 		.split_fbpa_broadcast_addr = gr_gv100_split_fbpa_broadcast_addr,
 		.alloc_global_ctx_buffers = gr_gk20a_alloc_global_ctx_buffers,
-		.commit_global_ctx_buffers = gr_gk20a_commit_global_ctx_buffers,
 		.get_nonpes_aware_tpc = gr_gv11b_get_nonpes_aware_tpc,
 		.get_offset_in_gpccs_segment =
 			gr_tu104_get_offset_in_gpccs_segment,