gpu: nvgpu: vgpu: add rtv circular buffer support

If the RTV circular buffer HALs are not NULL, size the buffer,
allocate a GPU VA for it and ask the RM server to map it as part
of the global context buffers.

Bug 3158160

Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Change-Id: I56c030877219fc7a5a23e5c2715f98996b3c429f
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2434876
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Sagar Kadamati <skadamati@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: Aparna Das <aparnad@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author: Richard Zhao
Authored: 2020-10-21 18:54:07 -07:00
Committed-by: Alex Waterman
Parent: a1bbcff476
Commit: e367f670fd
5 changed files with 27 additions and 4 deletions
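In outline, the flow this commit adds on the vGPU client side: size the RTV circular buffer through the new HAL when the global context buffers are allocated, then allocate a GPU virtual address for it and hand that VA to the RM server along with the other global context buffer addresses. A condensed sketch (names come from the diffs below; control flow is simplified and error handling omitted):

    /* 1. Sizing, in vgpu_gr_alloc_global_ctx_buffers(): only chips that
     *    wire up the HAL get a nonzero RTV circular buffer size.
     */
    if (g->ops.gr.init.get_rtv_cb_size != NULL) {
        nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
            NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER,
            g->ops.gr.init.get_rtv_cb_size(g));
    }

    /* 2. Mapping, in vgpu_gr_map_global_ctx_buffers(): a nonzero size
     *    gates the VA allocation, and the VA reaches the server in
     *    tegra_vgpu_ch_ctx_params.rtv_cb_va.
     */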


@@ -246,7 +246,7 @@ int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 			struct nvgpu_gr_global_ctx_buffer_desc *global_ctx_buffer,
 			struct vm_gk20a *ch_vm, u64 virt_ctx)
 {
-	struct tegra_vgpu_cmd_msg msg;
+	struct tegra_vgpu_cmd_msg msg = {};
 	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
 	u64 *g_bfr_va;
 	u64 gpu_va;
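The msg change above is more than tidying: tegra_vgpu_cmd_msg now carries a params field (rtv_cb_va, added below) that is only written when the RTV circular buffer exists, so the whole message is zero-initialized to keep indeterminate stack bytes from reaching the server. A minimal standalone illustration (not nvgpu code):

    struct params { unsigned long long rtv_cb_va; unsigned int class_num; };

    struct params a;       /* members indeterminate; reading them is undefined */
    struct params b = {};  /* every member zeroed; {} is a GNU C extension
                            * common in kernel code (C99 spelling: {0}) */
    /* b.rtv_cb_va == 0 even on paths that never assign it */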
@@ -294,6 +294,19 @@ int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 			goto clean_up;
 		}
 		g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA] = gpu_va;
+
+		/* RTV circular buffer */
+		if (nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
+				NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER) != 0U) {
+			gpu_va = nvgpu_vm_alloc_va(ch_vm,
+					nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
+						NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER),
+					GMMU_PAGE_SIZE_KERNEL);
+			if (!gpu_va) {
+				goto clean_up;
+			}
+			g_bfr_va[NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA] = gpu_va;
+		}
 	}
 
 	/* Priv register Access Map */
@@ -325,6 +338,7 @@ int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 	p->attr_va = g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA];
 	p->page_pool_va = g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA];
 	p->priv_access_map_va = g_bfr_va[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA];
+	p->rtv_cb_va = g_bfr_va[NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA];
 #ifdef CONFIG_NVGPU_FECS_TRACE
 	p->fecs_trace_va = g_bfr_va[NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA];
 #endif
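The hunks above only populate the message; the round trip to the server happens later in the same function, outside the changed lines. For orientation, vgpu RPCs in this driver are generally issued along these lines (a sketch: vgpu_comm_sendrecv() and msg.handle are the driver's usual transport plumbing, but the command ID shown is assumed, not taken from this diff):

    msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_CTX;  /* assumed command ID */
    msg.handle = vgpu_get_handle(g);
    err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
    err = (err != 0) ? err : msg.ret;  /* server status piggybacks on ret */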


@@ -204,6 +204,14 @@ int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
 	nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
 		NVGPU_GR_GLOBAL_CTX_ATTRIBUTE, size);
 
+	if (g->ops.gr.init.get_rtv_cb_size != NULL) {
+		size = g->ops.gr.init.get_rtv_cb_size(g);
+		nvgpu_log_info(g, "rtv_circular_buffer_size : %u", size);
+		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
+			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER, size);
+	}
+
 	size = NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP_SIZE;
 	nvgpu_log_info(g, "priv_access_map_size : %d", size);
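get_rtv_cb_size is a per-chip HAL, so native and virtualized paths share one sizing entry point and chips without an RTV circular buffer simply leave it NULL. Purely for illustration, a chip-side implementation could look like the sketch below; the constants and the sizing rule are invented, only the HAL signature comes from this patch:

    #define EXAMPLE_RTV_CB_ENTRY_SIZE   16U    /* invented */
    #define EXAMPLE_RTV_CB_NUM_ENTRIES  1024U  /* invented */

    /* Hypothetical per-chip sizing HAL; a real chip would derive the
     * entry count from its GPC/TPC configuration, not a constant.
     */
    static u32 example_gr_init_get_rtv_cb_size(struct gk20a *g)
    {
        (void)g;
        return EXAMPLE_RTV_CB_ENTRY_SIZE * EXAMPLE_RTV_CB_NUM_ENTRIES;
    }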


@@ -718,10 +718,10 @@ struct gops_gr_init {
 #ifdef CONFIG_NVGPU_DGPU
 	int (*load_sw_bundle64)(struct gk20a *g,
 			struct netlist_av64_list *sw_bundle64_init);
+#endif
 	u32 (*get_rtv_cb_size)(struct gk20a *g);
 	void (*commit_rtv_cb)(struct gk20a *g, u64 addr,
 			struct nvgpu_gr_ctx *gr_ctx, bool patch);
-#endif
 #ifdef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
 	void (*restore_stats_counter_bundle_data)(struct gk20a *g,
 		struct netlist_av_list *sw_bundle_init);
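Moving the #endif up means get_rtv_cb_size and commit_rtv_cb are declared in every configuration rather than only under CONFIG_NVGPU_DGPU, which is what lets the vGPU code above probe them for NULL. The ops are populated per chip in its HAL table; a hedged sketch (the ga10b names are illustrative and the assignment site is not part of this diff):

    g->ops.gr.init.get_rtv_cb_size = ga10b_gr_init_get_rtv_cb_size;
    g->ops.gr.init.commit_rtv_cb = ga10b_gr_init_commit_rtv_cb;
    /* chips without an RTV circular buffer leave both pointers NULL */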


@@ -76,10 +76,10 @@ typedef void (*global_ctx_mem_destroy_fn)(struct gk20a *g,
  * accesses via firmware methods.
  */
 #define NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP 6U
-#ifdef CONFIG_NVGPU_DGPU
 /** S/W defined index for RTV circular global context buffer. */
 #define NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER 7U
-#endif
 #ifdef CONFIG_NVGPU_FECS_TRACE
 /** S/W defined index for global FECS trace buffer. */
 #define NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER 8U


@@ -212,6 +212,7 @@ struct tegra_vgpu_ch_ctx_params {
 	u64 attr_va;
 	u64 page_pool_va;
 	u64 priv_access_map_va;
+	u64 rtv_cb_va;
 	u64 fecs_trace_va;
 	u32 class_num;
 };
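Note that rtv_cb_va is inserted ahead of fecs_trace_va, so the offsets of the trailing members shift and client and server must be built against the same header revision. A build-time guard is one way to document that dependency; a sketch (requires <stddef.h>; the check itself is illustrative, not part of this patch):

    #include <stddef.h>

    /* If a field is ever inserted before rtv_cb_va again, the trailing
     * offsets shift and both sides must be rebuilt together.
     */
    _Static_assert(offsetof(struct tegra_vgpu_ch_ctx_params, rtv_cb_va) <
                   offsetof(struct tegra_vgpu_ch_ctx_params, fecs_trace_va),
                   "rtv_cb_va must stay ahead of fecs_trace_va");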