Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: add DGPU config for RTV circular buffer
RTV circular context buffer is only supported on TU104 dGPU as of now. Hence compile out the corresponding #define and code from the safety build.

Jira NVGPU-4373

Change-Id: I46a3efc92fb247fa08efb925447c248b2a4b9a57
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2255768
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: Alex Waterman
Parent: 4f45ec7d5f
Commit: d7971e7444
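Note: the sketch below is not part of the change; it is a minimal, self-contained C illustration of the guard pattern this commit applies. The stub names (gk20a_stub, tu104_rtv_cb_size_stub, alloc_global_ctx_buffers_stub) are invented stand-ins for the driver structures touched in the diff. Wrapping both the HAL op and its call site in #ifdef CONFIG_NVGPU_DGPU means a safety build that leaves the option unset compiles the RTV circular buffer path out entirely, while dGPU builds keep the NULL-checked HAL call seen in the hunks below.

/*
 * Minimal sketch (not the actual nvgpu sources) of compiling the
 * RTV circular buffer path out when CONFIG_NVGPU_DGPU is not set.
 */
#include <stdio.h>
#include <stddef.h>

#define CONFIG_NVGPU_DGPU 1	/* comment out to mimic the safety build */

struct gk20a_stub {
	/* HAL op exists only in builds that support dGPU. */
#ifdef CONFIG_NVGPU_DGPU
	unsigned int (*get_rtv_cb_size)(void);
#endif
	int unused;
};

#ifdef CONFIG_NVGPU_DGPU
/* Hypothetical stand-in for tu104_gr_init_get_rtv_cb_size(). */
static unsigned int tu104_rtv_cb_size_stub(void)
{
	return 4096U;
}
#endif

static void alloc_global_ctx_buffers_stub(struct gk20a_stub *g)
{
#ifdef CONFIG_NVGPU_DGPU
	/* Guarded like gr_alloc_global_ctx_buffers() in the diff:
	 * only touch the RTV circular buffer when the op is wired up. */
	if (g->get_rtv_cb_size != NULL) {
		printf("rtv_circular_buffer_size : %u\n",
			g->get_rtv_cb_size());
	}
#else
	(void)g;	/* RTV circular buffer code compiled out */
#endif
}

int main(void)
{
	struct gk20a_stub g = {
#ifdef CONFIG_NVGPU_DGPU
		.get_rtv_cb_size = tu104_rtv_cb_size_stub,
#endif
		.unused = 0,
	};

	alloc_global_ctx_buffers_stub(&g);
	return 0;
}

The same reasoning explains the gops_gr.h hunk further down: rather than introducing a second guard, the get_rtv_cb_size op is moved into the existing CONFIG_NVGPU_DGPU block next to load_sw_bundle64 and commit_rtv_cb.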
@@ -402,6 +402,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 	}
 #endif
 
+#ifdef CONFIG_NVGPU_DGPU
 	/* RTV circular buffer */
 	err = nvgpu_gr_ctx_map_ctx_buffer(g,
 		NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER,
@@ -411,6 +412,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 		nvgpu_err(g, "cannot map ctx rtv circular buffer");
 		goto fail;
 	}
+#endif
 
 	gr_ctx->global_ctx_buffer_mapped = true;
 
@@ -236,6 +236,7 @@ int nvgpu_gr_global_ctx_buffer_alloc(struct gk20a *g,
 	}
 #endif
 
+#ifdef CONFIG_NVGPU_DGPU
 	if (desc[NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER].size != 0U) {
 		err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
 				NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER);
@@ -243,6 +244,7 @@ int nvgpu_gr_global_ctx_buffer_alloc(struct gk20a *g,
 			goto clean_up;
 		}
 	}
+#endif
 
 #ifdef CONFIG_NVGPU_VPR
 	if (nvgpu_gr_global_ctx_buffer_vpr_alloc(g, desc) != 0) {
@@ -98,6 +98,7 @@ static int gr_alloc_global_ctx_buffers(struct gk20a *g)
 		NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER, size);
 #endif
 
+#ifdef CONFIG_NVGPU_DGPU
 	if (g->ops.gr.init.get_rtv_cb_size != NULL) {
 		size = g->ops.gr.init.get_rtv_cb_size(g);
 		nvgpu_log_info(g, "rtv_circular_buffer_size : %u", size);
@@ -105,6 +106,7 @@ static int gr_alloc_global_ctx_buffers(struct gk20a *g)
 		nvgpu_gr_global_ctx_set_size(gr->global_ctx_buffer,
 			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER, size);
 	}
+#endif
 
 	err = nvgpu_gr_global_ctx_buffer_alloc(g, gr->global_ctx_buffer);
 	if (err != 0) {
@@ -32,6 +32,7 @@
 
 #include <nvgpu/hw/tu104/hw_gr_tu104.h>
 
+#ifdef CONFIG_NVGPU_DGPU
 u32 tu104_gr_init_get_rtv_cb_size(struct gk20a *g)
 {
 	return nvgpu_safe_mult_u32(
@@ -68,6 +69,7 @@ void tu104_gr_init_commit_rtv_cb(struct gk20a *g, u64 addr,
 	nvgpu_assert(u64_hi32(addr) == 0U);
 	tu104_gr_init_patch_rtv_cb(g, gr_ctx, (u32)addr, size, 0, patch);
 }
+#endif
 
 u32 tu104_gr_init_get_bundle_cb_default_size(struct gk20a *g)
 {
@@ -29,9 +29,11 @@ struct gk20a;
 struct nvgpu_gr_ctx;
 struct netlist_av64_list;
 
+#ifdef CONFIG_NVGPU_DGPU
 u32 tu104_gr_init_get_rtv_cb_size(struct gk20a *g);
 void tu104_gr_init_commit_rtv_cb(struct gk20a *g, u64 addr,
 	struct nvgpu_gr_ctx *gr_ctx, bool patch);
+#endif
 
 u32 tu104_gr_init_get_bundle_cb_default_size(struct gk20a *g);
 u32 tu104_gr_init_get_min_gpm_fifo_depth(struct gk20a *g);
@@ -586,8 +586,10 @@ static const struct gpu_ops tu104_ops = {
 		.load_method_init = gm20b_gr_init_load_method_init,
 		.commit_global_timeslice =
 			gv11b_gr_init_commit_global_timeslice,
+#ifdef CONFIG_NVGPU_DGPU
 		.get_rtv_cb_size = tu104_gr_init_get_rtv_cb_size,
 		.commit_rtv_cb = tu104_gr_init_commit_rtv_cb,
+#endif
 		.get_bundle_cb_default_size =
 			tu104_gr_init_get_bundle_cb_default_size,
 		.get_min_gpm_fifo_depth =
@@ -632,7 +632,6 @@ struct gops_gr_init {
 	int (*load_sw_veid_bundle)(struct gk20a *g,
 			struct netlist_av_list *sw_method_init);
 	void (*commit_global_timeslice)(struct gk20a *g);
-	u32 (*get_rtv_cb_size)(struct gk20a *g);
 	u32 (*get_bundle_cb_default_size)(struct gk20a *g);
 	u32 (*get_min_gpm_fifo_depth)(struct gk20a *g);
 	u32 (*get_bundle_cb_token_limit)(struct gk20a *g);
@@ -670,6 +669,7 @@ struct gops_gr_init {
 #ifdef CONFIG_NVGPU_DGPU
 	int (*load_sw_bundle64)(struct gk20a *g,
 			struct netlist_av64_list *sw_bundle64_init);
+	u32 (*get_rtv_cb_size)(struct gk20a *g);
 	void (*commit_rtv_cb)(struct gk20a *g, u64 addr,
 			struct nvgpu_gr_ctx *gr_ctx, bool patch);
 #endif
@@ -76,8 +76,10 @@ typedef void (*global_ctx_mem_destroy_fn)(struct gk20a *g,
  * accesses via firmware methods.
  */
 #define NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP 6U
+#ifdef CONFIG_NVGPU_DGPU
 /** S/W defined index for RTV circular global context buffer. */
 #define NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER 7U
+#endif
 #ifdef CONFIG_NVGPU_FECS_TRACE
 /** S/W defined index for global FECS trace buffer. */
 #define NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER 8U