Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: move RTV CB code to GRAPHICS config
Some of the RTV circular buffer programming is under the GRAPHICS config and
some is under the DGPU config. For nvgpu-next, the RTV circular buffer is
required even for iGPU, so keeping this code under the DGPU config does not
make sense. Move all of it from the DGPU config to the GRAPHICS config.

Bug 3159973

Change-Id: I8438cc0e25354d27701df2fe44762306a731d8cd
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2524897
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: be507aea50
Commit: cebefd7ea2
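For illustration, a minimal sketch of the pattern this change applies: RTV circular buffer paths that were previously compiled only for dGPU builds (CONFIG_NVGPU_DGPU) are now compiled whenever graphics support is enabled (CONFIG_NVGPU_GRAPHICS), and callers keep NULL-checking the HAL pointers before use. The function name rtv_cb_commit_example and its body are hypothetical; only the config symbols and the g->ops.gr.init.commit_rtv_cb pointer and its signature come from the diff below.

static void rtv_cb_commit_example(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
		u64 addr, bool patch)
{
#ifdef CONFIG_NVGPU_GRAPHICS	/* was: #ifdef CONFIG_NVGPU_DGPU */
	/*
	 * RTV circular buffer is now needed for iGPU as well as dGPU, so the
	 * guard is the graphics config; chips without the HAL op still skip it.
	 */
	if (g->ops.gr.init.commit_rtv_cb != NULL) {
		g->ops.gr.init.commit_rtv_cb(g, addr, gr_ctx, patch);
	}
#endif
}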
@@ -385,7 +385,7 @@ int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 		nvgpu_err(g, "cannot map ctx pagepool buffer");
 		goto fail;
 	}
-#ifdef CONFIG_NVGPU_DGPU
+#ifdef CONFIG_NVGPU_GRAPHICS
 	/* RTV circular buffer */
 	if (nvgpu_gr_global_ctx_buffer_ready(global_ctx_buffer,
 			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER)) {
@@ -289,7 +289,7 @@ int nvgpu_gr_global_ctx_buffer_alloc(struct gk20a *g,
 	}
 #endif
 
-#ifdef CONFIG_NVGPU_DGPU
+#ifdef CONFIG_NVGPU_GRAPHICS
 	if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_MIG)) {
 		if (desc[NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER].size != 0U) {
 			err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
@@ -100,7 +100,7 @@ static int gr_alloc_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr *gr)
 			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR, size);
 #endif
 
-#ifdef CONFIG_NVGPU_DGPU
+#ifdef CONFIG_NVGPU_GRAPHICS
 	if (g->ops.gr.init.get_rtv_cb_size != NULL) {
 		size = g->ops.gr.init.get_rtv_cb_size(g);
 		nvgpu_log(g, gpu_dbg_info | gpu_dbg_gr,
@@ -372,7 +372,7 @@ void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
 	g->ops.gr.init.commit_global_cb_manager(g, config, gr_ctx,
 		patch);
 
-#ifdef CONFIG_NVGPU_DGPU
+#ifdef CONFIG_NVGPU_GRAPHICS
 	if (g->ops.gr.init.commit_rtv_cb != NULL) {
 		/* RTV circular buffer */
 		addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,45 +32,6 @@
 
 #include <nvgpu/hw/tu104/hw_gr_tu104.h>
 
-#ifdef CONFIG_NVGPU_DGPU
-u32 tu104_gr_init_get_rtv_cb_size(struct gk20a *g)
-{
-	return nvgpu_safe_mult_u32(
-			nvgpu_safe_add_u32(
-				gr_scc_rm_rtv_cb_size_div_256b_default_f(),
-				gr_scc_rm_rtv_cb_size_div_256b_db_adder_f()),
-			gr_scc_bundle_cb_size_div_256b_byte_granularity_v());
-}
-
-static void tu104_gr_init_patch_rtv_cb(struct gk20a *g,
-	struct nvgpu_gr_ctx *gr_ctx,
-	u32 addr, u32 size, u32 gfxpAddSize, bool patch)
-{
-	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_rm_rtv_cb_base_r(),
-		gr_scc_rm_rtv_cb_base_addr_39_8_f(addr), patch);
-	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_rm_rtv_cb_size_r(),
-		gr_scc_rm_rtv_cb_size_div_256b_f(size), patch);
-	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_rm_rtv_cb_base_r(),
-		gr_gpcs_gcc_rm_rtv_cb_base_addr_39_8_f(addr), patch);
-	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_rm_gfxp_reserve_r(),
-		gr_scc_rm_gfxp_reserve_rtv_cb_size_div_256b_f(gfxpAddSize),
-		patch);
-}
-
-void tu104_gr_init_commit_rtv_cb(struct gk20a *g, u64 addr,
-	struct nvgpu_gr_ctx *gr_ctx, bool patch)
-{
-	u32 size = nvgpu_safe_add_u32(
-			gr_scc_rm_rtv_cb_size_div_256b_default_f(),
-			gr_scc_rm_rtv_cb_size_div_256b_db_adder_f());
-
-	addr = addr >> gr_scc_rm_rtv_cb_base_addr_39_8_align_bits_f();
-
-	nvgpu_assert(u64_hi32(addr) == 0U);
-
-	tu104_gr_init_patch_rtv_cb(g, gr_ctx, (u32)addr, size, 0, patch);
-}
-#endif
-
 u32 tu104_gr_init_get_bundle_cb_default_size(struct gk20a *g)
 {
 	return gr_scc_bundle_cb_size_div_256b__prod_v();
@@ -187,6 +148,43 @@ int tu104_gr_init_load_sw_bundle64(struct gk20a *g,
 }
 
 #ifdef CONFIG_NVGPU_GRAPHICS
+u32 tu104_gr_init_get_rtv_cb_size(struct gk20a *g)
+{
+	return nvgpu_safe_mult_u32(
+			nvgpu_safe_add_u32(
+				gr_scc_rm_rtv_cb_size_div_256b_default_f(),
+				gr_scc_rm_rtv_cb_size_div_256b_db_adder_f()),
+			gr_scc_bundle_cb_size_div_256b_byte_granularity_v());
+}
+
+static void tu104_gr_init_patch_rtv_cb(struct gk20a *g,
+	struct nvgpu_gr_ctx *gr_ctx,
+	u32 addr, u32 size, u32 gfxpAddSize, bool patch)
+{
+	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_rm_rtv_cb_base_r(),
+		gr_scc_rm_rtv_cb_base_addr_39_8_f(addr), patch);
+	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_rm_rtv_cb_size_r(),
+		gr_scc_rm_rtv_cb_size_div_256b_f(size), patch);
+	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_rm_rtv_cb_base_r(),
+		gr_gpcs_gcc_rm_rtv_cb_base_addr_39_8_f(addr), patch);
+	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_rm_gfxp_reserve_r(),
+		gr_scc_rm_gfxp_reserve_rtv_cb_size_div_256b_f(gfxpAddSize),
+		patch);
+}
+
+void tu104_gr_init_commit_rtv_cb(struct gk20a *g, u64 addr,
+	struct nvgpu_gr_ctx *gr_ctx, bool patch)
+{
+	u32 size = nvgpu_safe_add_u32(
+			gr_scc_rm_rtv_cb_size_div_256b_default_f(),
+			gr_scc_rm_rtv_cb_size_div_256b_db_adder_f());
+
+	addr = addr >> gr_scc_rm_rtv_cb_base_addr_39_8_align_bits_f();
+
+	nvgpu_assert(u64_hi32(addr) == 0U);
+
+	tu104_gr_init_patch_rtv_cb(g, gr_ctx, (u32)addr, size, 0, patch);
+}
+
 void tu104_gr_init_commit_gfxp_rtv_cb(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, bool patch)
 {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -29,12 +29,6 @@ struct gk20a;
 struct nvgpu_gr_ctx;
 struct netlist_av64_list;
 
-#ifdef CONFIG_NVGPU_DGPU
-u32 tu104_gr_init_get_rtv_cb_size(struct gk20a *g);
-void tu104_gr_init_commit_rtv_cb(struct gk20a *g, u64 addr,
-	struct nvgpu_gr_ctx *gr_ctx, bool patch);
-#endif
-
 u32 tu104_gr_init_get_bundle_cb_default_size(struct gk20a *g);
 u32 tu104_gr_init_get_min_gpm_fifo_depth(struct gk20a *g);
 u32 tu104_gr_init_get_bundle_cb_token_limit(struct gk20a *g);
@@ -45,6 +39,10 @@ int tu104_gr_init_load_sw_bundle64(struct gk20a *g,
 	struct netlist_av64_list *sw_bundle64_init);
 
 #ifdef CONFIG_NVGPU_GRAPHICS
+u32 tu104_gr_init_get_rtv_cb_size(struct gk20a *g);
+void tu104_gr_init_commit_rtv_cb(struct gk20a *g, u64 addr,
+	struct nvgpu_gr_ctx *gr_ctx, bool patch);
+
 void tu104_gr_init_commit_gfxp_rtv_cb(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, bool patch);
 
@@ -533,7 +533,7 @@ static const struct gops_gr_init tu104_ops_gr_init = {
 	.fe_go_idle_timeout = gv11b_gr_init_fe_go_idle_timeout,
 	.load_method_init = gm20b_gr_init_load_method_init,
 	.commit_global_timeslice = gv11b_gr_init_commit_global_timeslice,
-#ifdef CONFIG_NVGPU_DGPU
+#ifdef CONFIG_NVGPU_GRAPHICS
 	.get_rtv_cb_size = tu104_gr_init_get_rtv_cb_size,
 	.commit_rtv_cb = tu104_gr_init_commit_rtv_cb,
 #endif
@@ -779,9 +779,6 @@ struct gops_gr_init {
 	int (*load_sw_bundle64)(struct gk20a *g,
 			struct netlist_av64_list *sw_bundle64_init);
 #endif
-	u32 (*get_rtv_cb_size)(struct gk20a *g);
-	void (*commit_rtv_cb)(struct gk20a *g, u64 addr,
-			struct nvgpu_gr_ctx *gr_ctx, bool patch);
 #ifdef CONFIG_NVGPU_GR_GOLDEN_CTX_VERIFICATION
 	void (*restore_stats_counter_bundle_data)(struct gk20a *g,
 			struct netlist_av_list *sw_bundle_init);
@@ -804,6 +801,9 @@ struct gops_gr_init {
 			bool patch);
 	void (*rop_mapping)(struct gk20a *g,
 			struct nvgpu_gr_config *gr_config);
+	u32 (*get_rtv_cb_size)(struct gk20a *g);
+	void (*commit_rtv_cb)(struct gk20a *g, u64 addr,
+			struct nvgpu_gr_ctx *gr_ctx, bool patch);
 	void (*commit_gfxp_rtv_cb)(struct gk20a *g,
 			struct nvgpu_gr_ctx *gr_ctx,
 			bool patch);