gpu: nvgpu: move global pagepool buffer commit hal to hal.gr.init

Move g->ops.gr.commit_global_pagepool() hal to hal.gr.init unit as
g->ops.gr.init.commit_global_pagepool()
Also move g->ops.gr.pagepool_default_size() hal to
g->ops.gr.init.pagepool_default_size()

Add global_ctx boolean flag as parameter to
g->ops.gr.init.commit_global_pagepool() to distinguish between
committing global pagepool vs. ctxsw pagepool buffers

Remove register header accessors from gr_gk20a_commit_global_ctx_buffers()
and move them to hal functions

Move hal definitions to gm20b/gp10b hal files appropriately

Remove g->ops.gr.pagepool_default_size() hal for gv11b since gv11b can
re-use gp10b hal

Jira NVGPU-2961

Change-Id: Id532defe05edf2e5d291fec9ec1aeb5b8e33c544
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2077217
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Deepak Nibade
2019-03-16 16:41:30 +05:30
committed by mobile promotions
parent 08f9184f34
commit 2af9d5787c
21 changed files with 144 additions and 137 deletions

View File

@@ -115,7 +115,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.get_patch_slots = gr_gk20a_get_patch_slots,
.commit_global_attrib_cb = gr_gp10b_commit_global_attrib_cb,
.commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
.commit_global_pagepool = gr_gp10b_commit_global_pagepool,
.handle_sw_method = NULL,
.set_alpha_circular_buffer_size = NULL,
.set_circular_buffer_size = NULL,
@@ -133,7 +132,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.is_tpc_addr = gr_gm20b_is_tpc_addr,
.get_tpc_num = gr_gm20b_get_tpc_num,
.detect_sm_arch = vgpu_gr_detect_sm_arch,
.pagepool_default_size = gr_gp10b_pagepool_default_size,
.init_ctx_state = vgpu_gr_init_ctx_state,
.free_gr_ctx = vgpu_gr_free_gr_ctx,
.init_ctxsw_preemption_mode =
@@ -344,6 +342,10 @@ static const struct gpu_ops vgpu_gp10b_ops = {
gm20b_gr_init_get_global_ctx_pagepool_buffer_size,
.commit_global_bundle_cb =
gp10b_gr_init_commit_global_bundle_cb,
.pagepool_default_size =
gp10b_gr_init_pagepool_default_size,
.commit_global_pagepool =
gp10b_gr_init_commit_global_pagepool,
},
},
.perf = {

View File

@@ -136,7 +136,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.gr = {
.commit_global_attrib_cb = gr_gv11b_commit_global_attrib_cb,
.commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
.commit_global_pagepool = gr_gp10b_commit_global_pagepool,
.handle_sw_method = NULL,
.set_alpha_circular_buffer_size = NULL,
.set_circular_buffer_size = NULL,
@@ -154,7 +153,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.is_tpc_addr = gr_gm20b_is_tpc_addr,
.get_tpc_num = gr_gm20b_get_tpc_num,
.detect_sm_arch = vgpu_gr_detect_sm_arch,
.pagepool_default_size = gr_gv11b_pagepool_default_size,
.init_ctx_state = vgpu_gr_init_ctx_state,
.free_gr_ctx = vgpu_gr_free_gr_ctx,
.init_ctxsw_preemption_mode =
@@ -395,6 +393,10 @@ static const struct gpu_ops vgpu_gv11b_ops = {
gm20b_gr_init_get_global_ctx_pagepool_buffer_size,
.commit_global_bundle_cb =
gp10b_gr_init_commit_global_bundle_cb,
.pagepool_default_size =
gp10b_gr_init_pagepool_default_size,
.commit_global_pagepool =
gp10b_gr_init_commit_global_pagepool,
},
},
.perf = {

View File

@@ -721,22 +721,12 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
}
/* global pagepool buffer */
addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx,
NVGPU_GR_CTX_PAGEPOOL_VA) >>
U64(gr_scc_pagepool_base_addr_39_8_align_bits_v());
addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx, NVGPU_GR_CTX_PAGEPOOL_VA);
size = (u32)nvgpu_gr_global_ctx_get_size(gr->global_ctx_buffer,
NVGPU_GR_GLOBAL_CTX_PAGEPOOL) /
gr_scc_pagepool_total_pages_byte_granularity_v();
NVGPU_GR_GLOBAL_CTX_PAGEPOOL);
if (size == g->ops.gr.pagepool_default_size(g)) {
size = gr_scc_pagepool_total_pages_hwmax_v();
}
nvgpu_log_info(g, "pagepool buffer addr : 0x%016llx, size : %d",
addr, size);
g->ops.gr.commit_global_pagepool(g, gr_ctx, addr, size, patch);
g->ops.gr.init.commit_global_pagepool(g, gr_ctx, addr, size, patch,
true);
/* global bundle cb */
addr = nvgpu_gr_ctx_get_global_ctx_va(gr_ctx, NVGPU_GR_CTX_CIRCULAR_VA);
@@ -5303,29 +5293,6 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
return err;
}
void gr_gk20a_commit_global_pagepool(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx,
u64 addr, u32 size, bool patch)
{
BUG_ON(u64_hi32(addr) != 0U);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_pagepool_base_r(),
gr_scc_pagepool_base_addr_39_8_f((u32)addr), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_pagepool_r(),
gr_scc_pagepool_total_pages_f(size) |
gr_scc_pagepool_valid_true_f(), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_pagepool_base_r(),
gr_gpcs_gcc_pagepool_base_addr_39_8_f((u32)addr), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_pagepool_r(),
gr_gpcs_gcc_pagepool_total_pages_f(size), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_pd_pagepool_r(),
gr_pd_pagepool_total_pages_f(size) |
gr_pd_pagepool_valid_true_f(), patch);
}
void gk20a_init_gr(struct gk20a *g)
{
nvgpu_cond_init(&g->gr.init_wq);

View File

@@ -351,9 +351,6 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
u64 gpu_va,
u32 mode);
void gr_gk20a_commit_global_pagepool(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx,
u64 addr, u32 size, bool patch);
void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data);
void gr_gk20a_enable_hww_exceptions(struct gk20a *g);
int gr_gk20a_init_ctxsw_ucode(struct gk20a *g);

View File

@@ -156,18 +156,6 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
return 0;
}
void gr_gm20b_commit_global_pagepool(struct gk20a *g,
struct nvgpu_gr_ctx *ch_ctx,
u64 addr, u32 size, bool patch)
{
gr_gk20a_commit_global_pagepool(g, ch_ctx, addr, size, patch);
nvgpu_gr_ctx_patch_write(g, ch_ctx, gr_gpcs_swdx_rm_pagepool_r(),
gr_gpcs_swdx_rm_pagepool_total_pages_f(size) |
gr_gpcs_swdx_rm_pagepool_valid_true_f(), patch);
}
int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr,
u32 class_num, u32 offset, u32 data)
{
@@ -644,11 +632,6 @@ void gr_gm20b_detect_sm_arch(struct gk20a *g)
gr_gpc0_tpc0_sm_arch_warp_count_v(v);
}
u32 gr_gm20b_pagepool_default_size(struct gk20a *g)
{
return gr_scc_pagepool_total_pages_hwmax_value_v();
}
int gr_gm20b_init_ctxsw_preemption_mode(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm,
u32 class, u32 flags)

View File

@@ -49,9 +49,6 @@ void gr_gm20b_commit_global_attrib_cb(struct gk20a *g,
int gm20b_gr_tpc_disable_override(struct gk20a *g, u32 mask);
int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx, bool patch);
void gr_gm20b_commit_global_pagepool(struct gk20a *g,
struct nvgpu_gr_ctx *ch_ctx,
u64 addr, u32 size, bool patch);
int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr,
u32 class_num, u32 offset, u32 data);
void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data);
@@ -76,7 +73,6 @@ bool gr_gm20b_is_tpc_addr(struct gk20a *g, u32 addr);
u32 gr_gm20b_get_tpc_num(struct gk20a *g, u32 addr);
int gr_gm20b_load_ctxsw_ucode(struct gk20a *g);
void gr_gm20b_detect_sm_arch(struct gk20a *g);
u32 gr_gm20b_pagepool_default_size(struct gk20a *g);
int gr_gm20b_init_ctxsw_preemption_mode(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm,
u32 class, u32 flags);

View File

@@ -237,7 +237,6 @@ static const struct gpu_ops gm20b_ops = {
.get_patch_slots = gr_gk20a_get_patch_slots,
.commit_global_attrib_cb = gr_gm20b_commit_global_attrib_cb,
.commit_global_cb_manager = gr_gm20b_commit_global_cb_manager,
.commit_global_pagepool = gr_gm20b_commit_global_pagepool,
.handle_sw_method = gr_gm20b_handle_sw_method,
.set_alpha_circular_buffer_size =
gr_gm20b_set_alpha_circular_buffer_size,
@@ -258,7 +257,6 @@ static const struct gpu_ops gm20b_ops = {
.is_tpc_addr = gr_gm20b_is_tpc_addr,
.get_tpc_num = gr_gm20b_get_tpc_num,
.detect_sm_arch = gr_gm20b_detect_sm_arch,
.pagepool_default_size = gr_gm20b_pagepool_default_size,
.init_ctx_state = gr_gk20a_init_ctx_state,
.free_gr_ctx = gr_gk20a_free_gr_ctx,
.init_ctxsw_preemption_mode =
@@ -463,6 +461,10 @@ static const struct gpu_ops gm20b_ops = {
gm20b_gr_init_get_global_ctx_pagepool_buffer_size,
.commit_global_bundle_cb =
gm20b_gr_init_commit_global_bundle_cb,
.pagepool_default_size =
gm20b_gr_init_pagepool_default_size,
.commit_global_pagepool =
gm20b_gr_init_commit_global_pagepool,
},
.intr = {
.enable_interrupts = gm20b_gr_intr_enable_interrupts,

View File

@@ -161,7 +161,7 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
u32 spill_size =
gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v() *
gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v();
u32 pagepool_size = g->ops.gr.pagepool_default_size(g) *
u32 pagepool_size = g->ops.gr.init.pagepool_default_size(g) *
gr_scc_pagepool_total_pages_byte_granularity_v();
u32 betacb_size = g->ops.gr.init.get_attrib_cb_default_size(g) +
(gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v() -

View File

@@ -503,31 +503,6 @@ int gr_gp10b_commit_global_cb_manager(struct gk20a *g,
return 0;
}
void gr_gp10b_commit_global_pagepool(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx,
u64 addr, u32 size, bool patch)
{
nvgpu_assert(u64_hi32(addr) == 0U);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_pagepool_base_r(),
gr_scc_pagepool_base_addr_39_8_f((u32)addr), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_pagepool_r(),
gr_scc_pagepool_total_pages_f(size) |
gr_scc_pagepool_valid_true_f(), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_pagepool_base_r(),
gr_gpcs_gcc_pagepool_base_addr_39_8_f((u32)addr), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_pagepool_r(),
gr_gpcs_gcc_pagepool_total_pages_f(size), patch);
}
u32 gr_gp10b_pagepool_default_size(struct gk20a *g)
{
return gr_scc_pagepool_total_pages_hwmax_value_v();
}
static void gr_gp10b_set_go_idle_timeout(struct gk20a *g, u32 data)
{
gk20a_writel(g, gr_fe_go_idle_timeout_r(), data);
@@ -985,18 +960,12 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
nvgpu_log_info(g, "attrib cb addr : 0x%016x", addr);
g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true);
addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >>
gr_scc_pagepool_base_addr_39_8_align_bits_v()) |
(u64_hi32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) <<
(32U - gr_scc_pagepool_base_addr_39_8_align_bits_v()));
addr = gr_ctx->pagepool_ctxsw_buffer.gpu_va;
nvgpu_assert(gr_ctx->pagepool_ctxsw_buffer.size <= U32_MAX);
size = (u32)gr_ctx->pagepool_ctxsw_buffer.size;
if (size == g->ops.gr.pagepool_default_size(g)) {
size = gr_scc_pagepool_total_pages_hwmax_v();
}
g->ops.gr.commit_global_pagepool(g, gr_ctx, addr, size, true);
g->ops.gr.init.commit_global_pagepool(g, gr_ctx, addr, size,
true, false);
addr = (u64_lo32(gr_ctx->spill_ctxsw_buffer.gpu_va) >>
gr_gpc0_swdx_rm_spill_buffer_addr_39_8_align_bits_v()) |
@@ -1893,7 +1862,7 @@ u32 gp10b_gr_get_ctx_spill_size(struct gk20a *g)
u32 gp10b_gr_get_ctx_pagepool_size(struct gk20a *g)
{
return g->ops.gr.pagepool_default_size(g) *
return g->ops.gr.init.pagepool_default_size(g) *
gr_scc_pagepool_total_pages_byte_granularity_v();
}

View File

@@ -72,10 +72,6 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event);
int gr_gp10b_commit_global_cb_manager(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx, bool patch);
void gr_gp10b_commit_global_pagepool(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx,
u64 addr, u32 size, bool patch);
u32 gr_gp10b_pagepool_default_size(struct gk20a *g);
void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data);
void gr_gp10b_set_bes_crop_debug4(struct gk20a *g, u32 data);
int gr_gp10b_handle_sw_method(struct gk20a *g, u32 addr,

View File

@@ -261,7 +261,6 @@ static const struct gpu_ops gp10b_ops = {
.get_patch_slots = gr_gk20a_get_patch_slots,
.commit_global_attrib_cb = gr_gp10b_commit_global_attrib_cb,
.commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
.commit_global_pagepool = gr_gp10b_commit_global_pagepool,
.handle_sw_method = gr_gp10b_handle_sw_method,
.set_alpha_circular_buffer_size =
gr_gp10b_set_alpha_circular_buffer_size,
@@ -282,7 +281,6 @@ static const struct gpu_ops gp10b_ops = {
.is_tpc_addr = gr_gm20b_is_tpc_addr,
.get_tpc_num = gr_gm20b_get_tpc_num,
.detect_sm_arch = gr_gm20b_detect_sm_arch,
.pagepool_default_size = gr_gp10b_pagepool_default_size,
.init_ctx_state = gr_gp10b_init_ctx_state,
.free_gr_ctx = gr_gk20a_free_gr_ctx,
.init_ctxsw_preemption_mode =
@@ -540,6 +538,10 @@ static const struct gpu_ops gp10b_ops = {
gm20b_gr_init_get_global_ctx_pagepool_buffer_size,
.commit_global_bundle_cb =
gp10b_gr_init_commit_global_bundle_cb,
.pagepool_default_size =
gp10b_gr_init_pagepool_default_size,
.commit_global_pagepool =
gp10b_gr_init_commit_global_pagepool,
},
.intr = {
.enable_interrupts = gm20b_gr_intr_enable_interrupts,

View File

@@ -373,7 +373,6 @@ static const struct gpu_ops gv100_ops = {
.get_patch_slots = gr_gv100_get_patch_slots,
.commit_global_attrib_cb = gr_gv11b_commit_global_attrib_cb,
.commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
.commit_global_pagepool = gr_gp10b_commit_global_pagepool,
.handle_sw_method = gr_gv11b_handle_sw_method,
.set_alpha_circular_buffer_size =
gr_gv11b_set_alpha_circular_buffer_size,
@@ -394,7 +393,6 @@ static const struct gpu_ops gv100_ops = {
.is_tpc_addr = gr_gm20b_is_tpc_addr,
.get_tpc_num = gr_gm20b_get_tpc_num,
.detect_sm_arch = gr_gv11b_detect_sm_arch,
.pagepool_default_size = gr_gv11b_pagepool_default_size,
.init_ctx_state = gr_gp10b_init_ctx_state,
.free_gr_ctx = gr_gk20a_free_gr_ctx,
.init_ctxsw_preemption_mode =
@@ -683,6 +681,10 @@ static const struct gpu_ops gv100_ops = {
gm20b_gr_init_get_global_ctx_pagepool_buffer_size,
.commit_global_bundle_cb =
gp10b_gr_init_commit_global_bundle_cb,
.pagepool_default_size =
gp10b_gr_init_pagepool_default_size,
.commit_global_pagepool =
gp10b_gr_init_commit_global_pagepool,
},
.intr = {
.enable_interrupts = gm20b_gr_intr_enable_interrupts,

View File

@@ -1102,11 +1102,6 @@ int gr_gv11b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
return 0;
}
u32 gr_gv11b_pagepool_default_size(struct gk20a *g)
{
return gr_scc_pagepool_total_pages_hwmax_value_v();
}
void gr_gv11b_set_go_idle_timeout(struct gk20a *g, u32 data)
{
gk20a_writel(g, gr_fe_go_idle_timeout_r(), data);
@@ -1435,18 +1430,12 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
nvgpu_log_info(g, "attrib cb addr : 0x%016x", addr);
g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true);
addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >>
gr_scc_pagepool_base_addr_39_8_align_bits_v()) |
(u64_hi32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) <<
(32U - gr_scc_pagepool_base_addr_39_8_align_bits_v()));
BUG_ON(gr_ctx->pagepool_ctxsw_buffer.size > U32_MAX);
addr = gr_ctx->pagepool_ctxsw_buffer.gpu_va;
nvgpu_assert(gr_ctx->pagepool_ctxsw_buffer.size <= U32_MAX);
size = (u32)gr_ctx->pagepool_ctxsw_buffer.size;
if (size == g->ops.gr.pagepool_default_size(g)) {
size = gr_scc_pagepool_total_pages_hwmax_v();
}
g->ops.gr.commit_global_pagepool(g, gr_ctx, addr, size, true);
g->ops.gr.init.commit_global_pagepool(g, gr_ctx, addr, size,
true, false);
addr = (u64_lo32(gr_ctx->spill_ctxsw_buffer.gpu_va) >>
gr_gpc0_swdx_rm_spill_buffer_addr_39_8_align_bits_v()) |
@@ -4310,7 +4299,7 @@ u32 gv11b_gr_get_ctx_spill_size(struct gk20a *g)
u32 gv11b_gr_get_ctx_pagepool_size(struct gk20a *g)
{
return g->ops.gr.pagepool_default_size(g) *
return g->ops.gr.init.pagepool_default_size(g) *
gr_scc_pagepool_total_pages_byte_granularity_v();
}

View File

@@ -92,7 +92,6 @@ int gr_gv11b_handle_gpc_gpccs_exception(struct gk20a *g, u32 gpc,
void gr_gv11b_enable_gpc_exceptions(struct gk20a *g);
int gr_gv11b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
bool *post_event);
u32 gr_gv11b_pagepool_default_size(struct gk20a *g);
int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr,
u32 class_num, u32 offset, u32 data);
void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data);

View File

@@ -324,7 +324,6 @@ static const struct gpu_ops gv11b_ops = {
.get_patch_slots = gr_gv100_get_patch_slots,
.commit_global_attrib_cb = gr_gv11b_commit_global_attrib_cb,
.commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
.commit_global_pagepool = gr_gp10b_commit_global_pagepool,
.handle_sw_method = gr_gv11b_handle_sw_method,
.set_alpha_circular_buffer_size =
gr_gv11b_set_alpha_circular_buffer_size,
@@ -345,7 +344,6 @@ static const struct gpu_ops gv11b_ops = {
.is_tpc_addr = gr_gm20b_is_tpc_addr,
.get_tpc_num = gr_gm20b_get_tpc_num,
.detect_sm_arch = gr_gv11b_detect_sm_arch,
.pagepool_default_size = gr_gv11b_pagepool_default_size,
.init_ctx_state = gr_gp10b_init_ctx_state,
.free_gr_ctx = gr_gk20a_free_gr_ctx,
.powergate_tpc = gr_gv11b_powergate_tpc,
@@ -643,6 +641,10 @@ static const struct gpu_ops gv11b_ops = {
gm20b_gr_init_get_global_ctx_pagepool_buffer_size,
.commit_global_bundle_cb =
gp10b_gr_init_commit_global_bundle_cb,
.pagepool_default_size =
gp10b_gr_init_pagepool_default_size,
.commit_global_pagepool =
gp10b_gr_init_commit_global_pagepool,
},
.intr = {
.enable_interrupts = gm20b_gr_intr_enable_interrupts,

View File

@@ -23,6 +23,7 @@
#include <nvgpu/gk20a.h>
#include <nvgpu/io.h>
#include <nvgpu/log.h>
#include <nvgpu/bug.h>
#include <nvgpu/timers.h>
#include <nvgpu/enabled.h>
#include <nvgpu/engine_status.h>
@@ -816,7 +817,7 @@ u32 gm20b_gr_init_get_global_ctx_cb_buffer_size(struct gk20a *g)
u32 gm20b_gr_init_get_global_ctx_pagepool_buffer_size(struct gk20a *g)
{
return g->ops.gr.pagepool_default_size(g) *
return g->ops.gr.init.pagepool_default_size(g) *
gr_scc_pagepool_total_pages_byte_granularity_v();
}
@@ -860,3 +861,51 @@ void gm20b_gr_init_commit_global_bundle_cb(struct gk20a *g,
gr_pd_ab_dist_cfg2_state_limit_f(data), patch);
}
u32 gm20b_gr_init_pagepool_default_size(struct gk20a *g)
{
return gr_scc_pagepool_total_pages_hwmax_value_v();
}
void gm20b_gr_init_commit_global_pagepool(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx, u64 addr, u32 size, bool patch,
bool global_ctx)
{
addr = (u64_lo32(addr) >>
U64(gr_scc_pagepool_base_addr_39_8_align_bits_v()) |
(u64_hi32(addr) <<
U64(32U - gr_scc_pagepool_base_addr_39_8_align_bits_v())));
if (global_ctx) {
size = size / gr_scc_pagepool_total_pages_byte_granularity_v();
}
if (size == g->ops.gr.init.pagepool_default_size(g)) {
size = gr_scc_pagepool_total_pages_hwmax_v();
}
nvgpu_assert(u64_hi32(addr) == 0U);
nvgpu_log_info(g, "pagepool buffer addr : 0x%016llx, size : %d",
addr, size);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_pagepool_base_r(),
gr_scc_pagepool_base_addr_39_8_f((u32)addr), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_pagepool_r(),
gr_scc_pagepool_total_pages_f(size) |
gr_scc_pagepool_valid_true_f(), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_pagepool_base_r(),
gr_gpcs_gcc_pagepool_base_addr_39_8_f((u32)addr), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_pagepool_r(),
gr_gpcs_gcc_pagepool_total_pages_f(size), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_pd_pagepool_r(),
gr_pd_pagepool_total_pages_f(size) |
gr_pd_pagepool_valid_true_f(), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_swdx_rm_pagepool_r(),
gr_gpcs_swdx_rm_pagepool_total_pages_f(size) |
gr_gpcs_swdx_rm_pagepool_valid_true_f(), patch);
}

View File

@@ -75,5 +75,9 @@ u32 gm20b_gr_init_get_global_ctx_pagepool_buffer_size(struct gk20a *g);
void gm20b_gr_init_commit_global_bundle_cb(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx, u64 addr, u64 size, bool patch);
u32 gm20b_gr_init_pagepool_default_size(struct gk20a *g);
void gm20b_gr_init_commit_global_pagepool(struct gk20a *g,
struct nvgpu_gr_ctx *ch_ctx, u64 addr, u32 size, bool patch,
bool global_ctx);
#endif /* NVGPU_GR_INIT_GM20B_H */

View File

@@ -334,3 +334,42 @@ void gp10b_gr_init_commit_global_bundle_cb(struct gk20a *g,
gr_pd_ab_dist_cfg2_state_limit_f(data), patch);
}
u32 gp10b_gr_init_pagepool_default_size(struct gk20a *g)
{
return gr_scc_pagepool_total_pages_hwmax_value_v();
}
void gp10b_gr_init_commit_global_pagepool(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx, u64 addr, u32 size, bool patch,
bool global_ctx)
{
addr = (u64_lo32(addr) >>
U64(gr_scc_pagepool_base_addr_39_8_align_bits_v()) |
(u64_hi32(addr) <<
U64(32U - gr_scc_pagepool_base_addr_39_8_align_bits_v())));
if (global_ctx) {
size = size / gr_scc_pagepool_total_pages_byte_granularity_v();
}
if (size == g->ops.gr.init.pagepool_default_size(g)) {
size = gr_scc_pagepool_total_pages_hwmax_v();
}
nvgpu_assert(u64_hi32(addr) == 0U);
nvgpu_log_info(g, "pagepool buffer addr : 0x%016llx, size : %d",
addr, size);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_pagepool_base_r(),
gr_scc_pagepool_base_addr_39_8_f((u32)addr), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_pagepool_r(),
gr_scc_pagepool_total_pages_f(size) |
gr_scc_pagepool_valid_true_f(), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_pagepool_base_r(),
gr_gpcs_gcc_pagepool_base_addr_39_8_f((u32)addr), patch);
nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_gcc_pagepool_r(),
gr_gpcs_gcc_pagepool_total_pages_f(size), patch);
}

View File

@@ -26,6 +26,7 @@
#include <nvgpu/types.h>
struct gk20a;
struct nvgpu_gr_ctx;
void gp10b_gr_init_get_access_map(struct gk20a *g,
u32 **whitelist, int *num_entries);
@@ -48,5 +49,9 @@ u32 gp10b_gr_init_get_global_attr_cb_size(struct gk20a *g, u32 tpc_count,
void gp10b_gr_init_commit_global_bundle_cb(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx, u64 addr, u64 size, bool patch);
u32 gp10b_gr_init_pagepool_default_size(struct gk20a *g);
void gp10b_gr_init_commit_global_pagepool(struct gk20a *g,
struct nvgpu_gr_ctx *ch_ctx, u64 addr, u32 size, bool patch,
bool global_ctx);
#endif /* NVGPU_GR_INIT_GP10B_H */

View File

@@ -265,9 +265,6 @@ struct gpu_ops {
int (*commit_global_cb_manager)(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx,
bool patch);
void (*commit_global_pagepool)(struct gk20a *g,
struct nvgpu_gr_ctx *ch_ctx,
u64 addr, u32 size, bool patch);
int (*handle_sw_method)(struct gk20a *g, u32 addr,
u32 class_num, u32 offset, u32 data);
void (*set_alpha_circular_buffer_size)(struct gk20a *g,
@@ -314,7 +311,6 @@ struct gpu_ops {
u32 (*get_tpc_num)(struct gk20a *g, u32 addr);
u32 (*get_egpc_base)(struct gk20a *g);
void (*detect_sm_arch)(struct gk20a *g);
u32 (*pagepool_default_size)(struct gk20a *g);
int (*init_ctx_state)(struct gk20a *g);
void (*free_gr_ctx)(struct gk20a *g,
struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx);
@@ -724,6 +720,10 @@ struct gpu_ops {
void (*commit_global_bundle_cb)(struct gk20a *g,
struct nvgpu_gr_ctx *ch_ctx, u64 addr, u64 size,
bool patch);
u32 (*pagepool_default_size)(struct gk20a *g);
void (*commit_global_pagepool)(struct gk20a *g,
struct nvgpu_gr_ctx *ch_ctx, u64 addr, u32 size,
bool patch, bool global_ctx);
} init;
struct {

View File

@@ -393,7 +393,6 @@ static const struct gpu_ops tu104_ops = {
.get_patch_slots = gr_gv100_get_patch_slots,
.commit_global_attrib_cb = gr_gv11b_commit_global_attrib_cb,
.commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
.commit_global_pagepool = gr_gp10b_commit_global_pagepool,
.handle_sw_method = gr_tu104_handle_sw_method,
.set_alpha_circular_buffer_size =
gr_gv11b_set_alpha_circular_buffer_size,
@@ -414,7 +413,6 @@ static const struct gpu_ops tu104_ops = {
.is_tpc_addr = gr_gm20b_is_tpc_addr,
.get_tpc_num = gr_gm20b_get_tpc_num,
.detect_sm_arch = gr_gv11b_detect_sm_arch,
.pagepool_default_size = gr_gv11b_pagepool_default_size,
.init_ctx_state = gr_gp10b_init_ctx_state,
.free_gr_ctx = gr_gk20a_free_gr_ctx,
.init_ctxsw_preemption_mode =
@@ -715,6 +713,10 @@ static const struct gpu_ops tu104_ops = {
gm20b_gr_init_get_global_ctx_pagepool_buffer_size,
.commit_global_bundle_cb =
gp10b_gr_init_commit_global_bundle_cb,
.pagepool_default_size =
gp10b_gr_init_pagepool_default_size,
.commit_global_pagepool =
gp10b_gr_init_commit_global_pagepool,
},
.intr = {
.enable_interrupts = gm20b_gr_intr_enable_interrupts,