gpu: nvgpu: Move programming of debug page to FB

The debug page was previously allocated and programmed into the HUB MMU
from GR code. This created a dependency from GR to FB, and GR is in any
case the wrong place for it. Move the memory-allocation code into the
generic MM code, and the code that programs the addresses into FB.

Change-Id: Ib6d3c96efde6794cf5e8cd4c908525c85b57c233
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1801423
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Terje Bergstrom
2018-08-16 14:47:01 -07:00
committed by mobile promotions
parent 83efad7adb
commit c86f185d10
5 changed files with 60 additions and 62 deletions

View File

@@ -60,7 +60,6 @@
#include <nvgpu/hw/gk20a/hw_ram_gk20a.h>
#include <nvgpu/hw/gk20a/hw_pri_ringmaster_gk20a.h>
#include <nvgpu/hw/gk20a/hw_top_gk20a.h>
#include <nvgpu/hw/gk20a/hw_fb_gk20a.h>
#include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
#define BLK_SIZE (256)
@@ -3153,9 +3152,6 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
gr_gk20a_free_global_ctx_buffers(g);
nvgpu_dma_free(g, &gr->mmu_wr_mem);
nvgpu_dma_free(g, &gr->mmu_rd_mem);
nvgpu_dma_free(g, &gr->compbit_store.mem);
memset(&gr->compbit_store, 0, sizeof(struct compbit_store_desc));
@@ -3495,31 +3491,6 @@ clean_up:
return -ENOMEM;
}
/*
 * Allocate the MMU debug read/write buffers used by the HUB MMU.
 *
 * Each buffer is a 4 KiB (0x1000-byte) sysmem allocation; allocation is
 * skipped when the buffer is already valid, so the function is safe to
 * call more than once.
 *
 * Returns 0 on success, or the negative error code reported by the
 * failed allocation. If the read buffer fails to allocate, the write
 * buffer allocated in the same call is freed before returning.
 */
static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
{
	int err;

	if (!nvgpu_mem_is_valid(&gr->mmu_wr_mem)) {
		err = nvgpu_dma_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
		if (err) {
			goto err;
		}
	}

	if (!nvgpu_mem_is_valid(&gr->mmu_rd_mem)) {
		err = nvgpu_dma_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
		if (err) {
			goto err_free_wr_mem;
		}
	}

	return 0;

err_free_wr_mem:
	nvgpu_dma_free(g, &gr->mmu_wr_mem);
err:
	/*
	 * Propagate the allocator's actual error code rather than
	 * collapsing every failure to -ENOMEM.
	 */
	return err;
}
static u32 prime_set[18] = {
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61 };
@@ -4529,35 +4500,11 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
struct aiv_list_gk20a *sw_ctx_load = &g->gr.ctx_vars.sw_ctx_load;
struct av_list_gk20a *sw_method_init = &g->gr.ctx_vars.sw_method_init;
u32 data;
u64 addr;
u32 last_method_data = 0;
u32 i, err;
nvgpu_log_fn(g, " ");
/* init mmu debug buffer */
addr = nvgpu_mem_get_addr(g, &gr->mmu_wr_mem);
addr >>= fb_mmu_debug_wr_addr_alignment_v();
gk20a_writel(g, fb_mmu_debug_wr_r(),
nvgpu_aperture_mask(g, &gr->mmu_wr_mem,
fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(),
fb_mmu_debug_wr_aperture_sys_mem_coh_f(),
fb_mmu_debug_wr_aperture_vid_mem_f()) |
fb_mmu_debug_wr_vol_false_f() |
fb_mmu_debug_wr_addr_f(addr));
addr = nvgpu_mem_get_addr(g, &gr->mmu_rd_mem);
addr >>= fb_mmu_debug_rd_addr_alignment_v();
gk20a_writel(g, fb_mmu_debug_rd_r(),
nvgpu_aperture_mask(g, &gr->mmu_rd_mem,
fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(),
fb_mmu_debug_wr_aperture_sys_mem_coh_f(),
fb_mmu_debug_rd_aperture_vid_mem_f()) |
fb_mmu_debug_rd_vol_false_f() |
fb_mmu_debug_rd_addr_f(addr));
if (g->ops.gr.init_gpc_mmu) {
g->ops.gr.init_gpc_mmu(g);
}
@@ -4940,11 +4887,6 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
goto clean_up;
}
err = gr_gk20a_init_mmu_sw(g, gr);
if (err) {
goto clean_up;
}
err = gr_gk20a_init_map_tiles(g, gr);
if (err) {
goto clean_up;