gpu: nvgpu: compile out vidmem from safety build

Safety build does not support vidmem. This patch compiles out vidmem
related changes - vidmem, dma alloc, and cbc/acr/pmu alloc based on
vidmem - and corresponding tests such as pramin, page allocator, and
gmmu_map_unmap_vidmem.
As vidmem is applicable only in the case of dGPUs, the code is compiled
out using CONFIG_NVGPU_DGPU.

JIRA NVGPU-3524

Change-Id: Ic623801112484ffc071195e828ab9f290f945d4d
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2132773
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vaibhav Kachore <vkachore@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Sagar Kamble
2019-06-07 19:58:11 +05:30
committed by mobile promotions
parent c2eb26436a
commit a16cc2dde3
56 changed files with 253 additions and 444 deletions

View File

@@ -41,7 +41,9 @@ int nvgpu_mm_suspend(struct gk20a *g)
nvgpu_log_info(g, "MM suspend running...");
#ifdef CONFIG_NVGPU_DGPU
nvgpu_vidmem_thread_pause_sync(&g->mm);
#endif
#ifdef CONFIG_NVGPU_COMPRESSION
g->ops.mm.cache.cbc_clean(g);
@@ -114,6 +116,7 @@ static int nvgpu_alloc_sysmem_flush(struct gk20a *g)
#ifdef CONFIG_NVGPU_CE
static void nvgpu_remove_mm_ce_support(struct mm_gk20a *mm)
{
#ifdef CONFIG_NVGPU_DGPU
struct gk20a *g = gk20a_from_mm(mm);
if (mm->vidmem.ce_ctx_id != NVGPU_CE_INVAL_CTX_ID) {
@@ -122,6 +125,7 @@ static void nvgpu_remove_mm_ce_support(struct mm_gk20a *mm)
mm->vidmem.ce_ctx_id = NVGPU_CE_INVAL_CTX_ID;
nvgpu_vm_put(mm->ce.vm);
#endif
}
#endif
@@ -162,7 +166,9 @@ static void nvgpu_remove_mm_support(struct mm_gk20a *mm)
}
nvgpu_semaphore_sea_destroy(g);
#ifdef CONFIG_NVGPU_DGPU
nvgpu_vidmem_destroy(g);
#endif
nvgpu_pd_cache_fini(g);
if (g->ops.ramin.deinit_pdb_cache_war != NULL) {
@@ -297,7 +303,7 @@ static int nvgpu_init_mmu_debug(struct mm_gk20a *mm)
#ifdef CONFIG_NVGPU_CE
void nvgpu_init_mm_ce_context(struct gk20a *g)
{
#if defined(CONFIG_GK20A_VIDMEM)
#if defined(CONFIG_NVGPU_DGPU)
if (g->mm.vidmem.size > 0U &&
(g->mm.vidmem.ce_ctx_id == NVGPU_CE_INVAL_CTX_ID)) {
g->mm.vidmem.ce_ctx_id =
@@ -421,10 +427,11 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
U32(mm->channel.user_size >> U64(20)),
U32(mm->channel.kernel_size >> U64(20)));
nvgpu_init_pramin(mm);
#ifdef CONFIG_NVGPU_DGPU
mm->vidmem.ce_ctx_id = NVGPU_CE_INVAL_CTX_ID;
nvgpu_init_pramin(mm);
err = nvgpu_vidmem_init(mm);
if (err != 0) {
return err;
@@ -441,6 +448,7 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
return err;
}
}
#endif
err = nvgpu_alloc_sysmem_flush(g);
if (err != 0) {