gpu: nvgpu: support allocating CBC in vidmem

Update the nvgpu_ltc_alloc_cbc() API to accept a flag requesting vidmem
allocation, and allocate the CBC memory in vidmem when the flag is set.

Bug 2180284
Jira NVGPUT-12

Change-Id: Ia6b9bb670c9fab6b5787de15526fcc753d702a73
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1805468
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Authored by Deepak Nibade on 2018-08-23 16:46:15 +05:30; committed by mobile promotions
parent 8e66c5816d
commit c3e18d9474
5 changed files with 25 additions and 8 deletions
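
Every caller updated in this change passes false, preserving the existing sysmem
allocation. As a hedged sketch (not part of this change) of how a vidmem-capable
chip's comptag init path could opt in through the new signature:

	/* Sketch only: a hypothetical caller requesting the CBC backing store
	 * in vidmem via the new flag; the callers touched by this change all
	 * pass false. */
	err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size, true);
	if (err) {
		return err;
	}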

@@ -93,7 +93,7 @@ int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
nvgpu_log_info(g, "max comptag lines : %d",
max_comptag_lines);
err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size, false);
if (err) {
return err;
}

@@ -117,7 +117,7 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
nvgpu_log_info(g, "gobs_per_comptagline_per_slice: %d",
gobs_per_comptagline_per_slice);
err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size, false);
if (err) {
return err;
}

@@ -29,6 +29,7 @@ struct gk20a;
 
 int nvgpu_init_ltc_support(struct gk20a *g);
 void nvgpu_ltc_sync_enabled(struct gk20a *g);
-int nvgpu_ltc_alloc_cbc(struct gk20a *g, size_t compbit_backing_size);
+int nvgpu_ltc_alloc_cbc(struct gk20a *g, size_t compbit_backing_size,
+			bool vidmem_alloc);
 
 #endif

@@ -27,7 +27,8 @@
#include "gk20a/gk20a.h"
#include "gk20a/gr_gk20a.h"
int nvgpu_ltc_alloc_cbc(struct gk20a *g, size_t compbit_backing_size)
int nvgpu_ltc_alloc_cbc(struct gk20a *g, size_t compbit_backing_size,
bool vidmem_alloc)
{
struct gr_gk20a *gr = &g->gr;
unsigned long flags = 0;
@@ -35,11 +36,25 @@ int nvgpu_ltc_alloc_cbc(struct gk20a *g, size_t compbit_backing_size)
 	if (nvgpu_mem_is_valid(&gr->compbit_store.mem))
 		return 0;
 
-	if (!nvgpu_iommuable(g))
-		flags = NVGPU_DMA_FORCE_CONTIGUOUS;
+	if (vidmem_alloc) {
+		/*
+		 * Backing store MUST be physically contiguous and allocated in
+		 * one chunk
+		 * Vidmem allocation API does not support FORCE_CONTIGUOUS like
+		 * flag to allocate contiguous memory
+		 * But this allocation will happen in vidmem bootstrap allocator
+		 * which always allocates contiguous memory
+		 */
+		return nvgpu_dma_alloc_vid(g,
+				compbit_backing_size,
+				&gr->compbit_store.mem);
+	} else {
+		if (!nvgpu_iommuable(g))
+			flags = NVGPU_DMA_FORCE_CONTIGUOUS;
 
-	return nvgpu_dma_alloc_flags_sys(g,
-					 flags,
-					 compbit_backing_size,
-					 &gr->compbit_store.mem);
+		return nvgpu_dma_alloc_flags_sys(g,
+				flags,
+				compbit_backing_size,
+				&gr->compbit_store.mem);
+	}
 }
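
The comment above carries the key constraint: the backing store must be one
physically contiguous chunk, which the vidmem bootstrap allocator guarantees by
construction, while the sysmem path only needs NVGPU_DMA_FORCE_CONTIGUOUS when
the GPU is not behind an IOMMU. As a hedged restatement of the same selection
logic in a standalone helper (the helper name is hypothetical; the nvgpu calls
are those used in the hunk above):

	/* Hypothetical helper mirroring the branch above: vidmem when requested,
	 * otherwise sysmem, forced contiguous only when no IOMMU is available. */
	static int cbc_backing_store_alloc(struct gk20a *g, size_t size,
			struct nvgpu_mem *mem, bool vidmem_alloc)
	{
		unsigned long flags = 0;

		if (vidmem_alloc) {
			/* Vidmem bootstrap allocator always returns contiguous memory. */
			return nvgpu_dma_alloc_vid(g, size, mem);
		}

		if (!nvgpu_iommuable(g)) {
			/* No IOMMU: the CBC backing store must be physically contiguous. */
			flags = NVGPU_DMA_FORCE_CONTIGUOUS;
		}

		return nvgpu_dma_alloc_flags_sys(g, flags, size, mem);
	}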

@@ -43,7 +43,8 @@ void nvgpu_ecc_sysfs_remove(struct gk20a *g)
 {
 }
 
-int nvgpu_ltc_alloc_cbc(struct gk20a *g, size_t compbit_backing_size)
+int nvgpu_ltc_alloc_cbc(struct gk20a *g, size_t compbit_backing_size,
+			bool vidmem_alloc)
 {
 	return 0;
 }