gpu: nvgpu: Introduce queries for big page sizes

Introduce query functions for default big page size and available
big page sizes. Move initialization of GPU characteristics big
page sizes to the GPU characteristics query function.

JIRA NVGPU-259

Change-Id: Ie66cc2fbfcd88205593056f8d5010ac2539c8bc2
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1593685
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Terje Bergstrom <tbergstrom@nvidia.com>
Date:      2017-11-07 09:56:40 -08:00
Committer: mobile promotions
Commit:    58dd20f86b (parent: a51219e526)

5 changed files, 34 insertions(+), 13 deletions(-)
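
For orientation, here is a minimal, self-contained sketch of the contract the two new queries establish. It is an illustration only: the stub helpers and the 64K/128K size values are assumptions, not part of this change; in the driver the real entry points are nvgpu_mm_get_default_big_page_size() and nvgpu_mm_get_available_big_page_sizes(), operating on a struct gk20a.

	/* Illustrative stand-ins (assumed, not nvgpu code) for the two queries. */
	#include <stdbool.h>
	#include <stdio.h>

	#define SZ_64K	(64u << 10)
	#define SZ_128K	(128u << 10)

	static unsigned int default_big_page_size(bool disable_bigpage)
	{
		/* Mirrors nvgpu_mm_get_default_big_page_size(): 0 when disabled. */
		return disable_bigpage ? 0u : SZ_64K;
	}

	static unsigned int available_big_page_sizes(bool disable_bigpage)
	{
		/* Mirrors nvgpu_mm_get_available_big_page_sizes(): a bitmask of
		 * power-of-two sizes, 0 when big pages are disabled. */
		return disable_bigpage ? 0u : (SZ_64K | SZ_128K);
	}

	/* Same shape as the check in gk20a_vm_alloc_share(): the requested size
	 * must be a power of two and present in the availability mask. */
	static bool big_page_size_is_valid(unsigned int size, bool disable_bigpage)
	{
		if (size == 0u || (size & (size - 1u)) != 0u)
			return false;
		return (size & available_big_page_sizes(disable_bigpage)) != 0u;
	}

	int main(void)
	{
		printf("64K valid:            %d\n", big_page_size_is_valid(SZ_64K, false));
		printf("64K valid (disabled): %d\n", big_page_size_is_valid(SZ_64K, true));
		printf("default size:         %u\n", default_big_page_size(false));
		return 0;
	}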

@@ -62,7 +62,7 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
 		if (!is_power_of_2(big_page_size))
			return -EINVAL;

-		if (!(big_page_size & g->gpu_characteristics.available_big_page_sizes))
+		if (!(big_page_size & nvgpu_mm_get_available_big_page_sizes(g)))
			return -EINVAL;
	}

@@ -211,6 +211,11 @@ gk20a_ctrl_ioctl_gpu_characteristics(
	pgpu->vbios_version = g->bios.vbios_version;
	pgpu->vbios_oem_version = g->bios.vbios_oem_version;

+	pgpu->big_page_size = nvgpu_mm_get_default_big_page_size(g);
+	pgpu->pde_coverage_bit_count =
+		g->ops.mm.get_mmu_levels(g, pgpu->big_page_size)[0].lo_bit[0];
+	pgpu->available_big_page_sizes = nvgpu_mm_get_available_big_page_sizes(g);
+
	if (request->gpu_characteristics_buf_size > 0) {
		size_t write_size = sizeof(*pgpu);

@@ -423,3 +423,29 @@ int nvgpu_init_mm_support(struct gk20a *g)

	return err;
 }
+
+u32 nvgpu_mm_get_default_big_page_size(struct gk20a *g)
+{
+	u32 big_page_size;
+
+	big_page_size = g->ops.mm.get_default_big_page_size();
+
+	if (g->mm.disable_bigpage)
+		big_page_size = 0;
+
+	return big_page_size;
+}
+
+u32 nvgpu_mm_get_available_big_page_sizes(struct gk20a *g)
+{
+	u32 available_big_page_sizes = 0;
+
+	if (!g->mm.disable_bigpage) {
+		available_big_page_sizes =
+			g->ops.mm.get_default_big_page_size();
+		if (g->ops.mm.get_big_page_sizes)
+			available_big_page_sizes |= g->ops.mm.get_big_page_sizes();
+	}
+
+	return available_big_page_sizes;
+}
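
As a decoding aid, a hedged sketch (illustrative, not nvgpu code) of how a consumer could enumerate the individual power-of-two sizes packed into the bitmask returned by nvgpu_mm_get_available_big_page_sizes(); the 64K|128K example mask is an assumption:

	#include <stdio.h>

	/* Each set bit in the availability mask is one power-of-two size. */
	static void print_big_page_sizes(unsigned int mask)
	{
		while (mask != 0u) {
			unsigned int size = mask & ~(mask - 1u); /* lowest set bit */
			printf("%u KiB\n", size >> 10);
			mask &= mask - 1u; /* clear that bit and continue */
		}
	}

	int main(void)
	{
		print_big_page_sizes((64u << 10) | (128u << 10)); /* assumed mask */
		return 0;
	}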

@@ -394,18 +394,6 @@ int gk20a_init_gpu_characteristics(struct gk20a *g)
	gpu->bus_type = NVGPU_GPU_BUS_TYPE_AXI; /* always AXI for now */

	gpu->compression_page_size = g->ops.fb.compression_page_size(g);

-	gpu->big_page_size = g->ops.mm.get_default_big_page_size();
-	gpu->pde_coverage_bit_count =
-		g->ops.mm.get_mmu_levels(g, gpu->big_page_size)[0].lo_bit[0];
-
-	if (g->mm.disable_bigpage) {
-		gpu->big_page_size = 0;
-		gpu->available_big_page_sizes = 0;
-	} else {
-		gpu->available_big_page_sizes = gpu->big_page_size;
-		if (g->ops.mm.get_big_page_sizes)
-			gpu->available_big_page_sizes |= g->ops.mm.get_big_page_sizes();
-	}
	__nvgpu_set_enabled(g, NVGPU_SUPPORT_PARTIAL_MAPPINGS, true);
	__nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_DIRECT_KIND_CTRL, true);

@@ -216,5 +216,7 @@ u64 nvgpu_inst_block_addr(struct gk20a *g, struct nvgpu_mem *mem);
 void nvgpu_free_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block);

 int nvgpu_mm_suspend(struct gk20a *g);
+u32 nvgpu_mm_get_default_big_page_size(struct gk20a *g);
+u32 nvgpu_mm_get_available_big_page_sizes(struct gk20a *g);

 #endif