gpu: nvgpu: Remove support_sparse() HAL in MM

The support_sparse HAL serves only one purpose: it returns true or
false depending on whether the given chip supports sparse mappings.
The HAL is used, in turn, to program (or not) the
NVGPU_SUPPORT_SPARSE_ALLOCS enabled flag. Instead of going through
this rigmarole just to program one flag, program it unconditionally
for all native GPUs. Then, in the vGPU specific characteristics
function, disable it explicitly; overriding flags there already has
precedent.
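
For context, the enabled flags are runtime capability bits kept per
GPU; a minimal sketch of the pattern this relies on
(nvgpu_set_enabled() and nvgpu_is_enabled() are the driver's flag
accessors, declared in nvgpu/enabled.h; the consumer shown is
illustrative, not actual driver code):

	/* Native init path now enables sparse allocs unconditionally. */
	nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, true);

	/* Consumers query the flag instead of calling a HAL op. */
	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS)) {
		/* advertise sparse mappings to userspace */
	}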

JIRA NVGPU-1737
JIRA NVGPU-1934

Change-Id: I630928ad656aaffc09fdc6b7fec9fc423aa94c38
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2006796
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Alex Waterman
Date:   2019-01-29 14:20:16 -08:00
Committed by: mobile promotions
Commit: f2979bcdac (parent: 0027e22c78)

11 changed files with 3 additions and 21 deletions


@@ -500,10 +500,7 @@ void gk20a_init_gpu_characteristics(struct gk20a *g)
 {
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_DIRECT_KIND_CTRL, true);
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, true);
-	if ((g->ops.mm.support_sparse != NULL) && g->ops.mm.support_sparse(g)) {
-		nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, true);
-	}
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, true);
 
 	/*
 	 * Fast submits are supported as long as the user doesn't request


@@ -571,7 +571,6 @@ static const struct gpu_ops gm20b_ops = {
 		.is_fw_defined = gm20b_netlist_is_firmware_defined,
 	},
 	.mm = {
-		.support_sparse = gm20b_mm_support_sparse,
 		.gmmu_map = gk20a_locked_gmmu_map,
 		.gmmu_unmap = gk20a_locked_gmmu_unmap,
 		.vm_bind_channel = gk20a_vm_bind_channel,


@@ -61,11 +61,6 @@ u32 gm20b_mm_get_default_big_page_size(void)
 	return SZ_64K;
 }
 
-bool gm20b_mm_support_sparse(struct gk20a *g)
-{
-	return true;
-}
-
 bool gm20b_mm_is_bar1_supported(struct gk20a *g)
 {
 	return true;


@@ -643,7 +643,6 @@ static const struct gpu_ops gp10b_ops = {
 	},
 #endif /* CONFIG_GK20A_CTXSW_TRACE */
 	.mm = {
-		.support_sparse = gm20b_mm_support_sparse,
 		.gmmu_map = gk20a_locked_gmmu_map,
 		.gmmu_unmap = gk20a_locked_gmmu_unmap,
 		.vm_bind_channel = gk20a_vm_bind_channel,


@@ -813,7 +813,6 @@ static const struct gpu_ops gv100_ops = {
 	},
 #endif /* CONFIG_GK20A_CTXSW_TRACE */
 	.mm = {
-		.support_sparse = gm20b_mm_support_sparse,
 		.gmmu_map = gk20a_locked_gmmu_map,
 		.gmmu_unmap = gk20a_locked_gmmu_unmap,
 		.vm_bind_channel = gk20a_vm_bind_channel,


@@ -773,7 +773,6 @@ static const struct gpu_ops gv11b_ops = {
 	},
 #endif /* CONFIG_GK20A_CTXSW_TRACE */
 	.mm = {
-		.support_sparse = gm20b_mm_support_sparse,
 		.gmmu_map = gk20a_locked_gmmu_map,
 		.gmmu_unmap = gk20a_locked_gmmu_unmap,
 		.vm_bind_channel = gk20a_vm_bind_channel,


@@ -1067,7 +1067,6 @@ struct gpu_ops {
 	} fecs_trace;
 #endif
 	struct {
-		bool (*support_sparse)(struct gk20a *g);
 		u64 (*gmmu_map)(struct vm_gk20a *vm,
 				u64 map_offset,
 				struct nvgpu_sgt *sgt,


@@ -844,7 +844,6 @@ static const struct gpu_ops tu104_ops = {
 	},
 #endif /* CONFIG_GK20A_CTXSW_TRACE */
 	.mm = {
-		.support_sparse = gm20b_mm_support_sparse,
 		.gmmu_map = gk20a_locked_gmmu_map,
 		.gmmu_unmap = gk20a_locked_gmmu_unmap,
 		.vm_bind_channel = gk20a_vm_bind_channel,


@@ -462,8 +462,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 	},
 #endif /* CONFIG_GK20A_CTXSW_TRACE */
 	.mm = {
-		/* FIXME: add support for sparse mappings */
-		.support_sparse = NULL,
 		.gmmu_map = vgpu_gp10b_locked_gmmu_map,
 		.gmmu_unmap = vgpu_locked_gmmu_unmap,
 		.vm_bind_channel = vgpu_vm_bind_channel,


@@ -540,8 +540,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 	},
 #endif /* CONFIG_GK20A_CTXSW_TRACE */
 	.mm = {
-		/* FIXME: add support for sparse mappings */
-		.support_sparse = NULL,
 		.gmmu_map = vgpu_gp10b_locked_gmmu_map,
 		.gmmu_unmap = vgpu_locked_gmmu_unmap,
 		.vm_bind_channel = vgpu_vm_bind_channel,


@@ -285,10 +285,10 @@ void vgpu_init_gpu_characteristics(struct gk20a *g)
 	gk20a_init_gpu_characteristics(g);
 
-	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, false);
 	/* features vgpu does not support */
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, false);
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, false);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, false);
 }
 
 int vgpu_read_ptimer(struct gk20a *g, u64 *value)
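
To make the ordering the commit message relies on explicit, a sketch
of the resulting vGPU flow (names taken from the hunks above; bodies
abbreviated):

	void vgpu_init_gpu_characteristics(struct gk20a *g)
	{
		/* Native path: turns NVGPU_SUPPORT_SPARSE_ALLOCS on. */
		gk20a_init_gpu_characteristics(g);

		/* vGPU then explicitly clears what it does not support. */
		nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, false);
	}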