Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Remove support_sparse() HAL in MM
The support_sparse() HAL serves only one purpose: return true or false depending on whether the given chip supports sparse mappings. This HAL is used, in turn, to program (or not) the NVGPU_SUPPORT_SPARSE_ALLOCS enabled flag. So instead of having all this rigmarole to program the flag, just program it for all native GPUs. Then, in the vGPU specific characteristics function, disable it explicitly. This seems to have precedent already.

JIRA NVGPU-1737
JIRA NVGPU-1934

Change-Id: I630928ad656aaffc09fdc6b7fec9fc423aa94c38
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2006796
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit f2979bcdac
parent 0027e22c78
committed by mobile promotions
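Condensed to its essence, the commit replaces a per-chip boolean HAL with an unconditional flag write plus an explicit vGPU opt-out. Below is a minimal before/after sketch distilled from the hunks that follow; the _old/_new suffixes are illustrative only, not names from the tree, while struct gk20a, g->ops.mm.support_sparse and nvgpu_set_enabled() are the driver's own identifiers.

        /*
         * Before: the flag was gated on a per-chip HAL that could be NULL
         * (the vGPU ops left it unset) or return false.
         */
        void gk20a_init_gpu_characteristics_old(struct gk20a *g)
        {
                if ((g->ops.mm.support_sparse != NULL) &&
                                g->ops.mm.support_sparse(g)) {
                        nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, true);
                }
        }

        /* After: every native GPU simply gets the flag. */
        void gk20a_init_gpu_characteristics_new(struct gk20a *g)
        {
                nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, true);
        }

        /*
         * The vGPU path runs the common init first, then clears the flags it
         * cannot honor, following the precedent of MAP_BUFFER_BATCH and
         * RESCHEDULE_RUNLIST.
         */
        void vgpu_init_gpu_characteristics_new(struct gk20a *g)
        {
                gk20a_init_gpu_characteristics_new(g);
                nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, false);
        }

The NULL check and the per-chip return-true stub both disappear; the only chip-specific knowledge left is the vGPU opt-out.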
@@ -500,10 +500,7 @@ void gk20a_init_gpu_characteristics(struct gk20a *g)
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_DIRECT_KIND_CTRL, true);
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, true);
 
-	if ((g->ops.mm.support_sparse != NULL) && g->ops.mm.support_sparse(g)) {
-		nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, true);
-	}
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, true);
 
 	/*
 	 * Fast submits are supported as long as the user doesn't request
@@ -571,7 +571,6 @@ static const struct gpu_ops gm20b_ops = {
 		.is_fw_defined = gm20b_netlist_is_firmware_defined,
 	},
 	.mm = {
-		.support_sparse = gm20b_mm_support_sparse,
 		.gmmu_map = gk20a_locked_gmmu_map,
 		.gmmu_unmap = gk20a_locked_gmmu_unmap,
 		.vm_bind_channel = gk20a_vm_bind_channel,
@@ -61,11 +61,6 @@ u32 gm20b_mm_get_default_big_page_size(void)
 	return SZ_64K;
 }
 
-bool gm20b_mm_support_sparse(struct gk20a *g)
-{
-	return true;
-}
-
 bool gm20b_mm_is_bar1_supported(struct gk20a *g)
 {
 	return true;
@@ -643,7 +643,6 @@ static const struct gpu_ops gp10b_ops = {
 	},
 #endif /* CONFIG_GK20A_CTXSW_TRACE */
 	.mm = {
-		.support_sparse = gm20b_mm_support_sparse,
 		.gmmu_map = gk20a_locked_gmmu_map,
 		.gmmu_unmap = gk20a_locked_gmmu_unmap,
 		.vm_bind_channel = gk20a_vm_bind_channel,
@@ -813,7 +813,6 @@ static const struct gpu_ops gv100_ops = {
 	},
 #endif /* CONFIG_GK20A_CTXSW_TRACE */
 	.mm = {
-		.support_sparse = gm20b_mm_support_sparse,
 		.gmmu_map = gk20a_locked_gmmu_map,
 		.gmmu_unmap = gk20a_locked_gmmu_unmap,
 		.vm_bind_channel = gk20a_vm_bind_channel,
@@ -773,7 +773,6 @@ static const struct gpu_ops gv11b_ops = {
 	},
 #endif /* CONFIG_GK20A_CTXSW_TRACE */
 	.mm = {
-		.support_sparse = gm20b_mm_support_sparse,
 		.gmmu_map = gk20a_locked_gmmu_map,
 		.gmmu_unmap = gk20a_locked_gmmu_unmap,
 		.vm_bind_channel = gk20a_vm_bind_channel,
@@ -1067,7 +1067,6 @@ struct gpu_ops {
 	} fecs_trace;
 #endif
 	struct {
-		bool (*support_sparse)(struct gk20a *g);
 		u64 (*gmmu_map)(struct vm_gk20a *vm,
 				u64 map_offset,
 				struct nvgpu_sgt *sgt,
@@ -844,7 +844,6 @@ static const struct gpu_ops tu104_ops = {
 	},
 #endif /* CONFIG_GK20A_CTXSW_TRACE */
 	.mm = {
-		.support_sparse = gm20b_mm_support_sparse,
 		.gmmu_map = gk20a_locked_gmmu_map,
 		.gmmu_unmap = gk20a_locked_gmmu_unmap,
 		.vm_bind_channel = gk20a_vm_bind_channel,
@@ -462,8 +462,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 	},
 #endif /* CONFIG_GK20A_CTXSW_TRACE */
 	.mm = {
-		/* FIXME: add support for sparse mappings */
-		.support_sparse = NULL,
 		.gmmu_map = vgpu_gp10b_locked_gmmu_map,
 		.gmmu_unmap = vgpu_locked_gmmu_unmap,
 		.vm_bind_channel = vgpu_vm_bind_channel,
||||
@@ -540,8 +540,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
|
||||
},
|
||||
#endif /* CONFIG_GK20A_CTXSW_TRACE */
|
||||
.mm = {
|
||||
/* FIXME: add support for sparse mappings */
|
||||
.support_sparse = NULL,
|
||||
.gmmu_map = vgpu_gp10b_locked_gmmu_map,
|
||||
.gmmu_unmap = vgpu_locked_gmmu_unmap,
|
||||
.vm_bind_channel = vgpu_vm_bind_channel,
|
||||
|
||||
@@ -285,10 +285,10 @@ void vgpu_init_gpu_characteristics(struct gk20a *g)
 
 	gk20a_init_gpu_characteristics(g);
 
-	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, false);
-
 	/* features vgpu does not support */
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH, false);
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_RESCHEDULE_RUNLIST, false);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, false);
 }
 
 int vgpu_read_ptimer(struct gk20a *g, u64 *value)
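Note that consumers were already insulated from the HAL: they test the enabled flag rather than g->ops.mm.support_sparse, which is why dropping the op is safe for them. A hypothetical consumer is sketched below, assuming the nvgpu_is_enabled() accessor that pairs with nvgpu_set_enabled(); map_sparse_region() itself is made up for illustration and is not a function touched by this commit.

        #include <nvgpu/enabled.h>	/* nvgpu_is_enabled(), flag definitions */

        /* Illustrative only: a caller that keys off the enabled flag. */
        static int map_sparse_region(struct gk20a *g)
        {
                /*
                 * vGPU takes this branch, because vgpu_init_gpu_characteristics()
                 * cleared the flag after the common init set it.
                 */
                if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS))
                        return -EINVAL;

                /* ... set up the sparse GMMU mapping ... */
                return 0;
        }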