gpu: nvgpu: userd slab cleanup

Follow-up change to rename g->ops.mm.bar1_map (and its implementations)
to the more specific g->ops.mm.bar1_map_userd.
Also use nvgpu_big_zalloc() to allocate the userd slab memory descriptors.

Bug 2422486
Bug 200474793

Change-Id: Iceff3bd1d34d56d3bb9496c179fff1b876b224ce
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1970891
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Thomas Fleury
2018-12-11 17:27:38 -08:00
committed by mobile promotions
parent e9066a46c9
commit 3943f87d69
14 changed files with 17 additions and 17 deletions

View File

@@ -953,8 +953,8 @@ int gk20a_fifo_init_userd_slabs(struct gk20a *g)
f->num_userd_slabs =
DIV_ROUND_UP(f->num_channels, f->num_channels_per_slab);
f->userd_slabs = nvgpu_kcalloc(g, f->num_userd_slabs,
sizeof(struct nvgpu_mem));
f->userd_slabs = nvgpu_big_zalloc(g, f->num_userd_slabs *
sizeof(struct nvgpu_mem));
if (f->userd_slabs == NULL) {
nvgpu_err(g, "could not allocate userd slabs");
return -ENOMEM;
@@ -987,7 +987,7 @@ int gk20a_fifo_init_userd(struct gk20a *g, struct channel_gk20a *c)
}
if (g->ops.mm.is_bar1_supported(g)) {
mem->gpu_va = g->ops.mm.bar1_map(g, mem,
mem->gpu_va = g->ops.mm.bar1_map_userd(g, mem,
slab * PAGE_SIZE);
}
}
@@ -1015,7 +1015,7 @@ void gk20a_fifo_free_userd_slabs(struct gk20a *g)
for (slab = 0; slab < f->num_userd_slabs; slab++) {
nvgpu_dma_free(g, &f->userd_slabs[slab]);
}
nvgpu_kfree(g, f->userd_slabs);
nvgpu_big_free(g, f->userd_slabs);
f->userd_slabs = NULL;
}

View File

@@ -668,7 +668,7 @@ const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
gk20a_mm_levels_64k : gk20a_mm_levels_128k;
}
u64 gk20a_mm_bar1_map(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
u64 gk20a_mm_bar1_map_userd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
{
struct fifo_gk20a *f = &g->fifo;
u64 gpu_va = f->userd_gpu_va + offset;

View File

@@ -152,5 +152,5 @@ u32 gk20a_get_pde_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
struct nvgpu_gmmu_pd *pd, u32 pd_idx);
u32 gk20a_get_pte_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
struct nvgpu_gmmu_pd *pd, u32 pd_idx);
u64 gk20a_mm_bar1_map(struct gk20a *g, struct nvgpu_mem *mem, u32 offset);
u64 gk20a_mm_bar1_map_userd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset);
#endif /* MM_GK20A_H */