Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: userd slab cleanup
Follow-up change to rename g->ops.mm.bar1_map (and its implementations)
to the more specific g->ops.mm.bar1_map_userd. Also use nvgpu_big_zalloc()
to allocate the userd slab memory descriptors.

Bug 2422486
Bug 200474793

Change-Id: Iceff3bd1d34d56d3bb9496c179fff1b876b224ce
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1970891
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 3943f87d69
parent e9066a46c9
committed by mobile promotions
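A quick sketch of the allocation half of this change, before the diff. It is not a verbatim excerpt of the driver: the two helpers below are a simplified reconstruction of gk20a_fifo_init_userd_slabs()/gk20a_fifo_free_userd_slabs() as they look after this commit, using only calls that appear in the hunks; the helper names are made up here and the header paths assume the in-tree nvgpu includes.

/* Simplified view of the userd slab descriptor allocation after this change
 * (error logging and the per-slab DMA buffers are omitted; see the real
 * functions in the hunks below). */
#include <nvgpu/kmem.h>        /* nvgpu_big_zalloc(), nvgpu_big_free() (assumed path) */
#include <nvgpu/nvgpu_mem.h>   /* struct nvgpu_mem (assumed path) */

static int userd_slabs_alloc_sketch(struct gk20a *g)
{
	struct fifo_gk20a *f = &g->fifo;

	/* one nvgpu_mem descriptor per slab; the array scales with the channel
	 * count, hence the "big" allocator rather than nvgpu_kcalloc() */
	f->num_userd_slabs =
		DIV_ROUND_UP(f->num_channels, f->num_channels_per_slab);
	f->userd_slabs = nvgpu_big_zalloc(g, f->num_userd_slabs *
			sizeof(struct nvgpu_mem));
	if (f->userd_slabs == NULL) {
		return -ENOMEM;
	}
	return 0;
}

static void userd_slabs_free_sketch(struct gk20a *g)
{
	struct fifo_gk20a *f = &g->fifo;

	/* allocation and free must stay paired: nvgpu_big_zalloc() -> nvgpu_big_free() */
	nvgpu_big_free(g, f->userd_slabs);
	f->userd_slabs = NULL;
}

The other half of the commit is mechanical: the g->ops.mm.bar1_map HAL pointer and its gk20a/vgpu implementations are renamed to bar1_map_userd, since mapping USERD through BAR1 is the op's only use.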
@@ -953,8 +953,8 @@ int gk20a_fifo_init_userd_slabs(struct gk20a *g)
 	f->num_userd_slabs =
 		DIV_ROUND_UP(f->num_channels, f->num_channels_per_slab);
 
-	f->userd_slabs = nvgpu_kcalloc(g, f->num_userd_slabs,
-			sizeof(struct nvgpu_mem));
+	f->userd_slabs = nvgpu_big_zalloc(g, f->num_userd_slabs *
+			sizeof(struct nvgpu_mem));
 	if (f->userd_slabs == NULL) {
 		nvgpu_err(g, "could not allocate userd slabs");
 		return -ENOMEM;
@@ -987,7 +987,7 @@ int gk20a_fifo_init_userd(struct gk20a *g, struct channel_gk20a *c)
 	}
 
 	if (g->ops.mm.is_bar1_supported(g)) {
-		mem->gpu_va = g->ops.mm.bar1_map(g, mem,
+		mem->gpu_va = g->ops.mm.bar1_map_userd(g, mem,
 				slab * PAGE_SIZE);
 	}
 }
@@ -1015,7 +1015,7 @@ void gk20a_fifo_free_userd_slabs(struct gk20a *g)
 	for (slab = 0; slab < f->num_userd_slabs; slab++) {
 		nvgpu_dma_free(g, &f->userd_slabs[slab]);
 	}
-	nvgpu_kfree(g, f->userd_slabs);
+	nvgpu_big_free(g, f->userd_slabs);
 	f->userd_slabs = NULL;
 }
 
@@ -668,7 +668,7 @@ const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
 		gk20a_mm_levels_64k : gk20a_mm_levels_128k;
 }
 
-u64 gk20a_mm_bar1_map(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
+u64 gk20a_mm_bar1_map_userd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	u64 gpu_va = f->userd_gpu_va + offset;
@@ -152,5 +152,5 @@ u32 gk20a_get_pde_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
|
||||
struct nvgpu_gmmu_pd *pd, u32 pd_idx);
|
||||
u32 gk20a_get_pte_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
|
||||
struct nvgpu_gmmu_pd *pd, u32 pd_idx);
|
||||
u64 gk20a_mm_bar1_map(struct gk20a *g, struct nvgpu_mem *mem, u32 offset);
|
||||
u64 gk20a_mm_bar1_map_userd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset);
|
||||
#endif /* MM_GK20A_H */
|
||||
|
||||
@@ -584,7 +584,7 @@ static const struct gpu_ops gm20b_ops = {
 		.mmu_fault_pending = gk20a_fifo_mmu_fault_pending,
 		.get_kind_invalid = gm20b_get_kind_invalid,
 		.get_kind_pitch = gm20b_get_kind_pitch,
-		.bar1_map = gk20a_mm_bar1_map,
+		.bar1_map_userd = gk20a_mm_bar1_map_userd,
 	},
 	.therm = {
 		.init_therm_setup_hw = gm20b_init_therm_setup_hw,
@@ -656,7 +656,7 @@ static const struct gpu_ops gp10b_ops = {
 		.remove_bar2_vm = gp10b_remove_bar2_vm,
 		.get_kind_invalid = gm20b_get_kind_invalid,
 		.get_kind_pitch = gm20b_get_kind_pitch,
-		.bar1_map = gk20a_mm_bar1_map,
+		.bar1_map_userd = gk20a_mm_bar1_map_userd,
 	},
 	.pramin = {
 		.data032_r = pram_data032_r,
@@ -832,7 +832,7 @@ static const struct gpu_ops gv100_ops = {
 		.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
 		.mmu_fault_disable_hw = gv11b_mm_mmu_fault_disable_hw,
 		.get_flush_retries = gv100_mm_get_flush_retries,
-		.bar1_map = NULL,
+		.bar1_map_userd = NULL,
 	},
 	.pramin = {
 		.data032_r = pram_data032_r,
@@ -787,7 +787,7 @@ static const struct gpu_ops gv11b_ops = {
 		.remove_bar2_vm = gp10b_remove_bar2_vm,
 		.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
 		.mmu_fault_disable_hw = gv11b_mm_mmu_fault_disable_hw,
-		.bar1_map = NULL,
+		.bar1_map_userd = NULL,
 	},
 	.therm = {
 		.init_therm_setup_hw = gv11b_init_therm_setup_hw,
@@ -1100,7 +1100,7 @@ struct gpu_ops {
 		u32 (*get_kind_pitch)(void);
 		u32 (*get_flush_retries)(struct gk20a *g,
 			enum nvgpu_flush_op op);
-		u64 (*bar1_map)(struct gk20a *g, struct nvgpu_mem *mem, u32 offset);
+		u64 (*bar1_map_userd)(struct gk20a *g, struct nvgpu_mem *mem, u32 offset);
 	} mm;
 	/*
 	 * This function is called to allocate secure memory (memory
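The prototype above is the only behavioral surface of the rename; a short, hedged illustration of how the op is dispatched (it mirrors the gk20a_fifo_init_userd() hunk earlier in this diff, with mem and slab standing in for a channel's userd slab descriptor and slab index):

/* Callers keep the op behind the BAR1 capability check; chips that set
 * .bar1_map_userd = NULL in the HAL tables in this diff (gv100, gv11b, tu104)
 * never reach the call. */
if (g->ops.mm.is_bar1_supported(g)) {
	mem->gpu_va = g->ops.mm.bar1_map_userd(g, mem, slab * PAGE_SIZE);
}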
@@ -81,7 +81,7 @@ int vgpu_get_timestamps_zipper(struct gk20a *g,
 			struct nvgpu_cpu_time_correlation_sample *samples);
 int vgpu_init_hal(struct gk20a *g);
 int vgpu_get_constants(struct gk20a *g);
-u64 vgpu_bar1_map(struct gk20a *g, struct nvgpu_mem *mem, u32 offset);
+u64 vgpu_mm_bar1_map_userd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset);
 int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info);
 int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 			struct nvgpu_gr_ctx *gr_ctx,
@@ -863,7 +863,7 @@ static const struct gpu_ops tu104_ops = {
 		.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
 		.mmu_fault_disable_hw = gv11b_mm_mmu_fault_disable_hw,
 		.get_flush_retries = gv100_mm_get_flush_retries,
-		.bar1_map = NULL,
+		.bar1_map_userd = NULL,
 	},
 	.pramin = {
 		.data032_r = pram_data032_r,
@@ -375,7 +375,7 @@ int vgpu_init_fifo_setup_hw(struct gk20a *g)
 		nvgpu_err(g, "userd allocation failed, err=%d", err);
 		return err;
 	}
-	mem->gpu_va = g->ops.mm.bar1_map(g, mem, 0);
+	mem->gpu_va = g->ops.mm.bar1_map_userd(g, mem, 0);
 	f->userd_gpu_va = mem->gpu_va;
 
 	/* test write, read through bar1 @ userd region before
@@ -481,7 +481,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 		.remove_bar2_vm = gp10b_remove_bar2_vm,
 		.get_kind_invalid = gm20b_get_kind_invalid,
 		.get_kind_pitch = gm20b_get_kind_pitch,
-		.bar1_map = vgpu_bar1_map,
+		.bar1_map_userd = vgpu_mm_bar1_map_userd,
 	},
 	.pramin = {
 		.data032_r = NULL,
@@ -560,7 +560,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 		.init_bar2_vm = gp10b_init_bar2_vm,
 		.remove_bar2_vm = gp10b_remove_bar2_vm,
 		.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
-		.bar1_map = vgpu_bar1_map,
+		.bar1_map_userd = vgpu_mm_bar1_map_userd,
 	},
 	.therm = {
 		.init_therm_setup_hw = NULL,
@@ -155,7 +155,7 @@ void vgpu_vm_remove(struct vm_gk20a *vm)
 	WARN_ON(err || msg.ret);
 }
 
-u64 vgpu_bar1_map(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
+u64 vgpu_mm_bar1_map_userd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
 {
 	u64 addr = nvgpu_mem_get_addr(g, mem);
 	struct tegra_vgpu_cmd_msg msg;