gpu: nvgpu: Rename nvgpu DMA APIs

Rename the nvgpu DMA APIs from gk20a_gmmu_alloc* to nvgpu_dma_alloc*.
This better reflects the purpose of the APIs (to allocate DMA-suitable
memory) and avoids confusion with GMMU-related code.
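
As a hedged sketch of what the rename means at a typical call site
(the helper names and the struct mem_desc buffer type below are
assumptions for illustration, not part of this change):

/*
 * Illustrative only: shows the old -> new name mapping at a typical
 * call site. Helper names and the buffer descriptor type are assumed.
 */
static int example_sysmem_buffer(struct gk20a *g, size_t size,
				 struct mem_desc *mem)
{
	int err;

	/* Was gk20a_gmmu_alloc_sys(): allocate DMA-addressable sysmem. */
	err = nvgpu_dma_alloc_sys(g, size, mem);
	if (err)
		return err;

	/* ... use the buffer ... */

	/* Was gk20a_gmmu_free(). */
	nvgpu_dma_free(g, mem);
	return 0;
}

/*
 * The mapped variants are renamed the same way:
 *   gk20a_gmmu_alloc_map_sys() -> nvgpu_dma_alloc_map_sys()
 *   gk20a_gmmu_unmap_free()    -> nvgpu_dma_unmap_free()
 */
static int example_mapped_buffer(struct vm_gk20a *vm, size_t size,
				 struct mem_desc *mem)
{
	int err = nvgpu_dma_alloc_map_sys(vm, size, mem);

	if (err)
		return err;

	/* ... use the GPU-mapped buffer ... */

	nvgpu_dma_unmap_free(vm, mem);
	return 0;
}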

JIRA NVGPU-12

Change-Id: I673d607db56dd6e44f02008dc7b5293209ef67bf
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1325548
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Alex Waterman
Date: 2017-03-21 15:34:50 -07:00
Committer: mobile promotions
Parent: 8f2d4a3f4a
Commit: 50667e097b
20 changed files with 179 additions and 178 deletions

@@ -839,7 +839,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 	gk20a_dbg_fn("");
-	err = gk20a_gmmu_alloc_sys(vm->mm->g, size, mem);
+	err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
 	if (err)
 		return err;
@@ -859,7 +859,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 	return 0;
 fail_free:
-	gk20a_gmmu_free(vm->mm->g, mem);
+	nvgpu_dma_free(vm->mm->g, mem);
 	return err;
 }
@@ -980,11 +980,11 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 	return 0;
 fail_free_betacb:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
 fail_free_spill:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
 fail_free_preempt:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
 fail:
 	return err;
 }
@@ -1098,10 +1098,10 @@ static void gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 	if (g->gr.t18x.ctx_vars.dump_ctxsw_stats_on_channel_close)
 		dump_ctx_switch_stats(g, vm, gr_ctx);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
 	gr_gk20a_free_gr_ctx(g, vm, gr_ctx);
 	gk20a_dbg_fn("done");
 }

@@ -39,7 +39,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g)
 	gk20a_dbg_fn("");
 	if (!g->mm.bar2_desc.gpu_va) {
-		err = gk20a_gmmu_alloc_map_sys(vm, rbfb_size,
+		err = nvgpu_dma_alloc_map_sys(vm, rbfb_size,
 				&g->mm.bar2_desc);
 		if (err) {
 			dev_err(dev_from_gk20a(g),
@@ -63,7 +63,7 @@ void gp10b_replayable_pagefault_buffer_deinit(struct gk20a *g)
 {
 	struct vm_gk20a *vm = &g->mm.bar2.vm;
-	gk20a_gmmu_unmap_free(vm, &g->mm.bar2_desc);
+	nvgpu_dma_unmap_free(vm, &g->mm.bar2_desc);
 }
 u32 gp10b_replayable_pagefault_buffer_get_index(struct gk20a *g)