gpu: nvgpu: split perfbuf initialization

gk20a_perfbuf_map() allocates the perfbuf VM, maps the user buffer into the
new VM, and then triggers gops.perfbuf.perfbuf_enable(). This HAL then does
the following:
- Allocate perfbuf instance block
- Initialize perfbuf instance block
- Reset stream buffer
- Program instance block address in PMA registers
- Program user buffer address into PMA registers

The new profiler interface will have its own API to set up the PMA stream,
and it requires the above setup to be done in two phases: perfbuf
initialization first, followed by user buffer setup.

Split above functionalities into below functions
- nvgpu_perfbuf_init_vm()
  - Allocate perfbuf VM
  - Call gops.perfbuf.init_inst_block() to initialize perfbuf instance
    block

- gops.perfbuf.init_inst_block()
  - Allocate perfbuf instance block
  - Initialize perfbuf instance block
  - Program instance block address in PMA registers using
    gops.perf.init_inst_block()
  - In case of vGPU, trigger TEGRA_VGPU_CMD_PERFBUF_INST_BLOCK_MGT
    command to gpu server

- gops.perf.init_inst_block()
  - Reset stream buffer
  - Program user buffer address into PMA registers

Also add the corresponding cleanup functions:
gops.perf.deinit_inst_block()
gops.perfbuf.deinit_inst_block()
nvgpu_perfbuf_deinit_vm()

Bug 2510974
Jira NVGPU-5360

Change-Id: I486370f21012cbb7fea84fe46fb16db95bc16790
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2372984
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Deepak Nibade
2020-05-20 12:03:30 +05:30
committed by Alex Waterman
parent 12e71f22f8
commit f34711d3de
18 changed files with 188 additions and 57 deletions

View File

@@ -31,6 +31,7 @@
#include <nvgpu/cond.h>
#include <nvgpu/debugger.h>
#include <nvgpu/profiler.h>
#include <nvgpu/perfbuf.h>
#include <nvgpu/utils.h>
#include <nvgpu/mm.h>
#include <nvgpu/gk20a.h>
@@ -1433,7 +1434,6 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
struct mm_gk20a *mm = &g->mm;
int err;
u32 virt_size;
u32 big_page_size = g->ops.mm.gmmu.get_default_big_page_size();
nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -1442,14 +1442,10 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
return -EBUSY;
}
mm->perfbuf.vm = nvgpu_vm_init(g, big_page_size,
big_page_size << 10,
NV_MM_DEFAULT_KERNEL_SIZE,
NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
false, false, false, "perfbuf");
if (!mm->perfbuf.vm) {
err = nvgpu_perfbuf_init_vm(g);
if (err) {
nvgpu_mutex_release(&g->dbg_sessions_lock);
return -ENOMEM;
return err;
}
err = nvgpu_vm_map_buffer(mm->perfbuf.vm,
@@ -1485,7 +1481,7 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
err_unmap:
nvgpu_vm_unmap(mm->perfbuf.vm, args->offset, NULL);
err_remove_vm:
nvgpu_vm_put(mm->perfbuf.vm);
nvgpu_perfbuf_deinit_vm(g);
nvgpu_mutex_release(&g->dbg_sessions_lock);
return err;
}
@@ -1712,8 +1708,8 @@ static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset)
err = g->ops.perfbuf.perfbuf_disable(g);
nvgpu_vm_unmap(vm, offset, NULL);
nvgpu_free_inst_block(g, &mm->perfbuf.inst_block);
nvgpu_vm_put(vm);
nvgpu_perfbuf_deinit_vm(g);
g->perfbuf.owner = NULL;
g->perfbuf.offset = 0;