gpu: nvgpu: unit: add tests for TSG hal

Add unit tests for:
- gv11b_tsg_init_eng_method_buffers
- gv11b_tsg_deinit_eng_method_buffers
- gv11b_tsg_bind_channel_eng_method_buffers
- gv11b_tsg_unbind_channel_check_eng_faulted

Note: gv11b_tsg_enable was already tested as part of TSG common.

Added SWUTS documentation for above tests.

Modified gv11b_tsg_init_eng_method_buffers to inline the computation
of the method buffer size, since the existing static helper could never
return 0, which made one error-handling branch untestable.

Added dummy IO register spaces for PFB, CE, PBUS and HSUB_COMMON,
so that g->ops.mm.init_mm_support can be called as part of
test_fifo_init_support. MM support is needed to test allocation
and mapping of DMA buffers.

Jira NVGPU-3788

Change-Id: I5356531b23c0456662187d16b35955bf0e528782
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2207384
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Thomas Fleury
2019-09-25 16:15:46 -04:00
committed by Alex Waterman
parent babdf69f8b
commit 99ffa2622c
16 changed files with 703 additions and 73 deletions

View File

@@ -102,26 +102,13 @@ void gv11b_tsg_bind_channel_eng_method_buffers(struct nvgpu_tsg *tsg,
g->ops.ramin.set_eng_method_buffer(g, &ch->inst_block, gpu_va);
}
/*
 * Compute the size in bytes of one engine method buffer.
 *
 * The size scales with the number of PCEs reported by the CE HAL and is
 * rounded up to whole-page granularity. Because 2U is added before the
 * final scaling, the result can never be 0 (this is why the caller's
 * zero-size branch is untestable, per the commit message above).
 *
 * @param g  GPU driver context; used for the CE ops vtable and logging.
 * @return   Method buffer size in bytes, page-aligned, always non-zero.
 */
static u32 gv11b_tsg_get_eng_method_buffer_size(struct gk20a *g)
{
u32 buffer_size;
/* PAGE_SIZE narrowed to u32 for the roundup() below */
u32 page_size = U32(PAGE_SIZE);
/*
 * (9 + 1 + 3) entries per PCE, plus 2 — presumably per-method-buffer
 * bookkeeping entries mandated by hardware; TODO(review): confirm the
 * exact meaning of these constants against the CE/PBDMA documentation.
 */
buffer_size = nvgpu_safe_add_u32(nvgpu_safe_mult_u32((9U + 1U + 3U),
g->ops.ce.get_num_pce(g)), 2U);
buffer_size = nvgpu_safe_mult_u32((27U * 5U), buffer_size);
/* round up to a whole page so the buffer can be page-mapped */
buffer_size = roundup(buffer_size, page_size);
nvgpu_log_info(g, "method buffer size in bytes %d", buffer_size);
return buffer_size;
}
int gv11b_tsg_init_eng_method_buffers(struct gk20a *g, struct nvgpu_tsg *tsg)
{
struct vm_gk20a *vm = g->mm.bar2.vm;
int err = 0;
int i;
unsigned int runque, method_buffer_size;
unsigned int runque, buffer_size;
u32 page_size = U32(PAGE_SIZE);
unsigned int num_pbdma = g->fifo.num_pbdma;
if (tsg->eng_method_buffers != NULL) {
@@ -129,11 +116,11 @@ int gv11b_tsg_init_eng_method_buffers(struct gk20a *g, struct nvgpu_tsg *tsg)
return 0;
}
method_buffer_size = gv11b_tsg_get_eng_method_buffer_size(g);
if (method_buffer_size == 0U) {
nvgpu_info(g, "ce will hit MTHD_BUFFER_FAULT");
return -EINVAL;
}
buffer_size = nvgpu_safe_add_u32(nvgpu_safe_mult_u32((9U + 1U + 3U),
g->ops.ce.get_num_pce(g)), 2U);
buffer_size = nvgpu_safe_mult_u32((27U * 5U), buffer_size);
buffer_size = roundup(buffer_size, page_size);
nvgpu_log_info(g, "method buffer size in bytes %d", buffer_size);
tsg->eng_method_buffers = nvgpu_kzalloc(g,
num_pbdma * sizeof(struct nvgpu_mem));
@@ -143,7 +130,7 @@ int gv11b_tsg_init_eng_method_buffers(struct gk20a *g, struct nvgpu_tsg *tsg)
}
for (runque = 0; runque < num_pbdma; runque++) {
err = nvgpu_dma_alloc_map_sys(vm, method_buffer_size,
err = nvgpu_dma_alloc_map_sys(vm, buffer_size,
&tsg->eng_method_buffers[runque]);
if (err != 0) {
nvgpu_err(g, "alloc eng method buffers, runque=%d",

View File

@@ -207,6 +207,8 @@ nvgpu_dma_alloc_map_sys
nvgpu_dma_alloc_sys
nvgpu_dma_free
nvgpu_dma_unmap_free
nvgpu_engine_get_fast_ce_runlist_id
nvgpu_engine_get_gr_runlist_id
nvgpu_get
nvgpu_falcon_bl_bootstrap
nvgpu_falcon_bootstrap