video: tegra: host: nvdla, pva: add task information to fences

For NSys, we're interested in having detailed information on which task
waits on a particular prefence or requests a particular postfence.
This is implemented by adding two extra fields, 'task_syncpt_id' and
'task_syncpt_thresh', to 'struct nvhost_task_fence' to record the task
a particular fence is associated with.
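
As a rough illustration (a hedged sketch: only 'task_syncpt_id',
'task_syncpt_thresh' and the renamed 'syncpt_id'/'syncpt_thresh' fields
come from this patch; the rest of the layout of 'struct nvhost_task_fence'
is assumed), the extended event could look like:

    /* Hedged sketch of the extended fence event; layout partly assumed. */
    struct nvhost_task_fence {
            u32 kind;                /* NVDEV_FENCE_KIND_PRE or _POST (field assumed) */
            u32 syncpt_id;           /* syncpoint the fence itself refers to          */
            u32 syncpt_thresh;       /* threshold on that syncpoint                   */
            u32 task_syncpt_id;      /* new: syncpoint of the task owning this fence  */
            u32 task_syncpt_thresh;  /* new: completion threshold of that task        */
    };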

To avoid a race condition in pva_submit (similar to the one fixed in
0c2065fd669926536f79fd9e8ec33f33cbdcae2e), PVA task memory management
is changed to use a simple kref-based scheme, much like the one used
in DLA.
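
A minimal sketch of such a kref-based scheme follows; the
'pva_task_sketch' type and the alloc/get/put helpers are illustrative
names, only the general pattern mirrors what the commit describes:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct pva_task_sketch {
            struct kref ref;
            /* ... task descriptors, fences, etc. ... */
    };

    static void pva_task_free(struct kref *ref)
    {
            struct pva_task_sketch *task =
                    container_of(ref, struct pva_task_sketch, ref);

            kfree(task);
    }

    static struct pva_task_sketch *pva_task_alloc(void)
    {
            struct pva_task_sketch *task = kzalloc(sizeof(*task), GFP_KERNEL);

            if (task)
                    kref_init(&task->ref);  /* creator holds the first reference */
            return task;
    }

    /*
     * The submit path takes an extra reference before the task becomes
     * visible to the completion path; both sides later drop theirs with
     * kref_put(), so the task is freed only when the last user is done,
     * which closes the submit-vs-completion race.
     */
    static inline void pva_task_get(struct pva_task_sketch *task)
    {
            kref_get(&task->ref);
    }

    static inline void pva_task_put(struct pva_task_sketch *task)
    {
            kref_put(&task->ref, pva_task_free);
    }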

Finally, this patch renames the syncpoint fields of 'task_fence' to
'syncpt_id' and 'syncpt_thresh' to match the field names used in other
events, which is intended to simplify Python scripting.

JIRA DTSP-1662
JIRA DTSP-682

Signed-off-by: Dmitry Antipov <dantipov@nvidia.com>
Change-Id: I4c55efcae15eb80a0d950882d6ff6e5ac706ab20
Reviewed-on: https://git-master.nvidia.com/r/1978175
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Shridhar Rasal <srasal@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Prashant Gaikwad <pgaikwad@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 6875779b10 (parent fb7762f5a8)
Author:    Dmitry Antipov
Date:      2018-12-28 15:56:36 +03:00
Committer: Laxman Dewangan

@@ -1,7 +1,7 @@
 /*
  * NVDLA queue and task management for T194
  *
- * Copyright (c) 2016-2018, NVIDIA Corporation.  All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA Corporation.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -293,7 +293,7 @@ static inline size_t nvdla_profile_status_offset(struct nvdla_task *task)
 static void nvdla_queue_update(void *priv, int nr_completed)
 {
-	int i, task_complete;
+	int task_complete;
 	struct nvdla_task *task, *safe;
 	struct nvhost_queue *queue = priv;
 	struct platform_device *pdev = queue->pool->pdev;
@@ -330,10 +330,14 @@ static void nvdla_queue_update(void *priv, int nr_completed)
 					timestamp_start,
 					timestamp_end);
-		for (i = 0; i < task->num_postfences; i++)
-			nvhost_eventlib_log_fence(pdev,
-						  NVDEV_FENCE_KIND_POST,
-						  task->postfences + i, timestamp_end);
+		/* Record task postfences */
+		nvhost_eventlib_log_fences(pdev,
+					   queue->syncpt_id,
+					   task->fence,
+					   task->postfences,
+					   task->num_postfences,
+					   NVDEV_FENCE_KIND_POST,
+					   timestamp_end);
 
 		nvdla_task_free_locked(task);
 	}
@@ -736,7 +740,6 @@ static int nvdla_fill_preactions(struct nvdla_task *task)
 	struct platform_device *pdev = queue->pool->pdev;
 	struct nvhost_master *host = nvhost_get_host(pdev);
 	struct nvhost_syncpt *sp = &host->syncpt;
-	u64 timestamp = arch_counter_get_cntvct();
 	struct dla_action_list *preactionl;
 	uint16_t preactionlist_of;
 	u8 *next, *start;
@@ -752,9 +755,6 @@ static int nvdla_fill_preactions(struct nvdla_task *task)
 	/* fill all preactions */
 	for (i = 0; i < task->num_prefences; i++) {
-		nvhost_eventlib_log_fence(pdev, NVDEV_FENCE_KIND_PRE,
-					  task->prefences + i, timestamp);
-
 		switch (task->prefences[i].type) {
 		case NVDEV_FENCE_TYPE_SYNC_FD: {
 			struct sync_fence *f;
@@ -1241,11 +1241,6 @@ static int nvdla_queue_submit(struct nvhost_queue *queue, void *in_task)
 	if (err)
 		goto fail_to_register;
 
-	nvhost_eventlib_log_submit(queue->pool->pdev,
-				   queue->syncpt_id,
-				   task->fence,
-				   timestamp);
-
 	/* prepare command for MMIO submit */
 	if (nvdla_dev->submit_mode == NVDLA_SUBMIT_MODE_MMIO) {
 		cmd_data.method_id = method_id;
@@ -1260,6 +1255,23 @@ static int nvdla_queue_submit(struct nvhost_queue *queue, void *in_task)
 					task->fence);
 		}
 	}
+
+	if (!err) {
+		/* If submitted, record task submit and prefences */
+		nvhost_eventlib_log_submit(pdev,
+					   queue->syncpt_id,
+					   task->fence,
+					   timestamp);
+
+		nvhost_eventlib_log_fences(pdev,
+					   queue->syncpt_id,
+					   task->fence,
+					   task->prefences,
+					   task->num_prefences,
+					   NVDEV_FENCE_KIND_PRE,
+					   timestamp);
+	}
+
 	mutex_unlock(&queue->list_lock);
 
 	return err;
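
For reference, a hedged sketch of what a batched fence logger such as
'nvhost_eventlib_log_fences' above could look like, inferred purely from
its call sites in this patch; the 'struct nvdev_fence' field names, the
'enum nvdev_fence_kind' type and the 'emit_fence_event' callee are
assumptions, not the actual eventlib implementation:

    /* Hedged sketch only; inferred from the call sites in this patch. */
    static void log_fences_sketch(struct platform_device *pdev,
                                  u32 task_syncpt_id,      /* queue->syncpt_id */
                                  u32 task_syncpt_thresh,  /* task->fence      */
                                  struct nvdev_fence *fences,
                                  u32 num_fences,
                                  enum nvdev_fence_kind kind,
                                  u64 timestamp)
    {
            u32 i;

            for (i = 0; i < num_fences; i++) {
                    struct nvhost_task_fence ev = {
                            .kind               = kind,
                            .syncpt_id          = fences[i].syncpoint_index,
                            .syncpt_thresh      = fences[i].syncpoint_value,
                            /* new fields: tie each fence back to its task */
                            .task_syncpt_id     = task_syncpt_id,
                            .task_syncpt_thresh = task_syncpt_thresh,
                    };

                    emit_fence_event(pdev, &ev, timestamp); /* hypothetical writer */
            }
    }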