From 6875779b10556bbba156b3564d6b200cb4433f33 Mon Sep 17 00:00:00 2001 From: Dmitry Antipov Date: Fri, 28 Dec 2018 15:56:36 +0300 Subject: [PATCH] video: tegra: host: nvdla, pva: add task information to fences For NSys, we're interested in having detailed information on what task waits on a particular prefence or requests a particular postfence. This is implemented by adding two extra fields, 'task_syncpt_id' and 'task_syncpt_thresh' to 'struct nvhost_task_fence', to record the task this particular fence is associated with. To avoid race condition in pva_submit (similar to what was fixed in 0c2065fd669926536f79fd9e8ec33f33cbdcae2e), PVA task memory management is changed to use simple kref-based scheme, much like it's done in DLA. Finally, this patch renames syncpoint fields of 'task_fence' to 'syncpt_id' and 'syncpt_thresh' to match the same field names in other events, which is intended to simplify Python scripting. JIRA DTSP-1662 JIRA DTSP-682 Signed-off-by: Dmitry Antipov Change-Id: I4c55efcae15eb80a0d950882d6ff6e5ac706ab20 Reviewed-on: https://git-master.nvidia.com/r/1978175 Reviewed-by: svc-mobile-coverity Reviewed-by: svc-mobile-misra Reviewed-by: Shridhar Rasal GVS: Gerrit_Virtual_Submit Reviewed-by: Prashant Gaikwad Reviewed-by: mobile promotions Tested-by: mobile promotions --- drivers/video/tegra/host/nvdla/nvdla_queue.c | 42 +++++++++++++------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/drivers/video/tegra/host/nvdla/nvdla_queue.c b/drivers/video/tegra/host/nvdla/nvdla_queue.c index 8e173944..9c14a8d6 100644 --- a/drivers/video/tegra/host/nvdla/nvdla_queue.c +++ b/drivers/video/tegra/host/nvdla/nvdla_queue.c @@ -1,7 +1,7 @@ /* * NVDLA queue and task management for T194 * - * Copyright (c) 2016-2018, NVIDIA Corporation. All rights reserved. + * Copyright (c) 2016-2019, NVIDIA Corporation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -293,7 +293,7 @@ static inline size_t nvdla_profile_status_offset(struct nvdla_task *task) static void nvdla_queue_update(void *priv, int nr_completed) { - int i, task_complete; + int task_complete; struct nvdla_task *task, *safe; struct nvhost_queue *queue = priv; struct platform_device *pdev = queue->pool->pdev; @@ -330,10 +330,14 @@ static void nvdla_queue_update(void *priv, int nr_completed) timestamp_start, timestamp_end); - for (i = 0; i < task->num_postfences; i++) - nvhost_eventlib_log_fence(pdev, - NVDEV_FENCE_KIND_POST, - task->postfences + i, timestamp_end); + /* Record task postfences */ + nvhost_eventlib_log_fences(pdev, + queue->syncpt_id, + task->fence, + task->postfences, + task->num_postfences, + NVDEV_FENCE_KIND_POST, + timestamp_end); nvdla_task_free_locked(task); } @@ -736,7 +740,6 @@ static int nvdla_fill_preactions(struct nvdla_task *task) struct platform_device *pdev = queue->pool->pdev; struct nvhost_master *host = nvhost_get_host(pdev); struct nvhost_syncpt *sp = &host->syncpt; - u64 timestamp = arch_counter_get_cntvct(); struct dla_action_list *preactionl; uint16_t preactionlist_of; u8 *next, *start; @@ -752,9 +755,6 @@ static int nvdla_fill_preactions(struct nvdla_task *task) /* fill all preactions */ for (i = 0; i < task->num_prefences; i++) { - nvhost_eventlib_log_fence(pdev, NVDEV_FENCE_KIND_PRE, - task->prefences + i, timestamp); - switch (task->prefences[i].type) { case NVDEV_FENCE_TYPE_SYNC_FD: { struct sync_fence *f; @@ -1241,11 +1241,6 @@ static int nvdla_queue_submit(struct nvhost_queue *queue, void *in_task) if (err) goto fail_to_register; - nvhost_eventlib_log_submit(queue->pool->pdev, - queue->syncpt_id, - task->fence, - timestamp); - /* prepare command for MMIO submit */ if (nvdla_dev->submit_mode == NVDLA_SUBMIT_MODE_MMIO) { cmd_data.method_id = method_id; @@ -1260,6 +1255,23 @@ 
static int nvdla_queue_submit(struct nvhost_queue *queue, void *in_task) task->fence); } } + + if (!err) { + /* If submitted, record task submit and prefences */ + nvhost_eventlib_log_submit(pdev, + queue->syncpt_id, + task->fence, + timestamp); + + nvhost_eventlib_log_fences(pdev, + queue->syncpt_id, + task->fence, + task->prefences, + task->num_prefences, + NVDEV_FENCE_KIND_PRE, + timestamp); + } + mutex_unlock(&queue->list_lock); return err;