nvdla: Prepare for supporting upstream Linux

The referencing of syncpts in the upstream host1x Linux driver is
different to the downstream nvhost driver. By migrating the DLA driver
to use the '_ext' implementations of the various nvhost functions, we
can use a common nvhost interface for both the upstream and
downstream kernels.

JIRA LS-410

Change-Id: Ibd16dfed53edf7173d44fdfaacb4080788f3ba2c
Signed-off-by: Jon Hunter <jonathanh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2653102
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Jon Hunter
2022-01-11 14:49:49 +00:00
committed by Laxman Dewangan
parent 722d0f22b3
commit 870c17eed9
3 changed files with 5 additions and 14 deletions

View File

@@ -231,7 +231,6 @@ struct nvdla_device {
* struct nvdla_emu_task: structure for emulator task info
*
* @queue Queue in which task submitted
* @sp pointer to syncpt
* @prefences pointer to pre fences
* @postfences pointer to post fences
* @num_prefences Number of prefences in task
@@ -242,7 +241,6 @@ struct nvdla_device {
*/
struct nvdla_emu_task {
struct nvdla_queue *queue;
struct nvhost_syncpt *sp;
struct nvdev_fence prefences[MAX_NVDLA_EMU_PREFENCES_PER_TASK];
struct nvdev_fence postfences[MAX_NVDLA_EMU_POSTFENCES_PER_TASK];
u32 num_prefences;
@@ -256,7 +254,6 @@ struct nvdla_emu_task {
*
* @queue Queue in which task submitted
* @buffers nvhost buffers for priv/task
* @sp pointer to syncpt
* @prefences pointer to prefences
* @postfences pointer to post fences
* @fence fence tracking for current task
@@ -272,7 +269,6 @@ struct nvdla_emu_task {
struct nvdla_task {
struct nvdla_queue *queue;
struct nvdla_buffers *buffers;
struct nvhost_syncpt *sp;
struct nvdev_fence prefences[MAX_NVDLA_PREFENCES_PER_TASK];
struct nvdev_fence postfences[MAX_NVDLA_POSTFENCES_PER_TASK];
struct nvdla_status_notify in_task_status[MAX_NVDLA_IN_STATUS_PER_TASK];

View File

@@ -688,7 +688,6 @@ static int nvdla_fill_task(struct nvdla_queue *queue,
/* initialize task parameters */
task->queue = queue;
task->buffers = buffers;
task->sp = &nvhost_get_host(pdev)->syncpt;
err = nvdla_val_task_submit_input(local_task);
if (err) {
@@ -852,7 +851,6 @@ static int nvdla_emu_task_submit(struct nvdla_private *priv, void *arg)
nvdla_dbg_fn(pdev, "inside emulator task submit");
task.queue = queue;
task.sp = &nvhost_get_host(pdev)->syncpt;
user_tasks = (struct nvdla_ioctl_emu_submit_task __user *)
(uintptr_t)args->tasks;

View File

@@ -338,7 +338,7 @@ static void nvdla_queue_update(void *priv, int nr_completed)
/* check which task(s) finished */
list_for_each_entry_safe(task, safe, &queue->tasklist, list) {
task_complete = nvhost_syncpt_is_expired(task->sp,
task_complete = nvhost_syncpt_is_expired_ext(pdev,
queue->syncpt_id, task->fence);
/* clean task and remove from list */
@@ -475,14 +475,12 @@ static int nvdla_add_fence_action_cb(struct nvhost_ctrl_sync_fence_info info, vo
struct nvdla_queue *queue = args->queue;
u8 **next = args->mem;
struct platform_device *pdev = queue->pool->pdev;
struct nvhost_master *host = nvhost_get_host(pdev);
struct nvhost_syncpt *sp = &host->syncpt;
dma_addr_t syncpt_addr;
id = info.id;
thresh = info.thresh;
if (!id || !nvhost_syncpt_is_valid_hw_pt(sp, id)) {
if (!id || !nvhost_syncpt_is_valid_pt_ext(pdev, id)) {
nvdla_dbg_err(pdev, "Invalid sync_fd");
return -EINVAL;
}
@@ -1247,7 +1245,7 @@ int nvdla_emulator_submit(struct nvdla_queue *queue, struct nvdla_emu_task *task
}
/* get fence from nvhost */
task->fence = nvhost_syncpt_incr_max(task->sp, queue->syncpt_id,
task->fence = nvhost_syncpt_incr_max_ext(pdev, queue->syncpt_id,
task->fence_counter);
nvdla_dbg_fn(pdev, "syncpt[%d] fence[%d] task[%p] fence_counter[%u]",
@@ -1379,7 +1377,7 @@ static int nvdla_queue_submit_op(struct nvdla_queue *queue, void *in_task)
/* get fence from nvhost for MMIO mode*/
if (nvdla_dev->submit_mode == NVDLA_SUBMIT_MODE_MMIO) {
task->fence = nvhost_syncpt_incr_max(task->sp,
task->fence = nvhost_syncpt_incr_max_ext(pdev,
queue->syncpt_id,
task->fence_counter);
}
@@ -1524,7 +1522,6 @@ static int nvdla_queue_abort_op(struct nvdla_queue *queue)
struct nvdla_task *t;
struct nvdla_cmd_data cmd_data;
struct platform_device *pdev = queue->pool->pdev;
struct platform_device *host1x = to_platform_device(pdev->dev.parent);
int retry = NVDLA_QUEUE_ABORT_TIMEOUT / NVDLA_QUEUE_ABORT_RETRY_PERIOD;
nvdla_dbg_fn(pdev, "");
@@ -1568,7 +1565,7 @@ static int nvdla_queue_abort_op(struct nvdla_queue *queue)
t = list_last_entry(&queue->tasklist, struct nvdla_task, list);
/* reset syncpoint to release all tasks */
fence = nvhost_syncpt_read_maxval(host1x, queue->syncpt_id);
fence = nvhost_syncpt_read_maxval(pdev, queue->syncpt_id);
nvhost_syncpt_set_min_update(pdev, queue->syncpt_id, fence);
/* dump details */