nvdla: kmd: switch to using portability layer

- Revamp the core logic to use the portability layer.
- Introduce a ping debugfs node (a usage sketch follows below).
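
A minimal user-space sketch of exercising the new node. The debugfs path
below is an assumption (the node is created as "ping" under the driver's
firmware debugfs directory); writing N makes the driver ping the firmware
with challenge N and verify a response of 4*N:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path of the node created by this change. */
	int fd = open("/sys/kernel/debug/nvdla0/fw/ping", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Challenge with 5; the driver verifies the firmware answers 20. */
	if (write(fd, "5", 1) != 1)
		perror("write"); /* ping failed or response mismatched */

	close(fd);
	return 0;
}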

Bug:
- Update the abort retry logic to handle the -EAGAIN error code.
  Without this retry, the queue becomes unavailable due to a race
  between the reset and the flush command (see the sketch below).
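
The updated flush loop, condensed from the nvdla_queue_abort_op() hunk
further below. The retry bound here is a hypothetical stand-in, since the
hunk does not show the real loop condition:

int retries = 10; /* hypothetical bound, not part of this patch */

do {
	err = nvdla_fw_send_cmd(pdev, &cmd_data);
	/* -EAGAIN is returned while the device is unavailable
	 * (e.g. across an engine reset); retry it like a busy
	 * firmware instead of failing the queue flush. */
	if ((err == DLA_ERR_PROCESSOR_BUSY) || (err == -EAGAIN))
		mdelay(NVDLA_QUEUE_ABORT_RETRY_PERIOD);
	else
		break;
} while (--retries > 0);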

Jira DLA-7294
Jira DLA-7310

Change-Id: I9f54f14334736189a00d2236f374188c2bac6155
Signed-off-by: Arvind M <am@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3196673
Reviewed-by: Akshata Bhat <akshatab@nvidia.com>
Reviewed-by: Ken Adams <kadams@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Author: Arvind M
Date: 2024-09-05 08:31:14 +00:00
Committed by: Jon Hunter
Parent: 9a42ad2d63
Commit: 48a651f8b4
12 changed files with 425 additions and 550 deletions

View File

@@ -1,25 +1,31 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-FileCopyrightText: Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# nvhost-nvdla.ko
#
GCOV_PROFILE := y
NVDLA_TOP := ./
include $(src)/Makefile.config.mk
ifdef CONFIG_TEGRA_GRHOST
ccflags-y += -DCONFIG_TEGRA_NVDLA_CHANNEL
NVDLA_COMMON_OBJS := $(addprefix $(NVDLA_TOP)/,$(NVDLA_COMMON_OBJS))
ccflags-y += -DNVDLA_HAVE_CONFIG_HW_PERFMON=1
ccflags-y += -DNVDLA_HAVE_CONFIG_AXI=0
ccflags-y += -DNVDLA_HAVE_CONFIG_SYNCPTFD=1
ifdef CONFIG_TEGRA_HSIERRRPTINJ
ccflags-y += -DNVDLA_HAVE_CONFIG_HSIERRINJ=1
endif
ccflags-y += -Werror
ccflags-y += -DCONFIG_TEGRA_HOST1X
nvhost-nvdla-objs = \
nvdla.o \
nvdla_buffer.o \
nvdla_ioctl.o \
dla_queue.o \
nvdla_queue.o \
nvdla_debug.o
nvhost-nvdla-objs += \
$(NVDLA_COMMON_OBJS) \
$(NVDLA_TOP)/port/device/nvdla_device_host1x.o \
$(NVDLA_TOP)/port/fw/nvdla_fw_flcn.o \
$(NVDLA_TOP)/port/sync/nvdla_sync_syncpt.o
ifdef CONFIG_TEGRA_GRHOST
nvhost-nvdla-objs += dla_channel.o
nvhost-nvdla-objs += $(NVDLA_TOP)/dla_channel.o
endif
obj-m += nvhost-nvdla.o

View File

@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023, NVIDIA Corporation. All rights reserved.
/* SPDX-License-Identifier: LicenseRef-NvidiaProprietary */
/* SPDX-FileCopyrightText: Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* NVDLA channel submission
*/
@@ -10,6 +9,7 @@
#include "dla_queue.h"
#include "nvdla.h"
#include "port/nvdla_fw.h"
#if IS_ENABLED(CONFIG_TEGRA_NVDLA_CHANNEL)
struct platform_device *nvdla_channel_map(struct platform_device *pdev,

View File

@@ -298,7 +298,7 @@ static void nvdla_queue_release(struct kref *ref)
nvdla_putchannel(queue);
/* release allocated resources */
nvhost_syncpt_put_ref_ext(pool->pdev, queue->syncpt_id);
nvdla_sync_destroy(queue->sync_context);
/* free the task_pool */
if (queue->task_dma_size)
@@ -327,6 +327,8 @@ struct nvdla_queue *nvdla_queue_alloc(struct nvdla_queue_pool *pool,
bool use_channel)
{
struct platform_device *pdev = pool->pdev;
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct nvdla_device *nvdla_dev = pdata->private_data;
struct nvdla_queue *queues = pool->queues;
struct nvdla_queue *queue;
int index = 0;
@@ -360,12 +362,12 @@ struct nvdla_queue *nvdla_queue_alloc(struct nvdla_queue_pool *pool,
queue = &queues[index];
set_bit(index, &pool->alloc_table);
/* allocate a syncpt for the queue */
queue->syncpt_id = nvhost_get_syncpt_host_managed(pdev, index, NULL);
if (!queue->syncpt_id) {
dev_err(&pdev->dev, "failed to get syncpt id\n");
/* allocate a sync context for the queue */
queue->sync_context = nvdla_sync_create(nvdla_dev->sync_dev);
if (queue->sync_context == NULL) {
dev_err(&pdev->dev, "failed to create sync context\n");
err = -ENOMEM;
goto err_alloc_syncpt;
goto err_alloc_sync;
}
/* initialize queue ref count and sequence */
@@ -407,8 +409,8 @@ err_alloc_task_pool:
nvdla_putchannel(queue);
err_alloc_channel:
mutex_lock(&pool->queue_lock);
nvhost_syncpt_put_ref_ext(pdev, queue->syncpt_id);
err_alloc_syncpt:
nvdla_sync_destroy(queue->sync_context);
err_alloc_sync:
clear_bit(queue->id, &pool->alloc_table);
err_alloc_queue:
mutex_unlock(&pool->queue_lock);

View File

@@ -9,6 +9,8 @@
#include <linux/kref.h>
#include "port/nvdla_sync.h"
struct nvdla_queue_task_pool;
/**
@@ -33,7 +35,7 @@ struct nvdla_queue_task_mem_info {
*
* pool pointer queue pool
* kref struct kref for reference count
* syncpt_id Host1x syncpt id
* sync_context NvDLA synchronization context
* id Queue id
* list_lock mutex protecting the task lists
* tasklist Head of tasks list
@@ -56,7 +58,7 @@ struct nvdla_queue {
#endif
struct platform_device *vm_pdev;
bool use_channel;
u32 syncpt_id;
struct nvdla_sync_context *sync_context;
size_t task_dma_size;
size_t task_kmem_size;

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
/* SPDX-FileCopyrightText: Copyright (c) 2016-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* NVDLA driver for T194/T23x
*/
@@ -22,11 +22,13 @@
#include <soc/tegra/fuse-helper.h>
#include <soc/tegra/fuse.h>
#include <uapi/linux/nvhost_nvdla_ioctl.h>
#if defined(NVDLA_HAVE_CONFIG_HW_PERFMON) && (NVDLA_HAVE_CONFIG_HW_PERFMON == 1)
#include <uapi/linux/tegra-soc-hwpm-uapi.h>
#endif /* NVDLA_HAVE_CONFIG_HW_PERFMON */
#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ))
#if defined(NVDLA_HAVE_CONFIG_HSIERRINJ) && (NVDLA_HAVE_CONFIG_HSIERRINJ == 1)
#include <linux/tegra-hsierrrptinj.h>
#endif /* CONFIG_TEGRA_HSIERRRPTINJ */
#endif /* NVDLA_HAVE_CONFIG_HSIERRINJ */
#if !IS_ENABLED(CONFIG_TEGRA_GRHOST)
#include <linux/clk.h>
@@ -38,12 +40,16 @@
#include "nvdla_hw_flcn.h"
#include "nvdla_t194.h"
#include "nvdla_t234.h"
#include "nvdla_t25x.h"
#include "nvdla_t264_sim.h"
#include "dla_queue.h"
#include "nvdla_buffer.h"
#include "nvdla_debug.h"
#include "dla_os_interface.h"
#include "port/nvdla_device.h"
#include "port/nvdla_fw.h"
#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ))
#if defined(NVDLA_HAVE_CONFIG_HSIERRINJ) && (NVDLA_HAVE_CONFIG_HSIERRINJ == 1)
int nvdla_error_inj_handler(unsigned int instance_id,
struct epl_error_report_frame frame,
void *data)
@@ -79,7 +85,7 @@ int nvdla_error_inj_handler(unsigned int instance_id,
goto fail;
}
err = nvhost_module_busy(pdev);
err = nvdla_module_busy(pdev);
if (err < 0) {
nvdla_dbg_err(pdev, "failed to power on\n");
err = -ENODEV;
@@ -89,20 +95,22 @@ int nvdla_error_inj_handler(unsigned int instance_id,
if ((frame.reporter_id == device_ue_reporter_id) &&
(frame.error_code == device_ue_error_code)) {
/* Inject uncorrected error. */
host1x_writel(pdev, flcn_safety_erb_r(),
flcn_safety_erb_data_uncorrected_err_v());
nvdla_dbg_info(pdev, "UE Reported ID: %x, Error Code: %x",
frame.reporter_id, frame.error_code);
nvdla_fw_inject_uncorrected_error(pdev);
} else if ((frame.reporter_id == device_ce_reporter_id) &&
(frame.error_code == device_ce_error_code)) {
/* Inject corrected error. */
host1x_writel(pdev, flcn_safety_erb_r(),
flcn_safety_erb_data_corrected_err_v());
nvdla_dbg_info(pdev, "CE Reported ID: %x, Error Code: %x",
frame.reporter_id, frame.error_code);
nvdla_fw_inject_corrected_error(pdev);
} else {
nvdla_dbg_err(pdev, "Invalid Reported ID: %x, Error Code: %x",
frame.reporter_id, frame.error_code);
err = -EINVAL;
}
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
fail:
return err;
@@ -137,7 +145,7 @@ static void nvdla_error_inj_handler_deinit(struct nvdla_device *nvdla_dev)
hsierrrpt_dereg_cb(IP_DLA, instance_id);
}
#endif /* CONFIG_TEGRA_HSIERRRPTINJ */
#endif /* NVDLA_HAVE_CONFIG_HSIERRINJ */
/*
* Work to handle engine reset for error recovery
@@ -150,7 +158,7 @@ static void nvdla_reset_handler(struct work_struct *work)
struct platform_device *pdev = nvdla_dev->pdev;
/* reset engine */
nvhost_module_reset(pdev, true);
nvdla_module_reset(pdev, true);
nvdla_dbg_info(pdev, "Engine reset done\n");
}
@@ -160,7 +168,7 @@ static void nvdla_reset_handler_init(struct nvdla_device *nvdla_dev)
INIT_WORK(&nvdla_dev->reset_work, nvdla_reset_handler);
}
int nvhost_nvdla_flcn_isr(struct platform_device *pdev)
int nvdla_flcn_isr(struct platform_device *pdev)
{
uint32_t message;
uint32_t mailbox0;
@@ -168,7 +176,7 @@ int nvhost_nvdla_flcn_isr(struct platform_device *pdev)
struct nvdla_device *nvdla_dev = pdata->private_data;
/* dump falcon data if debug enabled */
mailbox0 = host1x_readl(pdev, flcn_mailbox0_r());
(void) nvdla_fw_interrupt_stat_read(pdev, &mailbox0);
message = mailbox0 & DLA_RESPONSE_MSG_MASK;
@@ -195,13 +203,11 @@ int nvhost_nvdla_flcn_isr(struct platform_device *pdev)
}
clear_interrupt:
/* logic to clear the interrupt */
host1x_writel(pdev, flcn_irqmclr_r(), flcn_irqmclr_swgen1_set_f());
host1x_writel(pdev, flcn_thi_int_stat_r(), flcn_thi_int_stat_clr_f());
host1x_readl(pdev, flcn_thi_int_stat_r());
host1x_writel(pdev, flcn_irqsclr_r(), flcn_irqsclr_swgen1_set_f());
/* Clear the interrupt */
(void) nvdla_fw_interrupt_stat_clear(pdev);
/* Notify FW that interrupt handling is complete */
host1x_writel(pdev, flcn_mailbox0_r(), DLA_MSG_INTERRUPT_HANDLING_COMPLETE);
(void) nvdla_fw_send_ack(pdev, DLA_MSG_INTERRUPT_HANDLING_COMPLETE);
return 0;
}
@@ -297,72 +303,6 @@ int nvdla_put_cmd_memory(struct platform_device *pdev, int index)
return 0;
}
int nvdla_send_cmd(struct platform_device *pdev,
struct nvdla_cmd_data *cmd_data)
{
unsigned long timeout;
int ret = 0;
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct nvdla_device *nvdla_dev = pdata->private_data;
uint32_t method_id = cmd_data->method_id;
uint32_t method_data = cmd_data->method_data;
bool wait = cmd_data->wait;
mutex_lock(&nvdla_dev->cmd_lock);
/**
* If the device is unavailable, error out so the caller can retry later.
**/
if (!nvdla_dev->available) {
nvdla_dbg_err(pdev, "Command failed: device unavailable\n");
mutex_unlock(&nvdla_dev->cmd_lock);
return -EAGAIN;
}
/*
* enable notification for command completion or error,
* if a wait is required
*/
if (wait)
method_id |= (1 << DLA_INT_ON_COMPLETE_SHIFT) |
(1 << DLA_INT_ON_ERROR_SHIFT);
nvdla_dev->waiting = 1;
nvdla_dbg_reg(pdev, "method_id=[0x%x]", method_id);
host1x_writel(pdev, NV_DLA_THI_METHOD_ID, method_id);
nvdla_dbg_reg(pdev, "method_data=[0x%x]", method_data);
host1x_writel(pdev, NV_DLA_THI_METHOD_DATA, method_data);
if (!wait) {
nvdla_dev->waiting = 0;
mutex_unlock(&nvdla_dev->cmd_lock);
return 0;
}
timeout = msecs_to_jiffies(CMD_TIMEOUT_MSEC);
if (!wait_for_completion_timeout(&nvdla_dev->cmd_completion, timeout)) {
nvdla_dev->waiting = 0;
mutex_unlock(&nvdla_dev->cmd_lock);
return -ETIMEDOUT;
}
if (nvdla_dev->cmd_status != DLA_ERR_NONE) {
nvdla_dbg_err(pdev, "Command %u failed\n", method_id);
ret = -EINVAL;
}
/* Reset command status after use for next command */
nvdla_dev->cmd_status = DLA_ERR_NONE;
nvdla_dev->waiting = 0;
mutex_unlock(&nvdla_dev->cmd_lock);
return ret;
}
static int nvdla_set_gcov_region(struct platform_device *pdev, bool unset_region)
{
int err = 0;
@@ -375,7 +315,7 @@ static int nvdla_set_gcov_region(struct platform_device *pdev, bool unset_region
if (!pdata->flcn_isr)
return 0;
err = nvhost_module_busy(pdev);
err = nvdla_module_busy(pdev);
if (err) {
nvdla_dbg_err(pdev, "failed to power on\n");
err = -ENODEV;
@@ -403,7 +343,7 @@ static int nvdla_set_gcov_region(struct platform_device *pdev, bool unset_region
cmd_data.method_data = ALIGNED_DMA(gcov_cmd_mem_info.pa);
cmd_data.wait = true;
err = nvdla_send_cmd(pdev, &cmd_data);
err = nvdla_fw_send_cmd(pdev, &cmd_data);
/* release memory allocated for gcov command */
nvdla_put_cmd_memory(pdev, gcov_cmd_mem_info.index);
@@ -413,13 +353,13 @@ static int nvdla_set_gcov_region(struct platform_device *pdev, bool unset_region
goto gcov_send_cmd_failed;
}
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
return err;
gcov_send_cmd_failed:
alloc_gcov_cmd_failed:
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
fail_to_power_on:
return err;
}
@@ -523,7 +463,7 @@ static int nvdla_alloc_trace_region(struct platform_device *pdev)
cmd_data.method_data = ALIGNED_DMA(trace_cmd_mem_info.pa);
cmd_data.wait = true;
err = nvdla_send_cmd(pdev, &cmd_data);
err = nvdla_fw_send_cmd(pdev, &cmd_data);
/* release memory allocated for trace command */
nvdla_put_cmd_memory(pdev, trace_cmd_mem_info.index);
@@ -596,7 +536,7 @@ static int nvdla_alloc_dump_region(struct platform_device *pdev)
cmd_data.wait = true;
/* pass dump region to falcon */
err = nvdla_send_cmd(pdev, &cmd_data);
err = nvdla_fw_send_cmd(pdev, &cmd_data);
/* release memory allocated for debug print command */
nvdla_put_cmd_memory(pdev, debug_cmd_mem_info.index);
@@ -622,43 +562,20 @@ fail_to_alloc_debug_dump:
}
/* power management API */
int nvhost_nvdla_finalize_poweron(struct platform_device *pdev)
int nvdla_finalize_poweron(struct platform_device *pdev)
{
int ret;
uint32_t fw_ver_read_bin;
uint32_t firmware_version;
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct nvdla_device *nvdla_dev = pdata->private_data;
nvdla_dbg_fn(pdev, "");
ret = nvhost_flcn_finalize_poweron(pdev);
ret = nvdla_fw_poweron(pdev);
if (ret) {
nvdla_dbg_err(pdev, "failed to poweron\n");
goto fail;
}
fw_ver_read_bin = host1x_readl(pdev, NV_DLA_OS_VERSION);
firmware_version = pdata->version;
if ((firmware_version & 0xffff00) != (fw_ver_read_bin & 0xffff00)) {
nvdla_dbg_err(pdev,
"Fw version of kernel [%u.%u.%u] doesn't match with actual version[%u.%u.%u]",
(firmware_version >> 16) & 0xff, (firmware_version >> 8) & 0xff, firmware_version & 0xff,
(fw_ver_read_bin >> 16 ) & 0xff, (fw_ver_read_bin >> 8) & 0xff, fw_ver_read_bin & 0xff);
ret = -EINVAL;
goto fail_to_val_ver;
}
nvdla_dbg_info(pdev, "Fw version : [%u.%u.%u]\n",
(fw_ver_read_bin >> 16) & 0xff,
(fw_ver_read_bin >> 8) & 0xff,
fw_ver_read_bin & 0xff);
nvdla_dev->fw_version = fw_ver_read_bin;
/**
* At this point, the falcon and hardware are available for use.
**/
@@ -669,26 +586,24 @@ int nvhost_nvdla_finalize_poweron(struct platform_device *pdev)
ret = nvdla_alloc_dump_region(pdev);
if (ret) {
nvdla_dbg_err(pdev, "fail alloc dump region\n");
goto fail_to_alloc_dump_reg;
goto poweroff;
}
ret = nvdla_alloc_trace_region(pdev);
if (ret) {
nvdla_dbg_err(pdev, "fail alloc trace region\n");
goto fail_to_alloc_trace;
goto poweroff;
}
return 0;
fail_to_alloc_trace:
fail_to_alloc_dump_reg:
fail_to_val_ver:
nvhost_nvdla_prepare_poweroff(pdev);
poweroff:
nvdla_prepare_poweroff(pdev);
fail:
return ret;
}
int nvhost_nvdla_prepare_poweroff(struct platform_device *pdev)
int nvdla_prepare_poweroff(struct platform_device *pdev)
{
int ret;
@@ -702,7 +617,7 @@ int nvhost_nvdla_prepare_poweroff(struct platform_device *pdev)
nvdla_dev->available = false;
mutex_unlock(&nvdla_dev->cmd_lock);
ret = nvhost_flcn_prepare_poweroff(pdev);
ret = nvdla_fw_poweroff(pdev);
if (ret) {
nvdla_dbg_err(pdev, "failed to poweroff\n");
goto out;
@@ -784,6 +699,7 @@ static int nvdla_alloc_window_size_memory(struct platform_device *pdev)
return err;
}
#if defined(NVDLA_HAVE_CONFIG_HW_PERFMON) && (NVDLA_HAVE_CONFIG_HW_PERFMON == 1)
static int nvdla_hwpm_ip_pm(void *ip_dev, bool disable)
{
int err = 0;
@@ -793,11 +709,11 @@ static int nvdla_hwpm_ip_pm(void *ip_dev, bool disable)
disable ? "disable" : "enable");
if (disable) {
err = nvhost_module_busy(ip_dev);
err = nvdla_module_busy(ip_dev);
if (err < 0)
nvdla_dbg_err(dev, "nvhost_module_busy failed");
nvdla_dbg_err(dev, "nvdla_module_busy failed");
} else {
nvhost_module_idle(ip_dev);
nvdla_module_idle(ip_dev);
}
return err;
@@ -815,12 +731,15 @@ static int nvdla_hwpm_ip_reg_op(void *ip_dev,
nvdla_dbg_fn(dev, "reg_op %d reg_offset %llu", reg_op, reg_offset);
if (reg_op == TEGRA_SOC_HWPM_IP_REG_OP_READ)
*reg_data = host1x_readl(dev, (unsigned int)reg_offset);
*reg_data = nvdla_device_register_read(dev,
(unsigned int)reg_offset);
else if (reg_op == TEGRA_SOC_HWPM_IP_REG_OP_WRITE)
host1x_writel(dev, (unsigned int)reg_offset, *reg_data);
nvdla_device_register_write(dev, (unsigned int)reg_offset,
*reg_data);
return 0;
}
#endif
static uint32_t nvdla_read_soft_sku_scratch_register(void)
{
@@ -841,7 +760,7 @@ static uint32_t nvdla_read_soft_sku_scratch_register(void)
}
#if KERNEL_VERSION(5, 11, 0) >= LINUX_VERSION_CODE
static int nvhost_nvdla_read_chip_option_register(struct platform_device *pdev)
static int nvdla_read_chip_option_register(struct platform_device *pdev)
{
/* Read floor sweeping info using nvmem api
* See Bug 200748079
@@ -1005,6 +924,14 @@ static struct of_device_id tegra_nvdla_of_match[] = {
.name = "nvdla1",
.compatible = "nvidia,tegra234-nvdla",
.data = (struct nvhost_device_data *)&t23x_nvdla1_info },
{
.name = "nvdla0",
.compatible = "nvidia,tegra25x-nvdla",
.data = (struct nvhost_device_data *)&t25x_nvdla0_info },
{
.name = "nvdla",
.compatible = "nvidia,tegra264-nvdla",
.data = (struct nvhost_device_data *)&t264_sim_nvdla_info },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_nvdla_of_match);
@@ -1040,7 +967,9 @@ static int nvdla_probe(struct platform_device *pdev)
uint32_t soft_fuse_ret = 0U;
int fuse_register_ret = 0U;
uint32_t register_value = 0U;
#if defined(NVDLA_HAVE_CONFIG_HW_PERFMON) && (NVDLA_HAVE_CONFIG_HW_PERFMON == 1)
struct tegra_soc_hwpm_ip_ops hwpm_ip_ops;
#endif /* NVDLA_HAVE_CONFIG_HW_PERFMON */
#if !IS_ENABLED(CONFIG_TEGRA_GRHOST)
struct kobj_attribute *attr = NULL;
@@ -1101,7 +1030,7 @@ static int nvdla_probe(struct platform_device *pdev)
}
} else {
#if KERNEL_VERSION(5, 11, 0) >= LINUX_VERSION_CODE
fuse_register_ret = nvhost_nvdla_read_chip_option_register(pdev);
fuse_register_ret = nvdla_read_chip_option_register(pdev);
#else
err = tegra_fuse_readl(NVDLA_DISABLE_FUSE_REGISTER_OFFSET, &register_value);
fuse_register_ret = (int)register_value;
@@ -1146,13 +1075,11 @@ static int nvdla_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pdata);
nvdla_dev->dbg_mask = debug_err;
err = nvhost_client_device_get_resources(pdev);
if (err)
goto err_get_resources;
err = nvhost_module_init(pdev);
if (err)
goto err_module_init;
err = nvdla_module_init(pdev);
if (err != 0) {
dev_err(dev, "Failed to init device\n");
goto err_device_init;
}
if (pdata->version == FIRMWARE_ENCODE_VERSION(T23X)) {
if (num_enabled_dla_instances(soft_fuse_ret, fuse_register_ret) == 1) {
@@ -1160,15 +1087,10 @@ static int nvdla_probe(struct platform_device *pdev)
}
}
err = nvhost_client_device_init(pdev);
if (err)
goto err_client_device_init;
/* create debugfs entries */
nvdla_debug_init(pdev);
if (pdata->flcn_isr)
flcn_intr_init(pdev);
(void) nvdla_fw_init(pdev);
nvdla_dev->pool = nvdla_queue_init(pdev, &nvdla_queue_ops,
MAX_NVDLA_QUEUE_COUNT);
@@ -1180,9 +1102,11 @@ static int nvdla_probe(struct platform_device *pdev)
/* init reset handler workqueue */
nvdla_reset_handler_init(nvdla_dev);
err = nvhost_syncpt_unit_interface_init(pdev);
if (err)
nvdla_dev->sync_dev = nvdla_sync_device_create_syncpoint(pdev);
if (nvdla_dev->sync_dev == NULL) {
err = -ENOMEM;
goto err_mss_init;
}
err = nvdla_alloc_cmd_memory(pdev);
if (err)
@@ -1196,6 +1120,7 @@ static int nvdla_probe(struct platform_device *pdev)
if (err)
goto err_alloc_window_size_mem;
#if defined(NVDLA_HAVE_CONFIG_HW_PERFMON) && (NVDLA_HAVE_CONFIG_HW_PERFMON == 1)
nvdla_dbg_info(pdev, "hwpm ip %s register", pdev->name);
hwpm_ip_ops.ip_dev = (void *)pdev;
hwpm_ip_ops.ip_base_address = pdev->resource[0].start;
@@ -1203,14 +1128,15 @@ static int nvdla_probe(struct platform_device *pdev)
hwpm_ip_ops.hwpm_ip_pm = &nvdla_hwpm_ip_pm;
hwpm_ip_ops.hwpm_ip_reg_op = &nvdla_hwpm_ip_reg_op;
tegra_soc_hwpm_ip_register(&hwpm_ip_ops);
#endif
#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ))
#if defined(NVDLA_HAVE_CONFIG_HSIERRINJ) && (NVDLA_HAVE_CONFIG_HSIERRINJ == 1)
err = nvdla_error_inj_handler_init(nvdla_dev);
if (err) {
dev_err(dev, "Failed to register error injection\n");
goto err_inj_handler_init;
}
#endif /* CONFIG_TEGRA_HSIERRRPTINJ */
#endif /* NVDLA_HAVE_CONFIG_HSIERRINJ */
#if !IS_ENABLED(CONFIG_TEGRA_GRHOST)
if (pdata->num_clks > 0) {
@@ -1259,25 +1185,25 @@ err_cleanup_sysfs:
kobject_put(&pdata->clk_cap_kobj);
err_clk_cap_fail:
#endif
#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ))
#if defined(NVDLA_HAVE_CONFIG_HSIERRINJ) && (NVDLA_HAVE_CONFIG_HSIERRINJ == 1)
err_inj_handler_init:
#if defined(NVDLA_HAVE_CONFIG_HW_PERFMON) && (NVDLA_HAVE_CONFIG_HW_PERFMON == 1)
tegra_soc_hwpm_ip_unregister(&hwpm_ip_ops);
#endif /* NVDLA_HAVE_CONFIG_HW_PERFMON */
nvdla_free_window_size_memory(pdev);
#endif /* CONFIG_TEGRA_HSIERRRPTINJ */
#endif /* NVDLA_HAVE_CONFIG_HSIERRINJ */
err_alloc_window_size_mem:
nvdla_free_utilization_rate_memory(pdev);
err_alloc_utilization_rate_mem:
nvdla_free_cmd_memory(pdev);
err_alloc_cmd_mem:
nvhost_syncpt_unit_interface_deinit(pdev);
nvdla_sync_device_destroy(nvdla_dev->sync_dev);
err_mss_init:
nvdla_queue_deinit(nvdla_dev->pool);
err_queue_init:
nvhost_client_device_release(pdev);
err_client_device_init:
nvhost_module_deinit(pdev);
err_module_init:
err_get_resources:
nvdla_fw_deinit(pdev);
nvdla_module_deinit(pdev);
err_device_init:
mutex_destroy(&nvdla_dev->ping_lock);
devm_kfree(dev, nvdla_dev);
err_alloc_nvdla:
@@ -1291,7 +1217,9 @@ static int __exit nvdla_remove(struct platform_device *pdev)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct nvdla_device *nvdla_dev = pdata->private_data;
#if defined(NVDLA_HAVE_CONFIG_HW_PERFMON) && (NVDLA_HAVE_CONFIG_HW_PERFMON == 1)
struct tegra_soc_hwpm_ip_ops hwpm_ip_ops;
#endif /* NVDLA_HAVE_CONFIG_HW_PERFMON */
#if !IS_ENABLED(CONFIG_TEGRA_GRHOST)
int i;
@@ -1307,6 +1235,7 @@ static int __exit nvdla_remove(struct platform_device *pdev)
}
#endif
#if defined(NVDLA_HAVE_CONFIG_HW_PERFMON) && (NVDLA_HAVE_CONFIG_HW_PERFMON == 1)
nvdla_dbg_info(pdev, "hwpm ip %s unregister", pdev->name);
hwpm_ip_ops.ip_dev = (void *)pdev;
hwpm_ip_ops.ip_base_address = pdev->resource[0].start;
@@ -1314,15 +1243,15 @@ static int __exit nvdla_remove(struct platform_device *pdev)
hwpm_ip_ops.hwpm_ip_pm = NULL;
hwpm_ip_ops.hwpm_ip_reg_op = NULL;
tegra_soc_hwpm_ip_unregister(&hwpm_ip_ops);
#endif /* NVDLA_HAVE_CONFIG_HW_PERFMON */
#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ))
#if defined(NVDLA_HAVE_CONFIG_HSIERRINJ) && (NVDLA_HAVE_CONFIG_HSIERRINJ == 1)
nvdla_error_inj_handler_deinit(nvdla_dev);
#endif /* CONFIG_TEGRA_HSIERRRPTINJ */
#endif /* NVDLA_HAVE_CONFIG_HSIERRINJ */
nvhost_syncpt_unit_interface_deinit(pdev);
nvdla_sync_device_destroy(nvdla_dev->sync_dev);
nvdla_queue_deinit(nvdla_dev->pool);
nvhost_client_device_release(pdev);
nvhost_module_deinit(pdev);
nvdla_module_deinit(pdev);
mutex_destroy(&nvdla_dev->ping_lock);
nvdla_free_gcov_region(pdev, false);
@@ -1356,180 +1285,6 @@ static int __exit nvdla_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
static int nvdla_module_runtime_suspend(struct device *dev)
{
struct nvhost_device_data *pdata = dev_get_drvdata(dev);
struct nvdla_device *nvdla = pdata->private_data;
int err;
if (nvhost_module_pm_ops.runtime_suspend != NULL) {
err = nvhost_module_pm_ops.runtime_suspend(dev);
if (!err && nvdla->icc_write) {
err = icc_set_bw(nvdla->icc_write, 0, 0);
if (err)
dev_warn(&nvdla->pdev->dev,
"failed to set icc_write bw: %d\n", err);
return 0;
}
return err;
}
return -EOPNOTSUPP;
}
static int nvdla_module_runtime_resume(struct device *dev)
{
struct nvhost_device_data *pdata = dev_get_drvdata(dev);
struct nvdla_device *nvdla = pdata->private_data;
struct clk *clk = pdata->clks[0].clk;
unsigned long rate;
u32 emc_kbps;
int err;
if (nvhost_module_pm_ops.runtime_resume != NULL) {
err = nvhost_module_pm_ops.runtime_resume(dev);
if (!err && nvdla->icc_write) {
rate = clk_get_rate(clk);
emc_kbps = rate * NVDLA_AXI_DBB_BW_BPC / 1024;
err = icc_set_bw(nvdla->icc_write, kbps_to_icc(emc_kbps), 0);
if (err)
dev_warn(&nvdla->pdev->dev,
"failed to set icc_write bw: %d\n", err);
return 0;
}
return err;
}
return -EOPNOTSUPP;
}
static int nvdla_module_suspend(struct device *dev)
{
struct nvhost_device_data *pdata = dev_get_drvdata(dev);
struct nvdla_device *nvdla_dev = pdata->private_data;
int err = 0;
if (nvhost_module_pm_ops.suspend != NULL) {
err = nvhost_module_pm_ops.suspend(dev);
if (err != 0) {
dev_err(dev, "(FAIL) NvHost suspend\n");
goto fail_nvhost_module_suspend;
}
} else {
err = pm_runtime_force_suspend(dev);
if (err != 0) {
dev_err(dev, "(FAIL) PM suspend\n");
goto fail_nvhost_module_suspend;
}
}
if (nvdla_dev->icc_write) {
err = icc_set_bw(nvdla_dev->icc_write, 0, 0);
if (err)
dev_warn(&nvdla_dev->pdev->dev,
"failed to set icc_write bw: %d\n", err);
}
/* Mark module to be in suspend state. */
nvdla_dev->is_suspended = true;
fail_nvhost_module_suspend:
return err;
}
static int nvdla_module_resume(struct device *dev)
{
struct nvhost_device_data *pdata = dev_get_drvdata(dev);
struct nvdla_device *nvdla_dev = pdata->private_data;
int err;
/* Confirm if module is in suspend state. */
if (!nvdla_dev->is_suspended) {
dev_warn(dev, "NvDla is not in suspend state.\n");
goto fail_not_in_suspend;
}
if (nvhost_module_pm_ops.resume != NULL) {
err = nvhost_module_pm_ops.resume(dev);
if (err != 0) {
dev_err(dev, "(FAIL) NvHost resume\n");
goto fail_nvhost_module_resume;
}
} else {
err = pm_runtime_force_resume(dev);
if (err != 0) {
dev_err(dev, "(FAIL) PM resume\n");
goto fail_nvhost_module_resume;
}
}
return 0;
fail_nvhost_module_resume:
fail_not_in_suspend:
return err;
}
static int nvdla_module_prepare_suspend(struct device *dev)
{
int err = 0;
struct nvhost_device_data *pdata = dev_get_drvdata(dev);
struct nvdla_device *nvdla_dev = pdata->private_data;
/* Confirm if module is not in suspend state. */
if (nvdla_dev->is_suspended) {
dev_warn(dev, "NvDla is already in suspend state.\n");
goto fail_already_in_suspend;
}
/* Prepare for queue pool suspension. */
err = nvdla_queue_pool_prepare_suspend(nvdla_dev->pool);
if (err != 0) {
dev_err(dev, "(FAIL) Queue suspend\n");
goto fail_nvdla_queue_pool_prepare_suspend;
}
/* NvHost prepare suspend - callback */
if (nvhost_module_pm_ops.prepare != NULL) {
err = nvhost_module_pm_ops.prepare(dev);
if (err != 0) {
dev_err(dev, "(FAIL) NvHost prepare suspend\n");
goto fail_nvhost_module_prepare_suspend;
}
} else {
/* If we took an extra reference, drop it now to prevent
* the device from automatically resuming upon system
* resume.
*/
pm_runtime_put_sync(dev);
}
return 0;
fail_nvhost_module_prepare_suspend:
fail_nvdla_queue_pool_prepare_suspend:
fail_already_in_suspend:
return err;
}
static void nvdla_module_complete_resume(struct device *dev)
{
struct nvhost_device_data *pdata = dev_get_drvdata(dev);
struct nvdla_device *nvdla_dev = pdata->private_data;
if (nvhost_module_pm_ops.complete != NULL) {
nvhost_module_pm_ops.complete(dev);
} else {
/* Retake reference dropped above */
pm_runtime_get_noresume(dev);
}
/* Module is no longer in suspend and has resumed successfully */
nvdla_dev->is_suspended = false;
}
/**
* SC7 suspend sequence
@@ -1580,33 +1335,16 @@ static struct platform_driver nvdla_driver = {
#if IS_ENABLED(CONFIG_TEGRA_GRHOST)
module_platform_driver(nvdla_driver);
#else
static struct host1x_driver host1x_nvdla_driver = {
.driver = {
.name = "host1x-nvdla",
},
.subdevs = tegra_nvdla_of_match,
};
static int __init nvdla_init(void)
{
int err;
err = host1x_driver_register(&host1x_nvdla_driver);
if (err < 0)
return err;
err = platform_driver_register(&nvdla_driver);
if (err < 0)
host1x_driver_unregister(&host1x_nvdla_driver);
return err;
return nvdla_driver_register(&nvdla_driver);
}
module_init(nvdla_init);
static void __exit nvdla_exit(void)
{
platform_driver_unregister(&nvdla_driver);
host1x_driver_unregister(&host1x_nvdla_driver);
nvdla_driver_unregister(&nvdla_driver);
}
module_exit(nvdla_exit);
#endif

View File

@@ -17,7 +17,9 @@
#include "dla_os_interface.h"
#include "dla_t19x_fw_version.h"
#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ))
#include "port/nvdla_sync.h"
#if defined(NVDLA_HAVE_CONFIG_HSIERRINJ) && (NVDLA_HAVE_CONFIG_HSIERRINJ == 1)
#include <linux/tegra-hsierrrptinj.h>
/*
@@ -38,7 +40,7 @@
#define NVDLA0_UE_HSM_ERROR_CODE 0x290BU
#define NVDLA1_UE_HSM_ERROR_CODE 0x290CU
#endif /* CONFIG_TEGRA_HSIERRRPTINJ */
#endif /* NVDLA_HAVE_CONFIG_HSIERRINJ */
/* DLA FUSE REGISTER
* Corresponds to the offset of "opt-dla-disable" - part of the
@@ -66,8 +68,9 @@
* DLA Host1x class IDs
*/
enum {
NV_DLA0_CLASS_ID = 0xF3,
NV_DLA1_CLASS_ID = 0xF4,
NV_DLA0_CLASS_ID = 0xF3,
NV_DLA1_CLASS_ID = 0xF4,
NV_DLA0_SIM_CLASS_ID = 0xF5,
};
/**
@@ -76,9 +79,11 @@ enum {
#if IS_ENABLED(CONFIG_TEGRA_GRHOST)
#define NV_DLA_TEGRA194_FW "nvhost_nvdla010.fw"
#define NV_DLA_TEGRA234_FW "nvhost_nvdla020.fw"
#define NV_DLA_TEGRA25X_FW "axi_nvdla030.fw"
#else
#define NV_DLA_TEGRA194_FW "nvidia/tegra194/nvdla.bin"
#define NV_DLA_TEGRA234_FW "nvidia/tegra234/nvdla.bin"
#define NV_DLA_TEGRA25X_FW "nvidia/tegra25x/nvdla.bin"
#endif
/**
@@ -233,19 +238,6 @@ struct nvdla_cmd_mem {
unsigned long alloc_table;
};
/**
* data structure to keep command data
*
* @method_id method id with command and other info
* @method_data method data for command
* @wait If set to true then wait for command completion
*/
struct nvdla_cmd_data {
uint32_t method_id;
uint32_t method_data;
bool wait;
};
enum nvdla_submit_mode {
NVDLA_SUBMIT_MODE_MMIO = 0,
NVDLA_SUBMIT_MODE_CHANNEL = 1
@@ -313,6 +305,7 @@ struct nvdla_device {
#endif
struct mutex ping_lock;
bool available;
struct nvdla_sync_device *sync_dev;
};
/**
@@ -401,7 +394,7 @@ extern const struct file_operations tegra_nvdla_ctrl_ops;
extern struct nvdla_queue_ops nvdla_queue_ops;
/**
* nvhost_nvdla_finalize_poweron() finalize power on for DLA
* nvdla_finalize_poweron() finalize power on for DLA
*
* @pdev Pointer for platform device
*
@@ -410,10 +403,10 @@ extern struct nvdla_queue_ops nvdla_queue_ops;
* This function is called from the nvhost ACM subsystem
* to boot the falcon and wait until it goes idle after initial setup
*/
int nvhost_nvdla_finalize_poweron(struct platform_device *pdev);
int nvdla_finalize_poweron(struct platform_device *pdev);
/**
* nvhost_nvdla_prepare_poweron() prepare to poweroff DLA
* nvdla_prepare_poweroff() prepare to poweroff DLA
*
* @pdev Pointer for platform device
*
@@ -422,10 +415,10 @@ int nvhost_nvdla_finalize_poweron(struct platform_device *pdev);
* This function is called from the nvhost ACM subsystem;
* it disables falcon interrupts and lets the PM core powergate and clockgate
*/
int nvhost_nvdla_prepare_poweroff(struct platform_device *pdev);
int nvdla_prepare_poweroff(struct platform_device *pdev);
/**
* nvhost_nvdla_flcn_isr() falcon interrupt handler
* nvdla_flcn_isr() falcon interrupt handler
*
* @pdev Pointer for platform device
*
@@ -434,21 +427,7 @@ int nvhost_nvdla_prepare_poweroff(struct platform_device *pdev);
* This function is called from the nvhost falcon subsystem on receiving a
* falcon interrupt, such as INT_ON_COMPLETE, INT_ON_ERR, DLA_DEBUG, etc.
*/
int nvhost_nvdla_flcn_isr(struct platform_device *pdev);
/**
* nvdla_send_cmd() send command to DLA
*
* @pdev Pointer for platform device
* @cmd_data Pointer command data
*
* Return 0 on success otherwise negative
*
* This function is used to send a method to the falcon, embedding various
* supporting commands. It uses THI registers to send the method id and method data
*/
int nvdla_send_cmd(struct platform_device *pdev,
struct nvdla_cmd_data *cmd_data);
int nvdla_flcn_isr(struct platform_device *pdev);
/**
* nvdla_task_put() decrease task reference count
@@ -541,12 +520,14 @@ int nvdla_get_signal_fences(struct nvdla_queue *queue, void *in_task);
extern const struct dev_pm_ops nvdla_module_pm_ops;
#endif
#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ))
#if defined(NVDLA_HAVE_CONFIG_HSIERRINJ) && (NVDLA_HAVE_CONFIG_HSIERRINJ == 1)
int nvdla_error_inj_handler(unsigned int instance_id,
struct epl_error_report_frame frame,
void *data);
#endif /* CONFIG_TEGRA_HSIERRRPTINJ */
#endif /* NVDLA_HAVE_CONFIG_HSIERRINJ */
int nvdla_ping(struct platform_device *pdev, struct nvdla_ping_args *args);
#endif /* End of __NVHOST_NVDLA_H__ */

View File

@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2023, NVIDIA Corporation. All rights reserved.
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
/* SPDX-FileCopyrightText: Copyright (c) 2016-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* NVDLA debug utils
*/
@@ -11,10 +10,13 @@
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/version.h>
#include <uapi/linux/nvhost_nvdla_ioctl.h>
#include "dla_os_interface.h"
#include "nvdla.h"
#include "nvdla_debug.h"
#include "port/nvdla_fw.h"
#include "port/nvdla_device.h"
/*
* The header in the ring buffer consists of two uint32_t values: (start, end).
@@ -40,10 +42,10 @@ static int nvdla_fw_ver_show(struct seq_file *s, void *unused)
pdev = nvdla_dev->pdev;
/* update fw_version if engine is not yet powered on */
err = nvhost_module_busy(pdev);
err = nvdla_module_busy(pdev);
if (err)
return err;
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
seq_printf(s, "%u.%u.%u\n",
((nvdla_dev->fw_version >> 16) & 0xff),
@@ -293,7 +295,7 @@ static int nvdla_get_stats(struct nvdla_device *nvdla_dev)
return -EFAULT;
/* pass set debug command to falcon */
err = nvdla_send_cmd(pdev, &cmd_data);
err = nvdla_fw_send_cmd(pdev, &cmd_data);
if (err != 0)
nvdla_dbg_err(pdev, "failed to send get stats command");
@@ -333,7 +335,7 @@ static int debug_dla_fw_resource_util_show(struct seq_file *s, void *data)
util_rate_mantissa = 0;
} else {
/* make sure that device is powered on */
err = nvhost_module_busy(pdev);
err = nvdla_module_busy(pdev);
if (err != 0) {
nvdla_dbg_err(pdev, "failed to power on\n");
err = -ENODEV;
@@ -343,13 +345,13 @@ static int debug_dla_fw_resource_util_show(struct seq_file *s, void *data)
err = nvdla_get_stats(nvdla_dev);
if (err != 0) {
nvdla_dbg_err(pdev, "Failed to send get stats command");
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
goto fail_no_dev;
}
utilization = *(unsigned int *)nvdla_dev->utilization_mem_va;
util_rate_characteristic = (utilization / 10000);
util_rate_mantissa = (utilization % 10000);
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
}
seq_printf(s, "%u.%04u\n", util_rate_characteristic, util_rate_mantissa);
@@ -376,7 +378,7 @@ static int nvdla_get_window_size(struct nvdla_device *nvdla_dev)
}
/* make sure that device is powered on */
err = nvhost_module_busy(pdev);
err = nvdla_module_busy(pdev);
if (err != 0) {
nvdla_dbg_err(pdev, "failed to power on\n");
err = -ENODEV;
@@ -384,14 +386,14 @@ static int nvdla_get_window_size(struct nvdla_device *nvdla_dev)
}
/* pass set debug command to falcon */
err = nvdla_send_cmd(pdev, &cmd_data);
err = nvdla_fw_send_cmd(pdev, &cmd_data);
if (err != 0) {
nvdla_dbg_err(pdev, "failed to send set window command");
goto fail_to_send_cmd;
}
fail_to_send_cmd:
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
fail_no_dev:
return err;
}
@@ -433,6 +435,13 @@ fail:
return err;
}
static int debug_dla_fw_ping_show(struct seq_file *s, void *data)
{
(void) data;
seq_puts(s, "0\n");
return 0;
}
/*
* When the user calls this debugfs node, the configurable
* window size value is passed down to the FW
@@ -455,7 +464,7 @@ static int nvdla_set_window_size(struct nvdla_device *nvdla_dev)
}
/* make sure that device is powered on */
err = nvhost_module_busy(pdev);
err = nvdla_module_busy(pdev);
if (err != 0) {
nvdla_dbg_err(pdev, "failed to power on\n");
err = -ENODEV;
@@ -463,14 +472,14 @@ static int nvdla_set_window_size(struct nvdla_device *nvdla_dev)
}
/* pass set debug command to falcon */
err = nvdla_send_cmd(pdev, &cmd_data);
err = nvdla_fw_send_cmd(pdev, &cmd_data);
if (err != 0) {
nvdla_dbg_err(pdev, "failed to send set window command");
goto fail_to_send_cmd;
}
fail_to_send_cmd:
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
fail_no_dev:
return err;
}
@@ -518,6 +527,61 @@ fail:
return -1;
}
static ssize_t debug_dla_fw_ping_write(struct file *file,
const char __user *buffer, size_t count, loff_t *off)
{
int err;
struct seq_file *priv_data;
struct nvdla_device *nvdla_dev;
struct platform_device *pdev;
long write_value;
/* Fetch user requested write-value. */
err = kstrtol_from_user(buffer, count, 10, &write_value);
if (err < 0)
goto fail;
priv_data = file->private_data;
if (priv_data == NULL)
goto fail;
nvdla_dev = (struct nvdla_device *) priv_data->private;
if (nvdla_dev == NULL)
goto fail;
pdev = nvdla_dev->pdev;
if (pdev == NULL)
goto fail;
if (write_value > 0) {
struct nvdla_ping_args args = { write_value, 0 };
uint32_t golden = (write_value * 4U);
nvdla_dbg_info(pdev, "[PING] challenge: %u\n",
(unsigned int) write_value);
nvdla_dbg_info(pdev, "[PING] golden: %u\n", golden);
err = nvdla_ping(pdev, &args);
if (err < 0) {
nvdla_dbg_err(pdev, "failed to ping\n");
goto fail;
}
if (args.out_response != golden) {
nvdla_dbg_err(pdev, "[PING] response != golden (%u != %u)\n",
args.out_response, golden);
goto fail;
}
nvdla_dbg_info(pdev, "[PING] successful\n");
}
return count;
fail:
return -1;
}
static int debug_dla_enable_trace_open(struct inode *inode, struct file *file)
{
return single_open(file, debug_dla_enable_trace_show, inode->i_private);
@@ -563,6 +627,11 @@ static int debug_dla_fw_stat_window_open(struct inode *inode, struct file *file)
return single_open(file, debug_dla_fw_stat_window_show, inode->i_private);
}
static int debug_dla_fw_ping_open(struct inode *inode, struct file *file)
{
return single_open(file, debug_dla_fw_ping_show, inode->i_private);
}
static int debug_set_trace_event_config(struct platform_device *pdev,
u32 value, u32 sub_cmd)
{
@@ -572,7 +641,7 @@ static int debug_set_trace_event_config(struct platform_device *pdev,
struct nvdla_cmd_data cmd_data;
/* make sure that device is powered on */
err = nvhost_module_busy(pdev);
err = nvdla_module_busy(pdev);
if (err) {
nvdla_dbg_err(pdev, "failed to power on\n");
err = -ENODEV;
@@ -596,7 +665,7 @@ static int debug_set_trace_event_config(struct platform_device *pdev,
cmd_data.wait = true;
/* pass set debug command to falcon */
err = nvdla_send_cmd(pdev, &cmd_data);
err = nvdla_fw_send_cmd(pdev, &cmd_data);
/* free memory allocated for trace event command */
nvdla_put_cmd_memory(pdev, trace_events_mem_info.index);
@@ -606,12 +675,12 @@ static int debug_set_trace_event_config(struct platform_device *pdev,
goto send_cmd_failed;
}
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
return err;
send_cmd_failed:
alloc_failed:
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
fail_to_on:
return err;
}
@@ -755,7 +824,7 @@ static ssize_t debug_dla_fw_reload_set(struct file *file,
* suspend.
*/
ref_cnt = atomic_read(&pdev->dev.power.usage_count);
nvhost_module_idle_mult(pdev, ref_cnt);
nvdla_module_idle_mult(pdev, ref_cnt);
/* check and wait until module is idle (with a timeout) */
end_jiffies = jiffies + msecs_to_jiffies(2000);
@@ -769,12 +838,12 @@ static ssize_t debug_dla_fw_reload_set(struct file *file,
nvdla_dbg_info(pdev, "firmware reload requesting..\n");
err = flcn_reload_fw(pdev);
err = nvdla_fw_reload(pdev);
if (err)
return err; /* propagate firmware reload errors */
/* make sure device in clean state by reset */
nvhost_module_reset(pdev, true);
nvdla_module_reset(pdev, true);
return count;
}
@@ -865,6 +934,14 @@ static const struct file_operations debug_dla_stat_window_fops = {
.write = debug_dla_fw_stat_window_write,
};
static const struct file_operations debug_dla_ping_fops = {
.open = debug_dla_fw_ping_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = debug_dla_fw_ping_write,
};
static void dla_fw_debugfs_init(struct platform_device *pdev)
{
struct dentry *fw_dir, *fw_trace, *events, *fw_gcov;
@@ -937,6 +1014,10 @@ static void dla_fw_debugfs_init(struct platform_device *pdev)
nvdla_dev, &debug_dla_stat_window_fops))
goto trace_failed;
if (!debugfs_create_file("ping", 0600, fw_dir,
nvdla_dev, &debug_dla_ping_fops))
goto trace_failed;
return;
gcov_failed:
@@ -1056,7 +1137,7 @@ fail_create_file_suspend:
}
#endif
#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ))
#if defined(NVDLA_HAVE_CONFIG_HSIERRINJ) && (NVDLA_HAVE_CONFIG_HSIERRINJ == 1)
static ssize_t debug_dla_err_inj_write(struct file *file,
const char __user *buffer, size_t count, loff_t *off)
{
@@ -1167,7 +1248,7 @@ static void nvdla_err_inj_debugfs_init(struct platform_device *pdev)
fail_create_file_err_inj:
return;
}
#endif /* CONFIG_TEGRA_HSIERRRPTINJ */
#endif /* NVDLA_HAVE_CONFIG_HSIERRINJ */
void nvdla_debug_init(struct platform_device *pdev)
{
@@ -1191,9 +1272,9 @@ void nvdla_debug_init(struct platform_device *pdev)
nvdla_dev->submit_mode = nvdla_dev->submit_mode &&
pdata->isolate_contexts;
#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ))
#if defined(NVDLA_HAVE_CONFIG_HSIERRINJ) && (NVDLA_HAVE_CONFIG_HSIERRINJ == 1)
nvdla_err_inj_debugfs_init(pdev);
#endif /* CONFIG_TEGRA_HSIERRRPTINJ */
#endif /* NVDLA_HAVE_CONFIG_HSIERRINJ */
#ifdef CONFIG_PM
nvdla_pm_debugfs_init(pdev);

View File

@@ -1,8 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
*/
/*
/* SPDX-License-Identifier: LicenseRef-NvidiaProprietary */
/* SPDX-FileCopyrightText: Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
@@ -297,7 +295,6 @@ static inline u32 flcn_hwcfg2_mem_scrubbing_done_v(void)
return 0x0;
}
#if (IS_ENABLED(CONFIG_TEGRA_HSIERRRPTINJ))
static inline u32 flcn_safety_erb_r(void)
{
return 0x000012ec;
@@ -312,7 +309,6 @@ static inline u32 flcn_safety_erb_data_uncorrected_err_v(void)
{
return 0xeafe1cff;
}
#endif /* CONFIG_TEGRA_HSIERRRPTINJ */
static inline u32 cbb_vic_sec_blf_write_ctl_r(void)
{

View File

@@ -22,6 +22,8 @@
#include <uapi/linux/nvhost_ioctl.h>
#include <uapi/linux/nvhost_nvdla_ioctl.h>
#include "dla_os_interface.h"
#include "port/nvdla_fw.h"
#include "port/nvdla_device.h"
/**
* struct nvdla_private per unique FD private data
@@ -47,7 +49,7 @@ static int nvdla_get_fw_ver(struct nvdla_private *priv,
nvdla_dbg_fn(pdev, "");
/* update fw_version if engine is not yet powered on */
err = nvhost_module_busy(pdev);
err = nvdla_module_busy(pdev);
if (err)
return err;
@@ -55,7 +57,7 @@ static int nvdla_get_fw_ver(struct nvdla_private *priv,
nvdla_dbg_fn(pdev, "version returned[%u]", args->version);
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
return 0;
}
@@ -127,10 +129,10 @@ static int nvdla_get_q_status(struct nvdla_private *priv, void *args)
goto inval_queue;
}
fence.syncpoint_index = queue->syncpt_id;
fence.syncpoint_value = nvhost_syncpt_read_maxval(pdev,
queue->syncpt_id);
nvdla_dbg_info(pdev, "syncpt_id[%u] val[%u]\n", fence.syncpoint_index, fence.syncpoint_value);
fence.syncpoint_index = nvdla_sync_get_syncptid(queue->sync_context);
fence.syncpoint_value = nvdla_sync_get_max_value(queue->sync_context);
nvdla_dbg_info(pdev, "syncptid[%u] val[%u]\n",
fence.syncpoint_index, fence.syncpoint_value);
if (copy_to_user(usr_fence, &fence, sizeof(struct nvdev_fence))) {
err = -EFAULT;
@@ -243,8 +245,7 @@ fail_to_get_val_arg:
return err;
}
static int nvdla_ping(struct platform_device *pdev,
struct nvdla_ping_args *args)
int nvdla_ping(struct platform_device *pdev, struct nvdla_ping_args *args)
{
struct nvdla_cmd_mem_info ping_cmd_mem_info;
struct nvdla_cmd_data cmd_data;
@@ -260,7 +261,7 @@ static int nvdla_ping(struct platform_device *pdev,
}
/* make sure that device is powered on */
err = nvhost_module_busy(pdev);
err = nvdla_module_busy(pdev);
if (err) {
nvdla_dbg_err(pdev, "failed to power on\n");
err = -ENODEV;
@@ -294,7 +295,7 @@ static int nvdla_ping(struct platform_device *pdev,
cmd_data.wait = true;
/* send ping cmd */
err = nvdla_send_cmd(pdev, &cmd_data);
err = nvdla_fw_send_cmd(pdev, &cmd_data);
if (err) {
nvdla_dbg_err(pdev, "failed to send ping command");
goto fail_cmd;
@@ -315,7 +316,7 @@ fail_cmd:
fail_to_alloc:
mutex_unlock(&nvdla_dev->ping_lock);
fail_to_get_nvdla_dev:
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
fail_to_on:
fail_to_get_val_arg:
return err;
@@ -407,13 +408,10 @@ static int nvdla_send_emu_signal_fences(struct nvdla_emu_task *task,
{
int err = 0, i;
struct platform_device *dla_pdev = task->queue->pool->pdev;
struct platform_device *host_pdev =
to_platform_device(dla_pdev->dev.parent);
struct nvdev_fence __user *prefences =
(struct nvdev_fence __user *)(uintptr_t)user_task->prefences;
struct nvdev_fence __user *postfences =
(struct nvdev_fence __user *)(uintptr_t)user_task->postfences;
char fence_name[32];
nvdla_dbg_fn(dla_pdev, "sending signal fences");
@@ -422,7 +420,11 @@ static int nvdla_send_emu_signal_fences(struct nvdla_emu_task *task,
continue;
if (task->prefences[i].type == NVDEV_FENCE_TYPE_SYNC_FD) {
#if defined(NVDLA_HAVE_CONFIG_SYNCPTFD) && (NVDLA_HAVE_CONFIG_SYNCPTFD == 1)
char fence_name[32];
struct nvhost_ctrl_sync_fence_info info;
struct platform_device *host_pdev =
to_platform_device(dla_pdev->dev.parent);
info.id = task->prefences[i].syncpoint_index;
info.thresh = task->prefences[i].syncpoint_value;
@@ -440,10 +442,12 @@ static int nvdla_send_emu_signal_fences(struct nvdla_emu_task *task,
"encoding error: %d\n", err);
goto fail;
}
err = nvhost_fence_create_fd(host_pdev,
&info, 1, fence_name,
&task->prefences[i].sync_fd);
#else
err = -EOPNOTSUPP;
#endif /* NVDLA_HAVE_CONFIG_SYNCPTFD */
if (err) {
nvdla_dbg_err(dla_pdev,
@@ -469,7 +473,11 @@ static int nvdla_send_emu_signal_fences(struct nvdla_emu_task *task,
continue;
if (task->postfences[i].type == NVDEV_FENCE_TYPE_SYNC_FD) {
#if defined(NVDLA_HAVE_CONFIG_SYNCPTFD) && (NVDLA_HAVE_CONFIG_SYNCPTFD == 1)
char fence_name[32];
struct nvhost_ctrl_sync_fence_info info;
struct platform_device *host_pdev =
to_platform_device(dla_pdev->dev.parent);
info.id = task->postfences[i].syncpoint_index;
info.thresh = task->postfences[i].syncpoint_value;
@@ -491,6 +499,9 @@ static int nvdla_send_emu_signal_fences(struct nvdla_emu_task *task,
err = nvhost_fence_create_fd(host_pdev,
&info, 1, fence_name,
&task->postfences[i].sync_fd);
#else
err = -EOPNOTSUPP;
#endif /* NVDLA_HAVE_CONFIG_SYNCPTFD */
if (err) {
nvdla_dbg_err(dla_pdev,
@@ -520,13 +531,10 @@ static int nvdla_update_signal_fences(struct nvdla_task *task,
{
int err = 0, i;
struct platform_device *dla_pdev = task->queue->pool->pdev;
struct platform_device *host_pdev =
to_platform_device(dla_pdev->dev.parent);
struct nvdev_fence __user *prefences =
(struct nvdev_fence __user *)(uintptr_t)user_task->prefences;
struct nvdev_fence __user *postfences =
(struct nvdev_fence __user *)(uintptr_t)user_task->postfences;
char fence_name[32];
nvdla_dbg_fn(dla_pdev, "copy fences for user");
@@ -536,7 +544,11 @@ static int nvdla_update_signal_fences(struct nvdla_task *task,
continue;
if (task->prefences[i].type == NVDEV_FENCE_TYPE_SYNC_FD) {
#if defined(NVDLA_HAVE_CONFIG_SYNCPTFD) && (NVDLA_HAVE_CONFIG_SYNCPTFD == 1)
char fence_name[32];
struct nvhost_ctrl_sync_fence_info info;
struct platform_device *host_pdev =
to_platform_device(dla_pdev->dev.parent);
info.id = task->prefences[i].syncpoint_index;
info.thresh = task->prefences[i].syncpoint_value;
@@ -558,6 +570,9 @@ static int nvdla_update_signal_fences(struct nvdla_task *task,
err = nvhost_fence_create_fd(host_pdev,
&info, 1, fence_name,
&task->prefences[i].sync_fd);
#else
err = -EOPNOTSUPP;
#endif /* NVDLA_HAVE_CONFIG_SYNCPTFD */
if (err) {
nvdla_dbg_err(dla_pdev,
@@ -583,7 +598,11 @@ static int nvdla_update_signal_fences(struct nvdla_task *task,
continue;
if (task->postfences[i].type == NVDEV_FENCE_TYPE_SYNC_FD) {
#if defined(NVDLA_HAVE_CONFIG_SYNCPTFD) && (NVDLA_HAVE_CONFIG_SYNCPTFD == 1)
char fence_name[32];
struct nvhost_ctrl_sync_fence_info info;
struct platform_device *host_pdev =
to_platform_device(dla_pdev->dev.parent);
info.id = task->postfences[i].syncpoint_index;
info.thresh = task->postfences[i].syncpoint_value;
@@ -605,6 +624,9 @@ static int nvdla_update_signal_fences(struct nvdla_task *task,
err = nvhost_fence_create_fd(host_pdev,
&info, 1, fence_name,
&task->postfences[i].sync_fd);
#else
err = -EOPNOTSUPP;
#endif /* NVDLA_HAVE_CONFIG_SYNCPTFD */
if (err) {
nvdla_dbg_err(dla_pdev,
@@ -1265,14 +1287,15 @@ static int nvdla_open(struct inode *inode, struct file *file)
nvdla_dbg_fn(pdev, "priv:%p", priv);
/* add priv to client list */
err = nvhost_module_add_client(pdev, priv);
err = nvdla_module_client_register(pdev, priv);
if (err < 0)
goto err_add_client;
goto err_client_register;
/* set rate for EMC to max
* on device release ACM sets to default rate
*/
* on device release ACM sets to default rate
*/
for (index = 0; index < NVHOST_MODULE_MAX_CLOCKS; index++) {
#if !defined(NVDLA_HAVE_CONFIG_AXI) || (NVDLA_HAVE_CONFIG_AXI == 0)
struct nvhost_clock *clock = &pdata->clocks[index];
if (clock->moduleid ==
@@ -1283,6 +1306,7 @@ static int nvdla_open(struct inode *inode, struct file *file)
goto err_set_emc_rate;
break;
}
#endif /* not NVDLA_HAVE_CONFIG_AXI */
}
/* Zero out explicitly */
@@ -1302,9 +1326,11 @@ static int nvdla_open(struct inode *inode, struct file *file)
err_alloc_buffer:
kfree(priv->buffers);
#if !defined(NVDLA_HAVE_CONFIG_AXI) || (NVDLA_HAVE_CONFIG_AXI == 0)
err_set_emc_rate:
nvhost_module_remove_client(pdev, priv);
err_add_client:
#endif /* not NVDLA_HAVE_CONFIG_AXI */
nvdla_module_client_unregister(pdev, priv);
err_client_register:
kfree(priv);
err_alloc_priv:
return err;

View File

@@ -16,6 +16,8 @@
#include <uapi/linux/nvhost_ioctl.h>
#include "nvdla.h"
#include "port/nvdla_fw.h"
#include "port/nvdla_device.h"
#include "dla_channel.h"
#include "dla_queue.h"
#include "nvdla_debug.h"
@@ -282,8 +284,9 @@ static void nvdla_task_free_locked(struct nvdla_task *task)
struct platform_device *pdev = queue->pool->pdev;
nvdla_dbg_info(pdev,
"task[%p] completed. syncpt[%d] fence[%d]",
task, queue->syncpt_id, task->fence);
"task[%p] completed. Qsync[%p] fence[%d]",
task, queue->sync_context, task->fence);
nvdla_sync_print(queue->sync_context);
/* unmap all memory shared with engine */
nvdla_unmap_task_memory(task);
@@ -360,7 +363,6 @@ static inline size_t nvdla_profile_status_offset(struct nvdla_task *task)
static void nvdla_queue_task_cleanup(struct nvdla_queue *queue,
uint32_t max_dla_cleanup_depth)
{
int task_complete;
struct nvdla_task *task, *safe;
struct platform_device *pdev = queue->pool->pdev;
struct nvhost_notification *tsp_notifier;
@@ -376,19 +378,21 @@ static void nvdla_queue_task_cleanup(struct nvdla_queue *queue,
/* check which task(s) finished */
list_for_each_entry_safe(task, safe, &queue->tasklist, list) {
int32_t task_complete_status;
if (dla_cleanup_depth >= max_dla_cleanup_depth)
break;
task_id = nvdla_compute_task_id(task->task_desc->sequence,
task->task_desc->queue_id);
task_complete = nvhost_syncpt_is_expired_ext(pdev,
queue->syncpt_id, task->fence);
task_complete_status =
nvdla_sync_wait(queue->sync_context, task->fence, 0ULL);
/* clean task and remove from list */
if (task_complete) {
nvdla_dbg_fn(pdev, "task with syncpt[%d] val[%d] done",
queue->syncpt_id, task->fence);
if (task_complete_status == 0) {
nvdla_dbg_info(pdev, "task with Qsync[%p] val[%d] done",
queue->sync_context, task->fence);
nvdla_sync_print(queue->sync_context);
tsp_notifier = (struct nvhost_notification *)
((uint8_t *)task->task_desc +
@@ -419,7 +423,7 @@ static void nvdla_queue_task_cleanup(struct nvdla_queue *queue,
}
/* put pm refcount */
nvhost_module_idle_mult(pdev, dla_cleanup_depth);
nvdla_module_idle_mult(pdev, dla_cleanup_depth);
mutex_unlock(&queue->list_lock);
}
@@ -519,6 +523,7 @@ static u8 *add_timestamp_action(u8 *mem, uint8_t op, uint64_t addr)
return mem + sizeof(struct dla_action_timestamp);
}
#if defined(NVDLA_HAVE_CONFIG_SYNCPTFD) && (NVDLA_HAVE_CONFIG_SYNCPTFD == 1)
static int nvdla_add_fence_action_cb(struct nvhost_ctrl_sync_fence_info info, void *data)
{
u32 id, thresh;
@@ -526,18 +531,20 @@ static int nvdla_add_fence_action_cb(struct nvhost_ctrl_sync_fence_info info, vo
struct nvdla_queue *queue = args->queue;
u8 **next = args->mem;
struct platform_device *pdev = queue->pool->pdev;
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct nvdla_device *nvdla_dev = pdata->private_data;
dma_addr_t syncpt_addr;
id = info.id;
thresh = info.thresh;
if (!id || !nvhost_syncpt_is_valid_pt_ext(pdev, id)) {
if (!id) {
nvdla_dbg_err(pdev, "Invalid sync_fd");
return -EINVAL;
}
syncpt_addr = nvhost_syncpt_address(
queue->vm_pdev, id);
syncpt_addr = nvdla_sync_get_address_by_syncptid(
nvdla_dev->sync_dev, id);
nvdla_dbg_info(pdev, "syncfd_pt:[%u]"
"mss_dma_addr[%pad]",
id, &syncpt_addr);
@@ -546,6 +553,7 @@ static int nvdla_add_fence_action_cb(struct nvhost_ctrl_sync_fence_info info, vo
return 0;
}
#endif /* NVDLA_HAVE_CONFIG_SYNCPTFD */
static int nvdla_map_task_memory(struct nvdla_task *task)
{
@@ -627,6 +635,7 @@ static int nvdla_fill_wait_fence_action(struct nvdla_task *task,
switch(fence->type) {
case NVDEV_FENCE_TYPE_SYNC_FD: {
#if defined(NVDLA_HAVE_CONFIG_SYNCPTFD) && (NVDLA_HAVE_CONFIG_SYNCPTFD == 1)
struct nvhost_fence *f;
struct nvdla_add_fence_action_cb_args args;
@@ -642,18 +651,23 @@ static int nvdla_fill_wait_fence_action(struct nvdla_task *task,
if (err != 0) {
nvhost_fence_put(f);
}
#else
err = -EOPNOTSUPP;
#endif /* NVDLA_HAVE_CONFIG_SYNCPTFD */
break;
}
case NVDEV_FENCE_TYPE_SYNCPT: {
dma_addr_t syncpt_addr;
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct nvdla_device *nvdla_dev = pdata->private_data;
nvdla_dbg_info(pdev, "id[%d] val[%d]",
fence->syncpoint_index,
fence->syncpoint_value);
syncpt_addr = nvhost_syncpt_address(
queue->vm_pdev, fence->syncpoint_index);
syncpt_addr = nvdla_sync_get_address_by_syncptid(
nvdla_dev->sync_dev, fence->syncpoint_index);
nvdla_dbg_info(pdev, "syncpt:[%u] dma_addr[%pad]",
fence->syncpoint_index, &syncpt_addr);
@@ -713,15 +727,15 @@ static int nvdla_fill_signal_fence_action(struct nvdla_task *task,
dma_addr_t syncpt_addr;
/* For postaction also update MSS addr */
syncpt_addr = nvhost_syncpt_address(queue->vm_pdev,
queue->syncpt_id);
syncpt_addr = nvdla_sync_get_address(queue->sync_context);
next = add_fence_action(next, ACTION_WRITE_SEM,
syncpt_addr, 1);
task->fence_counter = task->fence_counter + 1;
nvdla_dbg_info(pdev, "syncpt:[%u] mss:[%pad]",
queue->syncpt_id, &syncpt_addr);
nvdla_dbg_info(pdev, "Qsync:[%p] mss:[%pad]",
queue->sync_context, &syncpt_addr);
nvdla_sync_print(queue->sync_context);
break;
}
case NVDEV_FENCE_TYPE_SEMAPHORE: {
@@ -1220,12 +1234,13 @@ int nvdla_emulator_submit(struct nvdla_queue *queue, struct nvdla_emu_task *task
}
/* get fence from nvhost */
task->fence = nvhost_syncpt_incr_max_ext(pdev, queue->syncpt_id,
task->fence_counter);
task->fence = nvdla_sync_increment_max_value(queue->sync_context,
task->fence_counter);
nvdla_dbg_fn(pdev, "syncpt[%d] fence[%d] task[%p] fence_counter[%u]",
queue->syncpt_id, task->fence,
nvdla_dbg_info(pdev, "Qsync[%p] fence[%d] task[%p] fence_counter[%u]",
queue->sync_context, task->fence,
task, task->fence_counter);
nvdla_sync_print(queue->sync_context);
/* Update signal fences for all */
counter = task->fence_counter - 1;
@@ -1235,8 +1250,10 @@ int nvdla_emulator_submit(struct nvdla_queue *queue, struct nvdla_emu_task *task
if ((task->prefences[i].type == NVDEV_FENCE_TYPE_SYNCPT) ||
(task->prefences[i].type == NVDEV_FENCE_TYPE_SYNC_FD)) {
task->prefences[i].syncpoint_index =
queue->syncpt_id;
uint32_t syncptid;
syncptid = nvdla_sync_get_syncptid(queue->sync_context);
task->prefences[i].syncpoint_index = syncptid;
task->prefences[i].syncpoint_value =
task->fence - counter;
@@ -1254,8 +1271,10 @@ int nvdla_emulator_submit(struct nvdla_queue *queue, struct nvdla_emu_task *task
if ((task->postfences[i].type == NVDEV_FENCE_TYPE_SYNCPT) ||
(task->postfences[i].type == NVDEV_FENCE_TYPE_SYNC_FD)) {
task->postfences[i].syncpoint_index =
queue->syncpt_id;
uint32_t syncptid;
syncptid = nvdla_sync_get_syncptid(queue->sync_context);
task->postfences[i].syncpoint_index = syncptid;
task->postfences[i].syncpoint_value =
task->fence - counter;
@@ -1283,7 +1302,7 @@ int nvdla_get_signal_fences(struct nvdla_queue *queue, void *in_task)
if (task->fence_counter == 0)
task->fence_counter = 1;
task_fence = nvhost_syncpt_read_maxval(pdev, queue->syncpt_id) +
task_fence = nvdla_sync_get_max_value(queue->sync_context) +
task->fence_counter;
/* Update fences signal updates for both prefence and postfence */
@@ -1294,8 +1313,10 @@ int nvdla_get_signal_fences(struct nvdla_queue *queue, void *in_task)
if ((task->prefences[i].type == NVDEV_FENCE_TYPE_SYNCPT) ||
(task->prefences[i].type == NVDEV_FENCE_TYPE_SYNC_FD)) {
task->prefences[i].syncpoint_index =
queue->syncpt_id;
uint32_t syncptid;
syncptid = nvdla_sync_get_syncptid(queue->sync_context);
task->prefences[i].syncpoint_index = syncptid;
task->prefences[i].syncpoint_value =
task_fence - counter;
@@ -1313,8 +1334,10 @@ int nvdla_get_signal_fences(struct nvdla_queue *queue, void *in_task)
if ((task->postfences[i].type == NVDEV_FENCE_TYPE_SYNCPT) ||
(task->postfences[i].type == NVDEV_FENCE_TYPE_SYNC_FD)) {
task->postfences[i].syncpoint_index =
queue->syncpt_id;
uint32_t syncptid;
syncptid = nvdla_sync_get_syncptid(queue->sync_context);
task->postfences[i].syncpoint_index = syncptid;
task->postfences[i].syncpoint_value =
task_fence - counter;
@@ -1375,7 +1398,7 @@ static int nvdla_queue_submit_op(struct nvdla_queue *queue, void *in_task)
timestamp = arch_timer_read_counter();
/* get pm refcount */
if (nvhost_module_busy(pdev))
if (nvdla_module_busy(pdev))
goto fail_to_poweron;
/* prepare command for submit */
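
The power-management refcount calls are likewise routed through the portability layer: nvhost_module_busy()/nvhost_module_idle() become nvdla_module_busy()/nvdla_module_idle(). On a generic backend these would plausibly be thin runtime-PM wrappers; the mapping below is an assumption, not something this diff shows.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Assumed mapping onto runtime PM; the real portability-layer backend
 * may route through nvhost instead. */
static int nvdla_module_busy(struct platform_device *pdev)
{
	return pm_runtime_resume_and_get(&pdev->dev);
}

static void nvdla_module_idle(struct platform_device *pdev)
{
	pm_runtime_put(&pdev->dev);
}
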
@@ -1392,18 +1415,19 @@ static int nvdla_queue_submit_op(struct nvdla_queue *queue, void *in_task)
}
if (likely(nvdla_dev->submit_mode == NVDLA_SUBMIT_MODE_MMIO)) {
err = nvdla_send_cmd(pdev, &cmd_data);
err = nvdla_fw_send_cmd(pdev, &cmd_data);
if (err) {
nvdla_dbg_err(pdev, "task[%p] submit failed", task);
goto fail_to_submit;
}
task->fence = nvhost_syncpt_incr_max_ext(pdev,
queue->syncpt_id,
task->fence_counter);
nvdla_dbg_fn(pdev, "syncpt[%d] fence[%d] task[%p] fence_counter[%u]",
queue->syncpt_id, task->fence,
task->fence = nvdla_sync_increment_max_value(
queue->sync_context,
task->fence_counter);
nvdla_dbg_info(pdev, "Qsync[%p] fence[%d] task[%p] fence_counter[%u]",
queue->sync_context, task->fence,
task, task->fence_counter);
nvdla_sync_print(queue->sync_context);
}
if (IS_ENABLED(CONFIG_TRACING)) {
@@ -1440,7 +1464,7 @@ static int nvdla_queue_submit_op(struct nvdla_queue *queue, void *in_task)
return 0;
fail_to_submit:
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
fail_to_poweron:
mutex_lock(&queue->list_lock);
if (last_task != NULL)
@@ -1468,7 +1492,7 @@ int nvdla_set_queue_state(struct nvdla_queue *queue, int cmd)
}
/* get pm refcount */
err = nvhost_module_busy(pdev);
err = nvdla_module_busy(pdev);
if (err) {
nvdla_dbg_err(pdev, "failed to poweron, err: %d", err);
goto fail_to_poweron;
@@ -1479,14 +1503,14 @@ int nvdla_set_queue_state(struct nvdla_queue *queue, int cmd)
cmd_data.method_data = queue->id;
cmd_data.wait = true;
err = nvdla_send_cmd(pdev, &cmd_data);
err = nvdla_fw_send_cmd(pdev, &cmd_data);
if (err) {
nvdla_dbg_err(pdev, "failed to suspend queue %d", err);
goto fail_to_suspend;
}
fail_to_suspend:
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
fail_to_poweron:
return err;
}
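
Queue suspend is a single firmware command: method_data carries the queue id and wait = true makes the call synchronous. Only those two fields are visible above; the struct shape below, including the method field, is an assumed reconstruction for illustration.

#include <linux/types.h>

/* Hypothetical shape; only .method_data and .wait appear in the hunk
 * above, and the .method field is an assumed addition. */
struct nvdla_cmd_data {
	u32 method;       /* firmware method id (assumed field) */
	u32 method_data;  /* payload, e.g. the queue id */
	bool wait;        /* block until the firmware acknowledges */
};

/* Usage mirroring nvdla_set_queue_state():
 *	cmd_data.method_data = queue->id;
 *	cmd_data.wait = true;
 *	err = nvdla_fw_send_cmd(pdev, &cmd_data);
 */
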
@@ -1508,7 +1532,7 @@ static int nvdla_queue_abort_op(struct nvdla_queue *queue)
goto done;
/* get pm refcount */
err = nvhost_module_busy(pdev);
err = nvdla_module_busy(pdev);
if (err) {
nvdla_dbg_err(pdev, "failed to poweron, err: %d", err);
goto done;
@@ -1521,8 +1545,8 @@ static int nvdla_queue_abort_op(struct nvdla_queue *queue)
/* flush engine side queues */
do {
err = nvdla_send_cmd(pdev, &cmd_data);
if (err == DLA_ERR_PROCESSOR_BUSY)
err = nvdla_fw_send_cmd(pdev, &cmd_data);
if ((err == DLA_ERR_PROCESSOR_BUSY) || (err == -EAGAIN))
mdelay(NVDLA_QUEUE_ABORT_RETRY_PERIOD);
else
break;
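
The behavioural fix in this hunk: the engine-side flush is now retried when the command path returns -EAGAIN, not only on the firmware's DLA_ERR_PROCESSOR_BUSY status, with an mdelay(NVDLA_QUEUE_ABORT_RETRY_PERIOD) between attempts. The do/while bound is truncated above; the standalone model below assumes a finite retry count and stub values for the constants.

#include <errno.h>
#include <stdio.h>

#define DLA_ERR_PROCESSOR_BUSY         (-7)  /* assumed value */
#define NVDLA_QUEUE_ABORT_RETRY_PERIOD  10   /* ms, assumed value */
#define NVDLA_QUEUE_ABORT_MAX_RETRIES   10   /* assumed bound */

static int attempts;

/* Stand-in for nvdla_fw_send_cmd(): busy for the first two calls. */
static int send_flush(void)
{
	return (++attempts < 3) ? -EAGAIN : 0;
}

/* Stand-in for mdelay(). */
static void sleep_ms(int ms) { (void)ms; }

int main(void)
{
	int retries = NVDLA_QUEUE_ABORT_MAX_RETRIES;
	int err;

	do {
		err = send_flush();
		if (err == DLA_ERR_PROCESSOR_BUSY || err == -EAGAIN)
			sleep_ms(NVDLA_QUEUE_ABORT_RETRY_PERIOD);
		else
			break;
	} while (--retries);

	printf("flush result %d after %d attempts\n", err, attempts);
	return 0;
}
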
@@ -1538,17 +1562,18 @@ static int nvdla_queue_abort_op(struct nvdla_queue *queue)
nvdla_dbg_info(pdev, "Engine Q[%d] flush done", queue->id);
/* reset syncpoint to release all tasks */
fence = nvhost_syncpt_read_maxval(pdev, queue->syncpt_id);
nvhost_syncpt_set_min_update(pdev, queue->syncpt_id, fence);
fence = nvdla_sync_get_max_value(queue->sync_context);
(void) nvdla_sync_signal(queue->sync_context, fence);
/* dump details */
nvdla_dbg_info(pdev, "Q id %d reset syncpt[%d] done",
queue->id, queue->syncpt_id);
nvdla_dbg_info(pdev, "Q id %d reset sync[%p] done",
queue->id, queue->sync_context);
nvdla_sync_print(queue->sync_context);
nvdla_queue_cleanup_op(queue);
poweroff:
nvhost_module_idle(pdev);
nvdla_module_idle(pdev);
done:
return err;
}
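
The release-on-abort pattern above reads the reserved maximum and signals the sync object up to it, so every outstanding task fence completes immediately. The toy model below captures the min/max protocol this relies on; a real host1x syncpoint additionally uses wraparound-safe comparisons, which the model omits.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy syncpoint: `max` is what has been promised to waiters,
 * `min` is what the hardware has actually signalled. */
struct syncpt { uint32_t min, max; };

static uint32_t incr_max(struct syncpt *sp, uint32_t n) { return sp->max += n; }
static void signal_to(struct syncpt *sp, uint32_t v) { if (v > sp->min) sp->min = v; }
static int fence_done(const struct syncpt *sp, uint32_t f) { return sp->min >= f; }

int main(void)
{
	struct syncpt sp = { .min = 0, .max = 0 };
	uint32_t fence = incr_max(&sp, 3);	/* submit promises 3 increments */

	assert(!fence_done(&sp, fence));	/* task still pending */
	signal_to(&sp, sp.max);			/* abort: min := max */
	assert(fence_done(&sp, fence));		/* all waiters released */
	printf("released up to %u\n", fence);
	return 0;
}
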


@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023, NVIDIA Corporation. All rights reserved.
/* SPDX-License-Identifier: LicenseRef-NvidiaProprietary */
/* SPDX-FileCopyrightText: Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Device data for T194
*/
#ifndef __NVHOST_NVDLA_T194_H__
@@ -26,9 +27,9 @@ static struct nvhost_device_data t19_nvdla0_info = {
TEGRA_SET_EMC_FLOOR}
},
.resource_policy = RESOURCE_PER_CHANNEL_INSTANCE,
.finalize_poweron = nvhost_nvdla_finalize_poweron,
.prepare_poweroff = nvhost_nvdla_prepare_poweroff,
.flcn_isr = nvhost_nvdla_flcn_isr,
.finalize_poweron = nvdla_finalize_poweron,
.prepare_poweroff = nvdla_prepare_poweroff,
.flcn_isr = nvdla_flcn_isr,
.self_config_flcn_isr = true,
.vm_regs = {{0x30, true}, {0x34, false} },
.firmware_name = NV_DLA_TEGRA194_FW,
@@ -38,7 +39,11 @@ static struct nvhost_device_data t19_nvdla0_info = {
.poweron_reset = true,
.serialize = true,
.ctrl_ops = &tegra_nvdla_ctrl_ops,
#if defined(NVDLA_HAVE_CONFIG_AXI) && (NVDLA_HAVE_CONFIG_AXI == 1)
.get_reloc_phys_addr = NULL,
#else
.get_reloc_phys_addr = nvhost_t194_get_reloc_phys_addr,
#endif
.module_irq = 1,
.engine_cg_regs = nvdla_gating_registers,
.engine_can_cg = true,
@@ -60,9 +65,9 @@ static struct nvhost_device_data t19_nvdla1_info = {
TEGRA_SET_EMC_FLOOR}
},
.resource_policy = RESOURCE_PER_CHANNEL_INSTANCE,
.finalize_poweron = nvhost_nvdla_finalize_poweron,
.prepare_poweroff = nvhost_nvdla_prepare_poweroff,
.flcn_isr = nvhost_nvdla_flcn_isr,
.finalize_poweron = nvdla_finalize_poweron,
.prepare_poweroff = nvdla_prepare_poweroff,
.flcn_isr = nvdla_flcn_isr,
.self_config_flcn_isr = true,
.vm_regs = {{0x30, true}, {0x34, false} },
.firmware_name = NV_DLA_TEGRA194_FW,
@@ -72,7 +77,11 @@ static struct nvhost_device_data t19_nvdla1_info = {
.poweron_reset = true,
.serialize = true,
.ctrl_ops = &tegra_nvdla_ctrl_ops,
#if defined(NVDLA_HAVE_CONFIG_AXI) && (NVDLA_HAVE_CONFIG_AXI == 1)
.get_reloc_phys_addr = NULL,
#else
.get_reloc_phys_addr = nvhost_t194_get_reloc_phys_addr,
#endif
.module_irq = 1,
.engine_cg_regs = nvdla_gating_registers,
.engine_can_cg = true,
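
Both T194 instances (and the T234 ones below) gate .get_reloc_phys_addr on the same preprocessor test. An undefined macro already evaluates to 0 inside #if, so the defined() clause is redundant for correctness, but it keeps -Wundef builds quiet and makes the intent explicit: only an explicit NVDLA_HAVE_CONFIG_AXI=1 drops the legacy relocation helper. The NVDLA_RELOC_HOOK macro below is illustrative only.

/* Illustrative restatement of the guard; NVDLA_RELOC_HOOK is a
 * hypothetical name, not part of the driver. */
#if defined(NVDLA_HAVE_CONFIG_AXI) && (NVDLA_HAVE_CONFIG_AXI == 1)
#define NVDLA_RELOC_HOOK NULL  /* AXI backend patches relocations (assumed rationale) */
#else
#define NVDLA_RELOC_HOOK nvhost_t194_get_reloc_phys_addr  /* legacy helper, per the hunk above */
#endif
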


@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023, NVIDIA Corporation. All rights reserved.
/* SPDX-License-Identifier: LicenseRef-NvidiaProprietary */
/* SPDX-FileCopyrightText: Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Device data for T234
*/
#ifndef __NVHOST_NVDLA_T234_H__
@@ -21,9 +22,9 @@ static struct nvhost_device_data t23x_nvdla0_info = {
{"nvdla0_flcn", UINT_MAX}
},
.resource_policy = RESOURCE_PER_CHANNEL_INSTANCE,
.finalize_poweron = nvhost_nvdla_finalize_poweron,
.prepare_poweroff = nvhost_nvdla_prepare_poweroff,
.flcn_isr = nvhost_nvdla_flcn_isr,
.finalize_poweron = nvdla_finalize_poweron,
.prepare_poweroff = nvdla_prepare_poweroff,
.flcn_isr = nvdla_flcn_isr,
.self_config_flcn_isr = true,
.vm_regs = {{0x30, true}, {0x34, false} },
.firmware_name = NV_DLA_TEGRA234_FW,
@@ -33,7 +34,11 @@ static struct nvhost_device_data t23x_nvdla0_info = {
.poweron_reset = true,
.serialize = true,
.ctrl_ops = &tegra_nvdla_ctrl_ops,
#if defined(NVDLA_HAVE_CONFIG_AXI) && (NVDLA_HAVE_CONFIG_AXI == 1)
.get_reloc_phys_addr = NULL,
#else
.get_reloc_phys_addr = nvhost_t23x_get_reloc_phys_addr,
#endif
.module_irq = 1,
.engine_cg_regs = nvdla_gating_registers,
.engine_can_cg = true,
@@ -52,9 +57,9 @@ static struct nvhost_device_data t23x_nvdla1_info = {
{"nvdla1_flcn", UINT_MAX}
},
.resource_policy = RESOURCE_PER_CHANNEL_INSTANCE,
.finalize_poweron = nvhost_nvdla_finalize_poweron,
.prepare_poweroff = nvhost_nvdla_prepare_poweroff,
.flcn_isr = nvhost_nvdla_flcn_isr,
.finalize_poweron = nvdla_finalize_poweron,
.prepare_poweroff = nvdla_prepare_poweroff,
.flcn_isr = nvdla_flcn_isr,
.self_config_flcn_isr = true,
.vm_regs = {{0x30, true}, {0x34, false} },
.firmware_name = NV_DLA_TEGRA234_FW,
@@ -64,7 +69,11 @@ static struct nvhost_device_data t23x_nvdla1_info = {
.poweron_reset = true,
.serialize = true,
.ctrl_ops = &tegra_nvdla_ctrl_ops,
#if defined(NVDLA_HAVE_CONFIG_AXI) && (NVDLA_HAVE_CONFIG_AXI == 1)
.get_reloc_phys_addr = NULL,
#else
.get_reloc_phys_addr = nvhost_t23x_get_reloc_phys_addr,
#endif
.module_irq = 1,
.engine_cg_regs = nvdla_gating_registers,
.engine_can_cg = true,