Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git
drivers: pva: fix recovery
When pva recovery is initiated — due to a command or task submit timeout,
or initiated by pva-fw — queues are cleared and tasks are returned with an
error, among other cleanup activities, and pva is reset and the fw
rebooted. There could be multiple concurrent attempts to recover the PVA
engine. Additionally, task and command submit may be at varying stages of
execution.

- Skip recovery requests while recovery work is pending.
- Skip task removal in case of a timeout or an invalidated task during
  task submit.
- Skip task submit to CCQ if the task was removed during abort.
- Guard against concurrency during recovery.
- Re-attempt pva reboot on failure during boot, except in recovery.
- Reset driver PM state if module busy fails and a PM error is set.
- Set default FW trace mask to WARN+ERROR+BOOT.
- Set default driver log mask to FW TRACING.
- CCQ polling routine exits with timeout if abort is active.

Bug 4944591

Change-Id: Id3a7388700ccada135b568c978176bb9f2c5f8a0
Signed-off-by: omar <onemri@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3284303
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Amruta Sai Anusha Bhamidipati <abhamidipati@nvidia.com>
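The serialization scheme the message describes — at most one recovery in flight, with concurrent requesters backing off — reduces to a counter plus a work-pending check taken under a small lock. A minimal, runnable userspace model of that pattern (illustrative only; the driver's actual implementation is in the pva_abort.c hunks below, and all names here are invented):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of the guard added by this change: the recovery "owner" bumps a
 * counter under a lock; concurrent requests see the counter (or the
 * pending work item) and back off instead of stacking up. */
static pthread_mutex_t recovery_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_uint recovery_cnt;
static atomic_bool abort_work_pending; /* stands in for work_pending() */

static bool recovery_acquire(void)
{
	bool acquired = true;

	pthread_mutex_lock(&recovery_mutex);
	if (atomic_load(&recovery_cnt) > 0 || atomic_load(&abort_work_pending))
		acquired = false;	/* recovery already in flight: skip */
	else
		atomic_store(&recovery_cnt, 1);
	pthread_mutex_unlock(&recovery_mutex);

	return acquired;
}

static void recovery_release(void)
{
	pthread_mutex_lock(&recovery_mutex);
	atomic_store(&recovery_cnt, 0);
	pthread_mutex_unlock(&recovery_mutex);
}

int main(void)
{
	printf("first: %d\n", recovery_acquire());   /* 1: wins */
	printf("second: %d\n", recovery_acquire());  /* 0: ignored */
	recovery_release();
	return 0;
}
```

The real `pva_recovery_acquire()` below additionally lets a caller pass its own mutex, so the "no recovery pending" check and the lock acquisition happen atomically; see the pva_abort.c hunks.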
@@ -71,6 +71,8 @@
 #endif

 #include "pva-debug-buffer.h"
 #include "pva_trace.h"

 /*
  * NO IOMMU set 0x60000000 as start address.
  * With IOMMU set 0x80000000 (>2GB) as start address
@@ -250,6 +252,10 @@ static u32 evp_reg_val[EVP_REG_NUM] = {
 	EVP_FIQ_VECTOR
 };

+static int pva_cleanup_after_boot_fail(struct platform_device *pdev);
+static int pva_prepare_poweroff_core(struct platform_device *pdev,
+				     bool hold_reset);
+
 /**
  * Allocate and set a circular array for FW to provide status info about
  * completed tasks from all the PVA R5 queues.
@@ -310,7 +316,7 @@ static void pva_free_task_status_buffer(struct pva *pva)
 			  pva->priv_circular_array.pa);
 }

-void pva_fw_log_dump(struct pva *pva)
+void pva_fw_log_dump(struct pva *pva, bool hold_mutex)
 {
 	uint32_t tail;
 	char *content;
@@ -321,11 +327,16 @@ void pva_fw_log_dump(struct pva *pva)
 	mutex_lock(&pva->pva_fw_log_mutex);

 	fw_log_buffer = (struct pva_kmd_fw_print_buffer *) pva->fw_info.priv2_buffer.va;
+	if(!fw_log_buffer)
+		goto exit;

 	tail = fw_log_buffer->tail;
 	content = ((char *)fw_log_buffer) + sizeof(struct pva_kmd_fw_print_buffer);

-	//fault_if(tail, GREATER, fw_log_buffer->size, uint32_t, "Firmware print tail is out of bounds\n");
-
+	if(tail > fw_log_buffer->size) {
+		nvpva_err(&pva->pdev->dev, "Firmware print tail is out of bounds");
+		goto exit;
+	}
+
 	while (fw_log_buffer->head != tail) {
 		const char *str = content + fw_log_buffer->head;
@@ -361,14 +372,16 @@ void pva_fw_log_dump(struct pva *pva)
 	if ((fw_log_buffer->flags & PVA_FW_PRINT_BUFFER_FULL_LOG_DROPPED) != 0U)
 		nvpva_err(&pva->pdev->dev, "Firmware print log dropped!");

-	mutex_unlock(&pva->pva_fw_log_mutex);
 exit:
+	if(!hold_mutex)
+		mutex_unlock(&pva->pva_fw_log_mutex);
 }

 static void pva_fw_log_dump_handler(struct work_struct *work)
 {
 	struct pva *pva = container_of(work, struct pva, pva_fw_log_work);

-	pva_fw_log_dump(pva);
+	pva_fw_log_dump(pva, false);
 }

 static void pva_fw_log_dump_init(struct pva *pva)
@@ -484,6 +497,11 @@ static int pva_init_fw(struct platform_device *pdev)
 		sema_value |= PVA_VMEM_RD_WAR_DISABLE;

 	sema_value |= (PVA_BOOT_INT | PVA_TEST_WAIT | PVA_VMEM_MBX_WAR_ENABLE);
+	pva->boot_count += 1;
+
+	nvpva_dbg_fn(pva, "boot count = %d", pva->boot_count);
+
+	host1x_writel(pdev, hsp_ss0_clr_r(), 0xFFFFFFFF);
 	host1x_writel(pdev, hsp_ss0_set_r(), sema_value);

 	if (pva->version == PVA_HW_GEN1) {
@@ -546,23 +564,22 @@ static int pva_init_fw(struct platform_device *pdev)
 #ifdef CONFIG_PVA_INTERRUPT_DISABLED
 	err = pva_poll_mailbox_isr(pva, 600000);
 #else
-	err = pva_mailbox_wait_event(pva, 60000);
+	err = pva_mailbox_wait_event(pva, 60000, false);
 #endif
+	nvpva_dbg_fn(pva, "PVA boot returned: %d", err);
 	if (err) {
 		dev_err(&pdev->dev, "mbox timedout boot sema=%x\n",
 			(host1x_readl(pdev, hsp_ss0_state_r())));
 		goto wait_timeout;
 	}

-	pva->cmd_status[PVA_MAILBOX_INDEX] = PVA_CMD_STATUS_INVALID;
-
-	nvpva_dbg_fn(pva, "PVA boot returned: %d", err);
-
 	pva_reset_task_status_buffer(pva);
 	(void)memset(pva->priv_circular_array.va, 0,
 		     pva->priv_circular_array.size);
 wait_timeout:
 out:
+	pva->cmd_status[PVA_MAILBOX_INDEX] = PVA_CMD_STATUS_INVALID;
+
 	return err;
 }
@@ -583,10 +600,10 @@ static int pva_free_fw(struct platform_device *pdev, struct pva *pva)
 		}

 		pva->co->base_pa = 0;
 		pva->co->base_va = 0;
 	}

 	pva->priv1_dma.pa = 0;
 	pva->priv1_dma.va = 0;
 	if (pva->priv2_dma.va) {
 		dma_free_coherent(&pva->aux_pdev->dev, pva->priv2_dma.size,
 				  pva->priv2_dma.va, pva->priv2_dma.pa);
@@ -595,6 +612,8 @@ static int pva_free_fw(struct platform_device *pdev, struct pva *pva)
 	}

 	memset(fw_info, 0, sizeof(struct pva_fw));
+	pva->fw_debug_log.addr = NULL;
+	pva->pva_trace.addr = NULL;

 	return 0;
 }
@@ -828,9 +847,9 @@ int pva_set_log_level(struct pva *pva, u32 log_level, bool mailbox_locked)
 	nregs = pva_cmd_set_logging_level(&cmd, log_level, flags);

 	if (mailbox_locked)
-		pva_mailbox_send_cmd_sync_locked(pva, &cmd, nregs, &status);
+		err = pva_mailbox_send_cmd_sync_locked(pva, &cmd, nregs, &status);
 	else
-		pva_mailbox_send_cmd_sync(pva, &cmd, nregs, &status);
+		err = pva_mailbox_send_cmd_sync(pva, &cmd, nregs, &status);

 	if (err < 0)
 		nvpva_warn(&pva->pdev->dev, "mbox set log level failed: %d\n",
@@ -1002,13 +1021,14 @@ int pva_finalize_poweron(struct platform_device *pdev)
 	timestamp = nvpva_get_tsc_stamp();

 	nvpva_dbg_fn(pva, "");

 	if (!pva->boot_from_file) {
+		nvpva_dbg_fn(pva, "boot from co");
 		pva->co = pva_fw_co_get_info(pva);
 		if (pva->co == NULL) {
 			nvpva_dbg_fn(pva, "failed to get carveout");
 			err = -ENOMEM;
-			goto err_poweron;
+			goto err_poweron_1;
 		}

 		nvpva_dbg_fn(pva, "CO base = %llx, CO size = %llu\n",
@@ -1037,7 +1057,7 @@ int pva_finalize_poweron(struct platform_device *pdev)

 	if (err < 0) {
 		nvpva_err(&pdev->dev, " pva fw failed to load\n");
-		goto err_poweron;
+		goto err_poweron_1;
 	}

 	for (i = 0; i < pva->version_config->irq_count; i++)
@@ -1062,19 +1082,32 @@ int pva_finalize_poweron(struct platform_device *pdev)

 	timestamp2 = nvpva_get_tsc_stamp() - timestamp;

-	pva_set_log_level(pva, pva->log_level, true);
+	err = pva_set_log_level(pva, pva->log_level, true);
+
+	if (err < 0) {
+		nvpva_err(&pdev->dev, " pva fw init: set log level failed\n");
+		goto err_poweron;
+	}

 	pva->booted = true;

 	timestamp = nvpva_get_tsc_stamp() - timestamp;

-	nvpva_dbg_prof(pva, "Power on took %lld us, without log level%lld\n",
+	nvpva_dbg_prof(pva, "Power on took %lld us, without log level %lld\n",
 		       (32 * timestamp)/1000, (32 * timestamp2)/1000);
+	pva->pva_power_on_err = err;

 	pva_trace_copy_to_ftrace(pva);

 	return err;

 err_poweron:
 	for (i = 0; i < pva->version_config->irq_count; i++)
 		disable_irq(pva->irq[i]);

 	pva_trace_copy_to_ftrace(pva);
+	pva_cleanup_after_boot_fail(pdev);
+
+err_poweron_1:
+
+	pva->pva_power_on_err = err;
+
 	return err;
 }
@@ -1089,7 +1122,18 @@ void save_fw_debug_log(struct pva *pva)
 	}
 }

+static int pva_cleanup_after_boot_fail(struct platform_device *pdev)
+{
+	return pva_prepare_poweroff_core(pdev, false);
+}
+
 int pva_prepare_poweroff(struct platform_device *pdev)
 {
+	return pva_prepare_poweroff_core(pdev, true);
+}
+
+static int pva_prepare_poweroff_core(struct platform_device *pdev,
+				     bool hold_reset)
+{
 	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
 	struct pva *pva = pdata->private_data;
@@ -1099,6 +1143,7 @@ int pva_prepare_poweroff(struct platform_device *pdev)
 #endif

 	nvpva_dbg_fn(pva, "");
+
 	/*
 	 * Disable IRQs. Interrupt handler won't be under execution after the
 	 * call returns.
@@ -1123,7 +1168,11 @@ int pva_prepare_poweroff(struct platform_device *pdev)
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)
 	reset_control_acquire(pdata->reset_control);
 #endif
-	reset_control_assert(pdata->reset_control);
+	if(hold_reset)
+		reset_control_assert(pdata->reset_control);
+	else
+		reset_control_reset(pdata->reset_control);

 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)
 	reset_control_release(pdata->reset_control);
 #endif
@@ -1134,6 +1183,43 @@ int pva_prepare_poweroff(struct platform_device *pdev)

 	return 0;
 }

+int pva_busy(struct pva *pva, u32 attempts)
+{
+	int err = 0;
+
+	nvpva_dbg_fn(pva, "b");
+
+#ifdef CONFIG_PM
+	mutex_lock(&pva->pva_busy_mutex);
+	while (attempts-- > 0) {
+		nvpva_dbg_fn(pva, "%d\n", attempts);
+		err = nvhost_module_busy(pva->pdev);
+		if (err == 0)
+			break;
+
+		if (!pm_runtime_suspended(&pva->pdev->dev))
+			break;
+
+		if(!pva->pdev->dev.power.runtime_error)
+			break;
+
+		pm_runtime_set_suspended(&pva->pdev->dev);
+	}
+
+	mutex_unlock(&pva->pva_busy_mutex);
+#else
+	err = nvhost_module_busy(pva->pdev);
+#endif
+
+	nvpva_dbg_fn(pva, "e");
+	return err;
+}
+
+void pva_idle(struct pva *pva)
+{
+	nvhost_module_idle(pva->pdev);
+}
+
 int pva_hwpm_ip_pm(void *ip_dev, bool disable)
 {
 	int err = 0;
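`pva_busy()` above retries `nvhost_module_busy()` only when runtime PM is suspended *and* left in an error state, clearing that state with `pm_runtime_set_suspended()` before the next attempt — the "Reset driver PM state if module busy fails and PM error is set" item from the commit message. Call sites later in this change adopt the pattern sketched here (hedged: `do_pva_work()` is a hypothetical stand-in for the real payload, e.g. `pva_read_crashdump()`):

```c
/* Illustrative caller, mirroring the debugfs/ioctl hunks below. */
static int with_pva_powered(struct pva *pva)
{
	int err = pva_busy(pva, 2);	/* up to one retry after a PM-error reset */

	if (err < 0)
		return err;		/* power-up failed even after the reset */

	do_pva_work(pva);		/* hypothetical payload */
	pva_idle(pva);			/* balance the busy reference */
	return 0;
}
```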
@@ -1342,15 +1428,21 @@ static int pva_probe(struct platform_device *pdev)
 	pdata->private_data = pva;
 	platform_set_drvdata(pdev, pdata);
 	mutex_init(&pva->mailbox_mutex);
-	mutex_init(&pva->ccq_mutex);
+	for (i = 0; i < MAX_PVA_INTERFACE; i++)
+		mutex_init(&pva->ccq_mutex[i]);
+	mutex_init(&pva->recovery_mutex);
+	mutex_init(&pva->pva_busy_mutex);
+	atomic_set(&pva->recovery_cnt, 0);
 	pva->submit_task_mode = PVA_SUBMIT_MODE_MMIO_CCQ;
 	pva->slcg_disable = 0;
 	pva->vmem_war_disable = 0;
 	pva->vpu_printf_enabled = true;
 	pva->vpu_debug_enabled = true;
 	pva->driver_log_mask = NVPVA_DEFAULT_DBG_MASK;
 	pva->log_level = NVPVA_DEFAULT_LG_MASK;
 	pva->profiling_level = 0;
 	pva->stats_enabled = false;
+	pva->in_recovery = false;
 	memset(&pva->vpu_util_info, 0, sizeof(pva->vpu_util_info));
 	pva->syncpts.syncpts_mapped_r = false;
 	pva->syncpts.syncpts_mapped_rw = false;
@@ -1359,6 +1451,7 @@ static int pva_probe(struct platform_device *pdev)
 #endif
 	nvpva_dbg_fn(pva, "match. compatible = %s", match->compatible);
 	pva->is_hv_mode = is_tegra_hypervisor_mode();
+	pva->booted = false;
 	if (pva->is_hv_mode)
 		pva->map_co_needed = false;
 	else
@@ -1379,7 +1472,6 @@ static int pva_probe(struct platform_device *pdev)
 	if (pdata->version == PVA_HW_GEN2)
 		pva->boot_from_file = true;
 #endif
-
 #ifdef __linux__
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
 #if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
@@ -1629,7 +1721,10 @@ static int __exit pva_remove(struct platform_device *pdev)
 	nvhost_module_deinit(pdev);
 	mutex_destroy(&pdata->lock);
 	mutex_destroy(&pva->mailbox_mutex);
-	mutex_destroy(&pva->ccq_mutex);
+	for (i = 0; i < MAX_PVA_INTERFACE; i++)
+		mutex_destroy(&pva->ccq_mutex[i]);
+	mutex_destroy(&pva->recovery_mutex);
+	mutex_destroy(&pva->pva_busy_mutex);
 	mutex_destroy(&pva->pva_auth.allow_list_lock);
 	mutex_destroy(&pva->pva_auth_sys.allow_list_lock);
@@ -119,12 +119,10 @@ struct pva_version_info {
 #endif

 enum nvpva_dbg_categories {
-	pva_dbg_info = BIT(0), /* slightly verbose info */
-	pva_dbg_fn = BIT(2), /* fn name tracing */
-	pva_dbg_reg = BIT(3), /* register accesses, very verbose */
-	pva_dbg_prof = BIT(7), /* profiling info */
-	pva_dbg_powercycle = BIT(8), /* power cycle info */
-	pva_dbg_mem = BIT(31), /* memory accesses, very verbose */
+	pva_dbg_info = BIT(0), /* slightly verbose info */
+	pva_dbg_fn = BIT(2), /* fn name tracing */
+	pva_dbg_prof = BIT(7), /* profiling info */
+	pva_dbg_fw_trace = BIT(8), /* FW trace logs */
 };
@@ -165,8 +163,8 @@ enum nvpva_dbg_categories {
 #define nvpva_dbg_prof(pva, fmt, arg...) \
 		nvpva_dbg(pva, pva_dbg_prof, fmt, ##arg)

-#define nvpva_dbg_powercycle(pva, fmt, arg...) \
-		nvpva_dbg(pva, pva_dbg_powercycle, fmt, ##arg)
+#define nvpva_dbg_fw_trace(pva, fmt, arg...) \
+		nvpva_dbg(pva, pva_dbg_fw_trace, fmt, ##arg)

 /**
  * @brief struct to hold the segment details
@@ -298,7 +296,7 @@ struct pva_version_config {
 			   u32 isr_status,
 			   struct pva_cmd_status_regs *status_out);
 	int (*ccq_send_task)(struct pva *pva, u32 queue_id,
-			     dma_addr_t task_addr, u8 batchsize, u32 flags);
+			     dma_addr_t task_addr, u8 batchsize, u8 *task_status, u32 flags);
 	int (*submit_cmd_sync_locked)(struct pva *pva, struct pva_cmd_s *cmd,
 				      u32 nregs, u32 queue_id,
 				      struct pva_cmd_status_regs *status_regs);
@@ -410,8 +408,9 @@ struct pva {
 	struct pva_cmd_status_regs cmd_status_regs[MAX_PVA_INTERFACE];
 	enum pva_cmd_status cmd_status[MAX_PVA_INTERFACE];
 	struct mutex mailbox_mutex;

-	struct mutex ccq_mutex;
+	struct mutex recovery_mutex;
+	struct mutex pva_busy_mutex;
+	struct mutex ccq_mutex[MAX_PVA_INTERFACE];

 	struct pva_crashdump_debugfs_entry debugfs_entry_r5;
 	struct pva_crashdump_debugfs_entry debugfs_entry_vpu0;
@@ -431,6 +430,7 @@ struct pva {
 	u32 circular_array_wr_pos;
 	struct work_struct task_update_work;
 	atomic_t n_pending_tasks;
+	atomic_t recovery_cnt;
 	struct workqueue_struct *task_status_workqueue;
 	struct pva_trace_log pva_trace;
 	struct pva_fw_debug_log fw_debug_log;
@@ -438,6 +438,8 @@ struct pva {
 	u32 submit_cmd_mode;

 	u32 r5_dbg_wait;
+	u32 boot_count;
+	int pva_power_on_err;
 	bool timeout_enabled;
 	u32 slcg_disable;
 	u32 vmem_war_disable;
@@ -447,9 +449,10 @@ struct pva {
 	bool map_co_needed;
 	bool boot_from_file;
 	bool is_hv_mode;
+	bool in_recovery;
 	struct pva_vpu_util_info vpu_util_info;
 	u32 profiling_level;

+	atomic_t ccq_polling[MAX_PVA_INTERFACE];
 	struct work_struct pva_abort_handler_work;
 	struct work_struct pva_fw_log_work;
 	struct mutex pva_fw_log_mutex;
@@ -494,7 +497,7 @@ void pva_trace_copy_to_ftrace(struct pva *pva);
 * @pva Pointer to pva structure
 *
 */
-void pva_fw_log_dump(struct pva *pva);
+void pva_fw_log_dump(struct pva *pva, bool hold_mutex);

/**
 * @brief Register PVA ISR
@@ -544,6 +547,15 @@ void pva_abort_init(struct pva *pva);
 */
 void pva_abort(struct pva *pva);

+/**
+ * @brief Check if PVA is in recovery
+ *
+ * @param pva Pointer to PVA structure
+ * @return True if in recovery, else false
+ *
+ */
+bool pva_recovery_acquire(struct pva *pva, struct mutex *mutex);
+void pva_recovery_release(struct pva *pva);
/**
 * @brief Run the ucode selftests
 *
@@ -639,3 +651,6 @@ static inline u64 nvpva_get_tsc_stamp(void)
 	return timestamp;
 }
 #endif
+
+int pva_busy(struct pva *pva, u32 attempts);
+void pva_idle(struct pva *pva);
@@ -5,6 +5,8 @@

 #include <linux/nvhost.h>
 #include <linux/wait.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>

 #include "pva.h"
 #include "pva_sec_ec.h"
@@ -26,20 +28,19 @@ static void pva_abort_handler(struct work_struct *work)

 	/* show checkpoint value here*/
 	checkpoint = host1x_readl(pdev,
-				  cfg_ccq_status_r(pva->version, 0, 6));
+				  cfg_ccq_status_r(pva->version, 0, PVA_CCQ_STATUS6_INDEX));
 	nvpva_warn(&pdev->dev, "Checkpoint value: 0x%08x",
 		   checkpoint);

 	/* Dump nvhost state to show the pending jobs */
-	nvhost_debug_dump_device(pdev);
+	nvpva_debug_dump_device(pva);

 	/* Copy trace points to ftrace buffer */
 	pva_trace_copy_to_ftrace(pva);

+	pva_fw_log_dump(pva, true);
+
 	/*wake up sync cmd waiters*/
 	for (i = 0; i < pva->version_config->irq_count; i++) {
 		if (pva->cmd_status[i] == PVA_CMD_STATUS_WFI) {
 			pva->cmd_status[i] = PVA_CMD_STATUS_ABORTED;
 			wake_up(&pva->cmd_waitqueue[i]);
@@ -52,20 +53,21 @@ static void pva_abort_handler(struct work_struct *work)
 		schedule();
 	} while (mutex_trylock(&pva->mailbox_mutex) == false);

 	/* There is no ongoing activity anymore. Update mailbox status */
-	for (i = 0; i < pva->version_config->irq_count; i++) {
+	for (i = 0; i < pva->version_config->irq_count; i++)
 		pva->cmd_status[i] = PVA_CMD_STATUS_INVALID;
-	}

 	/* Lock CCQ mutex to avoid asynchronous communication */
-	mutex_lock(&pva->ccq_mutex);
+	for (i = 0; i < MAX_PVA_INTERFACE; i++)
+		mutex_lock(&pva->ccq_mutex[i]);

 	/*
 	 * If boot was still on-going, skip over recovery and let boot-up
 	 * routine handle the failure
 	 */
 	if (!pva->booted) {
-		nvpva_warn(&pdev->dev, "Recovery skipped: PVA is not booted");
+		nvpva_warn(&pdev->dev, "Recovery skipped: PVA not booted");
 		goto skip_recovery;
 	}
@@ -78,42 +80,64 @@ static void pva_abort_handler(struct work_struct *work)

 	/* enable error reporting to hsm*/
 	pva_enable_ec_err_reporting(pva);

-skip_recovery:
-
 	/* Remove pending tasks from the queue */
 	nvpva_queue_abort_all(pva->pool);

-	nvpva_warn(&pdev->dev, "Recovery finished");
+	if (pva->pva_power_on_err == 0)
+		pva->in_recovery = false;
+
+	for (i = 0; i < MAX_PVA_INTERFACE; i++)
+		mutex_unlock(&pva->ccq_mutex[i]);
+
-	mutex_unlock(&pva->ccq_mutex);
+skip_recovery:
 	mutex_unlock(&pva->mailbox_mutex);
+	mutex_unlock(&pva->pva_fw_log_mutex);
+	nvpva_warn(&pdev->dev, "Recovery finished");
+	pva_recovery_release(pva);
 }

+void pva_recovery_release(struct pva *pva)
+{
+	u32 recovery_cnt;
+
+	mutex_lock(&pva->recovery_mutex);
+	recovery_cnt = atomic_read(&pva->recovery_cnt);
+	if (recovery_cnt > 0)
+		atomic_set(&pva->recovery_cnt, 0);
+
+	mutex_unlock(&pva->recovery_mutex);
+}
+
+bool pva_recovery_acquire(struct pva *pva, struct mutex *mutex)
+{
+	bool acquired = true;
+	u32 recovery_cnt;
+
+	mutex_lock(&pva->recovery_mutex);
+	recovery_cnt = atomic_read(&pva->recovery_cnt);
+	if ((recovery_cnt > 0) || work_pending(&pva->pva_abort_handler_work))
+		acquired = false;
+	else
+		if(mutex == NULL)
+			atomic_set(&pva->recovery_cnt, 1);
+		else
+			mutex_lock(mutex);
+
+	mutex_unlock(&pva->recovery_mutex);
+
+	return acquired;
+}

 void pva_abort(struct pva *pva)
 {
 	struct platform_device *pdev = pva->pdev;
 	size_t i;
 	u32 checkpoint;

 	/* For selftest mode to finish the test */
 	if (host1x_readl(pdev, hsp_ss0_state_r())
 	    & PVA_TEST_MODE) {
 		for (i = 0; i < pva->version_config->irq_count; i++) {
 			pva->cmd_status[i] = PVA_CMD_STATUS_DONE;
 			wake_up(&pva->cmd_waitqueue[i]);
 		}

+	/* If recovery pending, ignore concurrent request */
+	if (!pva_recovery_acquire(pva, NULL)) {
+		WARN(true, "Recovery request while pending ignored.");
+		return;
+	}

 	/* show checkpoint value here*/
 	checkpoint = host1x_readl(pdev,
 				  cfg_ccq_status_r(pva->version, 0, 6));
 	nvpva_warn(&pdev->dev, "Checkpoint value: 0x%08x",
 		   checkpoint);

 	/* Copy trace points to ftrace buffer */
 	pva_trace_copy_to_ftrace(pva);
-	pva_fw_log_dump(pva);

+	pva->in_recovery = true;
 	WARN(true, "Attempting to recover the engine");
 	schedule_work(&pva->pva_abort_handler_work);
 }
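`pva_recovery_acquire()` above has two modes. With `mutex == NULL` it claims recovery ownership by bumping `recovery_cnt` (this is what `pva_abort()` does). With a mutex it claims nothing: it atomically checks that no recovery is pending and takes the caller's lock under `recovery_mutex`, so a recovery cannot slip in between the check and the lock. A sketch of the second mode, mirroring `pva_queue_flush()` in a later hunk (error handling trimmed for illustration):

```c
/* Submitter-side use of the guard. */
if (!pva_recovery_acquire(pva, &pva->ccq_mutex[queue->id + 1]))
	return -ENODEV;	/* abort/recovery pending: leave the CCQ alone */

/* ... safe to talk to this queue's CCQ interface here ... */

mutex_unlock(&pva->ccq_mutex[queue->id + 1]);
```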
@@ -59,14 +59,14 @@ static int pva_ccq_wait(struct pva *pva, int timeout)
 }

 int pva_ccq_send_task_t19x(struct pva *pva, u32 queue_id, dma_addr_t task_addr,
-			   u8 batchsize, u32 flags)
+			   u8 batchsize, u8 *task_status, u32 flags)
 {
 	int err = 0;
 	struct pva_cmd_s cmd = {0};

 	(void)pva_cmd_submit_batch(&cmd, queue_id, task_addr, batchsize, flags);

-	mutex_lock(&pva->ccq_mutex);
+	mutex_lock(&pva->ccq_mutex[1]);
 	err = pva_ccq_wait(pva, 100);
 	if (err < 0)
 		goto err_wait_ccq;
@@ -75,12 +75,12 @@ int pva_ccq_send_task_t19x(struct pva *pva, u32 queue_id, dma_addr_t task_addr,
 	host1x_writel(pva->pdev, cfg_ccq_r(pva->version, 0), cmd.cmd_field[1]);
 	host1x_writel(pva->pdev, cfg_ccq_r(pva->version, 0), cmd.cmd_field[0]);

-	mutex_unlock(&pva->ccq_mutex);
+	mutex_unlock(&pva->ccq_mutex[1]);

 	return err;

 err_wait_ccq:
-	mutex_unlock(&pva->ccq_mutex);
+	mutex_unlock(&pva->ccq_mutex[1]);
 	pva_abort(pva);

 	return err;

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2025, NVIDIA CORPORATION. All rights reserved.
 *
 * PVA Command Queue Interface handling
 */
@@ -13,6 +13,6 @@
 #include "pva.h"

 int pva_ccq_send_task_t19x(struct pva *pva, u32 queue_id, dma_addr_t task_addr,
-			   u8 batchsize, u32 flags);
+			   u8 batchsize, u8 *task_status, u32 flags);

 #endif
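The new `u8 *task_status` out-parameter threads the task's lifecycle state through the CCQ path: the T23x implementation (next hunks) refuses to write a task to CCQ if it was marked `PVA_TASK_INVALID` by an abort while the submitter waited for FIFO room, and marks it `PVA_TASK_SUBMITTED` once the write lands. The T19x variant appears to take the parameter only to keep the `ccq_send_task()` function-pointer signature uniform. A hedged caller sketch (the wrapper function is hypothetical; the constants come from this change):

```c
/* Sketch of the new submit protocol. */
static int submit_one(struct pva *pva, u32 queue_id, dma_addr_t addr, u32 flags)
{
	u8 state = PVA_TASK_QUEUED;
	int err;

	err = pva_ccq_send_task_t23x(pva, queue_id, addr, 1, &state, flags);
	if (err == -EINVAL)
		return err;	/* aborted while waiting: state was PVA_TASK_INVALID */

	/* on success the helper set state = PVA_TASK_SUBMITTED */
	return err;
}
```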
@@ -23,19 +23,35 @@

 #define MAX_CCQ_ELEMENTS 6

+#define PVA_TASK_FREE 0
+#define PVA_TASK_ASSIGNED 1
+#define PVA_TASK_QUEUED 2
+#define PVA_TASK_SUBMITTED 3
+#define PVA_TASK_INVALID 4
+
+
 static int pva_ccq_wait(struct pva *pva, int timeout, unsigned int queue_id)
 {
 	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
 	u32 poll_count = 0;
+	u32 err = 0;

 	/*
 	 * Wait until there is free room in the CCQ. Otherwise the writes
 	 * could stall the CPU. Ignore the timeout in simulation.
 	 */

+	atomic_add(1, &pva->ccq_polling[queue_id]);
+	nvpva_dbg_fn(pva, "b %d", atomic_read(&pva->ccq_polling[queue_id]));
+
 	do {
 		u32 val;

+		if(pva->in_recovery) {
+			err = -ETIMEDOUT;
+			break;
+		}
+
 		if ((pva->timeout_enabled == true) && time_after(jiffies, end_jiffies)) {
 			/* check once more if a slot is available*/
 			val = PVA_EXTRACT(
@@ -46,11 +62,12 @@ static int pva_ccq_wait(struct pva *pva, int timeout, unsigned int queue_id)
 			if (val <= MAX_CCQ_ELEMENTS) {
 				if (!poll_count)
 					WARN(true, "pva_ccq_wait false time out on first check");
-				return 0;
+				break;
 			}

 			nvpva_err(&pva->pdev->dev, "ccq wait timed out with %u in fifo", val);
-			return -ETIMEDOUT;
+			err = -ETIMEDOUT;
+			break;
 		}

 		++poll_count;
@@ -61,40 +78,59 @@ static int pva_ccq_wait(struct pva *pva, int timeout, unsigned int queue_id)
 				  4, 0, u32);

 		if (val <= MAX_CCQ_ELEMENTS)
-			return 0;
+			break;

 		usleep_range(5, 10);
 	} while (true);

+	atomic_sub(1, &pva->ccq_polling[queue_id]);
+
+	nvpva_dbg_fn(pva, "e %d", atomic_read(&pva->ccq_polling[queue_id]));
+
+	return err;
 }

 static int pva_ccq_send_cmd(struct pva *pva, u32 queue_id,
-			    struct pva_cmd_s *cmd)
+			    struct pva_cmd_s *cmd, u8 *task_status)
 {
 	int err = 0;

+	if(!pva->booted)
+		err = -ENODEV;
+
 	err = pva_ccq_wait(pva, 100, queue_id);
 	if (err < 0)
 		goto err_wait_ccq;

+	if ((task_status != NULL) && (*task_status == PVA_TASK_INVALID)) {
+		err = -EINVAL;
+		return err;
+	}
+
 	/* Make the writes to CCQ */
 	host1x_writel(pva->pdev, cfg_ccq_r(pva->version, queue_id),
 		      cmd->cmd_field[1]);
 	host1x_writel(pva->pdev, cfg_ccq_r(pva->version, queue_id),
 		      cmd->cmd_field[0]);
+	if (task_status != NULL)
+		*task_status = PVA_TASK_SUBMITTED;

 	return err;

 err_wait_ccq:

 	pva_abort(pva);
 	return err;
 }

 int pva_ccq_send_task_t23x(struct pva *pva, u32 queue_id, dma_addr_t task_addr,
-			   u8 batchsize, u32 flags)
+			   u8 batchsize, u8 *task_status, u32 flags)
 {
 	int err = 0;
 	struct pva_cmd_s cmd = { 0 };

 	(void)pva_cmd_submit_batch(&cmd, queue_id, task_addr, batchsize, flags);

-	err = pva_ccq_send_cmd(pva, queue_id, &cmd);
+	err = pva_ccq_send_cmd(pva, queue_id, &cmd, task_status);
 	return err;
 }
@@ -141,6 +177,7 @@ static int pva_ccq_wait_event(struct pva *pva, unsigned int queue_id, int wait_t
 						 pva->cmd_status[interface] ==
 						 PVA_CMD_STATUS_ABORTED);
 	}

 	if (timeout <= 0) {
 		err = -ETIMEDOUT;
 		pva_abort(pva);
@@ -173,13 +210,16 @@ int pva_ccq_send_cmd_sync(struct pva *pva, struct pva_cmd_s *cmd, u32 nregs,
 		goto err_check_status;
 	}

+	if(!pva->booted)
+		err = -ENODEV;
+
 	/* Mark that we are waiting for an interrupt */
 	pva->cmd_status[interface] = PVA_CMD_STATUS_WFI;
 	memset(&pva->cmd_status_regs[interface], 0,
 	       sizeof(struct pva_cmd_status_regs));

 	/* Submit command to PVA */
-	err = pva_ccq_send_cmd(pva, queue_id, cmd);
+	err = pva_ccq_send_cmd(pva, queue_id, cmd, NULL);
 	if (err < 0)
 		goto err_send_command;
@@ -207,6 +247,9 @@ int pva_send_cmd_sync(struct pva *pva, struct pva_cmd_s *cmd, u32 nregs,
 {
 	int err = 0;

+	if(!pva->booted)
+		err = -ENODEV;
+
 	switch (pva->submit_cmd_mode) {
 	case PVA_SUBMIT_MODE_MAILBOX:
 		err = pva_mailbox_send_cmd_sync(pva, cmd, nregs, status_regs);
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2025, NVIDIA CORPORATION. All rights reserved.
 *
 * PVA Command Queue Interface handling
 */
@@ -14,7 +14,7 @@
 #include "pva_status_regs.h"

 int pva_ccq_send_task_t23x(struct pva *pva, u32 queue_id, dma_addr_t task_addr,
-			   u8 batchsize, u32 flags);
+			   u8 batchsize, u8 *task_status, u32 flags);
 void pva_ccq_isr_handler(struct pva *pva, unsigned int queue_id);
 int pva_ccq_send_cmd_sync(struct pva *pva, struct pva_cmd_s *cmd, u32 nregs,
 			  u32 queue_id,
@@ -47,7 +47,7 @@ static int pva_crashdump(struct seq_file *s, void *data)
 		(struct pva_crashdump_debugfs_entry *)s->private;
 	struct pva *pva = entry->pva;

-	err = nvhost_module_busy(pva->pdev);
+	err = pva_busy(pva, 2);
 	if (err) {
 		nvpva_dbg_info(pva, "err in powering up pva\n");
 		goto err_poweron;
@@ -55,7 +55,7 @@ static int pva_crashdump(struct seq_file *s, void *data)

 	pva_read_crashdump(s, &entry->seg_info);

-	nvhost_module_idle(pva->pdev);
+	pva_idle(pva);

 err_poweron:
 	return err;
@@ -138,7 +138,7 @@ static int fw_debug_log_open(struct inode *inode, struct file *file)
 	iter->pva = pva;

 	if (pva->booted) {
-		err = nvhost_module_busy(pva->pdev);
+		err = pva_busy(pva, 2);
 		if (err) {
 			nvpva_err(&pva->pdev->dev, "err in powering up pva");
 			err = -EIO;
@@ -147,7 +147,7 @@ static int fw_debug_log_open(struct inode *inode, struct file *file)

 		save_fw_debug_log(pva);

-		nvhost_module_idle(pva->pdev);
+		pva_idle(pva);
 	}

 	iter->buffer = pva->fw_debug_log.saved_log;
@@ -189,7 +189,7 @@ static int print_firmware_versions(struct seq_file *s, void *data)
 	struct pva_version_info info;
 	int ret = 0;

-	ret = nvhost_module_busy(pva->pdev);
+	ret = pva_busy(pva, 2);
 	if (ret < 0)
 		goto err_poweron;

@@ -197,7 +197,7 @@ static int print_firmware_versions(struct seq_file *s, void *data)
 	if (ret < 0)
 		goto err_get_firmware_version;

-	nvhost_module_idle(pva->pdev);
+	pva_idle(pva);

 	print_version(s, "pva_r5_version", info.pva_r5_version);
 	print_version(s, "pva_compat_version", info.pva_compat_version);
@@ -207,7 +207,7 @@ static int print_firmware_versions(struct seq_file *s, void *data)
 	return 0;

 err_get_firmware_version:
-	nvhost_module_idle(pva->pdev);
+	pva_idle(pva);
 err_poweron:
 	return ret;
 }
@@ -260,11 +260,11 @@ static void update_vpu_stats(struct pva *pva, bool stats_enabled)
 	if (pm_runtime_suspended(&pva->pdev->dev)) {
 		vpu_stats[0] = 0;
 		vpu_stats[1] = 0;

 		return;
 	}
 #endif

-	err = nvhost_module_busy(pva->pdev);
+	err = pva_busy(pva, 2);
 	if (err < 0) {
 		dev_err(&pva->pdev->dev, "error in powering up pva %d",
 			err);
@@ -301,7 +301,7 @@ err_out:
 	vpu_stats[0] = 0;
 	vpu_stats[1] = 0;
 out:
-	nvhost_module_idle(pva->pdev);
+	pva_idle(pva);
 }

 static int print_vpu_stats(struct seq_file *s, void *data)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2016-2024, NVIDIA CORPORATION. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2016-2025, NVIDIA CORPORATION. All rights reserved.

 #include <nvidia/conftest.h>

@@ -1013,7 +1013,7 @@ static int pva_open(struct inode *inode, struct file *file)
 	}

 	sema_init(&priv->queue->task_pool_sem, MAX_PVA_TASK_COUNT_PER_QUEUE);
-	err = nvhost_module_busy(pva->pdev);
+	err = pva_busy(pva, 2);
 	if (err < 0) {
 		dev_err(&pva->pdev->dev, "error in powering up pva %d",
 			err);
@@ -1032,7 +1032,7 @@ err_alloc_priv:
 	return err;
 }

-static void pva_queue_flush(struct pva *pva, struct nvpva_queue *queue)
+static int pva_queue_flush(struct pva *pva, struct nvpva_queue *queue)
 {
 	u32 flags = PVA_CMD_INT_ON_ERR | PVA_CMD_INT_ON_COMPLETE;
 	struct pva_cmd_status_regs status = {};
@@ -1040,12 +1040,19 @@ static void pva_queue_flush(struct pva *pva, struct nvpva_queue *queue)
 	int err = 0;
 	u32 nregs;

+	if (!pva_recovery_acquire(pva, &pva->ccq_mutex[queue->id + 1])) {
+		nvpva_warn(&pva->pdev->dev,
+			   "queue flush with abort pending ignored");
+		err = -ENODEV;
+		goto err_out;
+	}
+
 	nregs = pva_cmd_abort_task(&cmd, queue->id, flags);
 	err = nvhost_module_busy(pva->pdev);
 	if (err < 0) {
 		dev_err(&pva->pdev->dev, "error in powering up pva %d",
 			err);
-		goto err_out;
+		goto err_cleanup;
 	}

 	err = pva->version_config->submit_cmd_sync(pva, &cmd, nregs, queue->id,
@@ -1054,7 +1061,7 @@ static void pva_queue_flush(struct pva *pva, struct nvpva_queue *queue)
 	if (err < 0) {
 		dev_err(&pva->pdev->dev, "failed to issue FW abort command: %d",
 			err);
-		goto err_out;
+		goto err_cleanup;
 	}
 	/* Ensure that response is valid */
 	if (status.error != PVA_ERR_NO_ERROR) {
@@ -1062,8 +1069,10 @@ static void pva_queue_flush(struct pva *pva, struct nvpva_queue *queue)
 			status.error);
 	}

+err_cleanup:
+	mutex_unlock(&pva->ccq_mutex[queue->id + 1]);
 err_out:
-	return;
+	return err;
 }

 static int pva_release(struct inode *inode, struct file *file)
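An indexing convention is implied by these hunks (inferred from the code, not stated in the commit message): slot 0 of the new `ccq_mutex[]` array corresponds to the command interface, and a task queue maps to `ccq_mutex[queue->id + 1]` — which is why the single-CCQ T19x path locks the fixed index 1. A hypothetical helper expressing that mapping, not part of this change:

```c
/* Centralizes the queue-to-lock mapping used literally as
 * queue->id + 1 in the hunks above and below. */
static struct mutex *pva_ccq_lock_for(struct pva *pva,
				      struct nvpva_queue *queue)
{
	return &pva->ccq_mutex[queue->id + 1];
}
```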
@@ -1071,17 +1080,19 @@ static int pva_release(struct inode *inode, struct file *file)
 	struct pva_private *priv = file->private_data;
 	bool queue_empty;
 	int i;
+	int err = 0;

 	flush_workqueue(priv->pva->task_status_workqueue);
 	mutex_lock(&priv->queue->list_lock);
 	queue_empty = list_empty(&priv->queue->tasklist);
-	mutex_unlock(&priv->queue->list_lock);
 	if (!queue_empty) {
 		/* Cancel remaining tasks */
 		nvpva_dbg_info(priv->pva, "cancel remaining tasks");
-		pva_queue_flush(priv->pva, priv->queue);
+		err = pva_queue_flush(priv->pva, priv->queue);
 	}

+	mutex_unlock(&priv->queue->list_lock);

 	/* make sure all tasks have been finished */
 	for (i = 0; i < MAX_PVA_TASK_COUNT_PER_QUEUE; i++) {
 		if (down_killable(&priv->queue->task_pool_sem) != 0) {
@@ -1094,7 +1105,7 @@ static int pva_release(struct inode *inode, struct file *file)
 		}
 	}

-	nvhost_module_idle(priv->pva->pdev);
+	pva_idle(priv->pva);

 	/* Release reference to client */
 	nvpva_client_context_put(priv->client);
@@ -39,7 +39,7 @@ static irqreturn_t pva_system_isr(int irq, void *dev_id)
 	struct pva *pva = dev_id;
 	struct platform_device *pdev = pva->pdev;
 	u32 checkpoint = host1x_readl(pdev,
-				      cfg_ccq_status_r(pva->version, 0, 6));
+				      cfg_ccq_status_r(pva->version, 0, PVA_CCQ_STATUS6_INDEX));
 	u32 status7 = pva->version_config->read_mailbox(pdev, PVA_MBOX_ISR);
 	u32 status5 = pva->version_config->read_mailbox(pdev, PVA_MBOX_AISR);
 	u32 lic_int_status = host1x_readl(pdev,
@@ -75,7 +75,6 @@ static irqreturn_t pva_system_isr(int irq, void *dev_id)

 	if (status7 & PVA_INT_PENDING) {
 		nvpva_dbg_info(pva, "PVA ISR (%x)", status7);
-
 		pva_mailbox_isr(pva);
 	}

@@ -31,10 +31,13 @@ irqreturn_t pva_ccq_isr(int irq, void *dev_id)
 			break;
 		}
 	}

 	if (queue_id == MAX_PVA_QUEUE_COUNT + 1) {
-		printk("Invalid IRQ received. Returning from ISR");
+		nvpva_warn(&pdev->dev,
+			   "Invalid IRQ received. Returning from ISR");
 		return IRQ_HANDLED;
 	}

 	nvpva_dbg_info(pva, "Received ISR from CCQ block, IRQ: %d", irq);
 	int_status = host1x_readl(pdev, cfg_ccq_status_r(pva->version,
 							 queue_id, PVA_CCQ_STATUS2_INDEX))
@@ -54,10 +57,12 @@ irqreturn_t pva_ccq_isr(int irq, void *dev_id)
 		isr_status = host1x_readl(pdev, cfg_ccq_status_r(pva->version,
 								 queue_id, PVA_CCQ_STATUS7_INDEX));
 	}

 	if (int_status & PVA_VALID_CCQ_AISR) {
 		aisr_status = host1x_readl(pdev, cfg_ccq_status_r(pva->version,
 								  queue_id, PVA_CCQ_STATUS8_INDEX));
 	}

 	if (aisr_status & PVA_AISR_INT_PENDING) {
 		nvpva_dbg_info(pva, "PVA CCQ AISR (%x)", aisr_status);
 		if (aisr_status &
@@ -76,21 +81,29 @@ irqreturn_t pva_ccq_isr(int irq, void *dev_id)
 				   "PVA AISR: \
 				   PVA_AISR_TASK_ERROR for queue id = %d",
 				   queue_id);

 		if (aisr_status & PVA_AISR_ABORT) {
 			nvpva_warn(&pdev->dev, "PVA AISR: \
 				   PVA_AISR_ABORT for queue id = %d",
 				   queue_id);
 			nvpva_warn(&pdev->dev, "Checkpoint value: 0x%08x",
-				   aisr_status);
+				   host1x_readl(pdev,
+						cfg_ccq_status_r(pva->version,
+								 queue_id,
+								 PVA_CCQ_STATUS6_INDEX)));
 			recover = true;
 		}

 		/* Acknowledge AISR by writing status 1 */
 		host1x_writel(pdev, cfg_ccq_status_r(pva->version, queue_id,
 						     PVA_CCQ_STATUS1_INDEX), 0x01U);
 	}

 	if (isr_status & PVA_INT_PENDING) {
 		pva_ccq_isr_handler(pva, queue_id);
 	}

+	pva_trace_copy_to_ftrace(pva);
+
 	if (recover)
 		pva_abort(pva);
@@ -66,7 +66,7 @@ static int pva_mailbox_send_cmd(struct pva *pva, struct pva_cmd_s *cmd,
 	return 0;
 }

-int pva_mailbox_wait_event(struct pva *pva, int wait_time)
+int pva_mailbox_wait_event(struct pva *pva, int wait_time, bool abort_ok)
 {
 	int timeout = 1;
 	int err = 0;
@@ -93,15 +93,16 @@ int pva_mailbox_wait_event(struct pva *pva, int wait_time)
 		    && (pva->cmd_status[PVA_MAILBOX_INDEX] !=
 			PVA_CMD_STATUS_ABORTED)) {
 			err = -ETIMEDOUT;
-			pva_abort(pva);
+			if(abort_ok)
+				pva_abort(pva);
 		} else {
 			WARN(true, "wait_event_timeout reported false timeout");
-
-			err = 0;
+			if (pva->cmd_status[PVA_MAILBOX_INDEX] ==
+			    PVA_CMD_STATUS_ABORTED) {
+				err = -EIO;
+			} else {
+				err = 0;
+			}
 		}
 	} else if (pva->cmd_status[PVA_MAILBOX_INDEX] ==
 		   PVA_CMD_STATUS_ABORTED)
@@ -117,7 +118,8 @@ void pva_mailbox_isr(struct pva *pva)
 	struct platform_device *pdev = pva->pdev;
 	u32 int_status = pva->version_config->read_mailbox(pdev, PVA_MBOX_ISR);
 	if (pva->cmd_status[PVA_MAILBOX_INDEX] != PVA_CMD_STATUS_WFI) {
-		nvpva_warn(&pdev->dev, "Unexpected PVA ISR (%x)", int_status);
+		nvpva_warn(&pdev->dev, "Unexpected PVA ISR (%x, %X), ",
+			   int_status, pva->cmd_status[PVA_MAILBOX_INDEX]);
 		return;
 	}
@@ -143,6 +145,9 @@ int pva_mailbox_send_cmd_sync_locked(struct pva *pva,
 {
 	int err = 0;

+	if(!pva->booted)
+		err = -ENODEV;
+
 	if (status_regs == NULL) {
 		err = -EINVAL;
 		goto err_invalid_parameter;
@@ -167,7 +172,7 @@ int pva_mailbox_send_cmd_sync_locked(struct pva *pva,
 #ifdef CONFIG_PVA_INTERRUPT_DISABLED
 	err = pva_poll_mailbox_isr(pva, 100000);
 #else
-	err = pva_mailbox_wait_event(pva, 100);
+	err = pva_mailbox_wait_event(pva, 100, true);
 #endif
 	if (err < 0)
 		goto err_wait_response;
@@ -175,15 +180,12 @@ int pva_mailbox_send_cmd_sync_locked(struct pva *pva,
 	/* Return interrupt status back to caller */
 	memcpy(status_regs, &pva->cmd_status_regs,
 	       sizeof(struct pva_cmd_status_regs));

 	pva->cmd_status[PVA_MAILBOX_INDEX] = PVA_CMD_STATUS_INVALID;
 	return err;

 err_wait_response:
 err_send_command:
 	pva->cmd_status[PVA_MAILBOX_INDEX] = PVA_CMD_STATUS_INVALID;
 err_check_status:
 err_invalid_parameter:

 	return err;
 }
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2025, NVIDIA CORPORATION. All rights reserved.
 *
 * PVA mailbox header
 */
@@ -130,6 +130,6 @@ int pva_poll_mailbox_isr(struct pva *pva, int wait_time);
 * This function does the wait until the mailbox isr gets invoked based on
 * the mailbox register set by the ucode.
 */
-int pva_mailbox_wait_event(struct pva *pva, int wait_time);
+int pva_mailbox_wait_event(struct pva *pva, int wait_time, bool abort_ok);

 #endif /*__PVA_MAINBOX_H__*/
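The new `abort_ok` flag decides whether a mailbox timeout may schedule recovery. The boot path passes `false`, so a timeout in `pva_init_fw()` is handled (and possibly re-attempted) by the boot path itself rather than recovering an engine that never came up; runtime command waits keep the old behavior. The two call sites from this change, annotated:

```c
/* Boot (pva_init_fw): do not trigger pva_abort() on timeout; the boot
 * path cleans up via pva_cleanup_after_boot_fail() and may retry. */
err = pva_mailbox_wait_event(pva, 60000, false);

/* Runtime (pva_mailbox_send_cmd_sync_locked): a timeout means a wedged
 * engine, so recovery is allowed. */
err = pva_mailbox_wait_event(pva, 100, true);
```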
@@ -1110,9 +1110,21 @@ static int pva_task_submit_mmio_ccq(struct pva_submit_task *task, u8 batchsize)
 	u32 flags = PVA_CMD_INT_ON_ERR;
 	int err = 0;

+	mutex_lock(&task->pva->ccq_mutex[task->queue->id + 1]);
+
+	if (task->task_state != PVA_TASK_QUEUED) {
+		err = -EINVAL;
+		goto err_invalid;
+	}
+
 	/* Construct submit command */
 	err = task->pva->version_config->ccq_send_task(
-		task->pva, task->queue->id, task->dma_addr, batchsize, flags);
+		task->pva, task->queue->id, task->dma_addr, batchsize, &task->task_state, flags);

+err_invalid:
+
+	mutex_unlock(&task->pva->ccq_mutex[task->queue->id + 1]);
+
 	return err;
 }
 static int pva_task_submit_mailbox(struct pva_submit_task *task, u8 batchsize)
@@ -1123,6 +1135,15 @@ static int pva_task_submit_mailbox(struct pva_submit_task *task, u8 batchsize)
 	u32 flags, nregs;
 	int err = 0;

+	mutex_lock(&task->pva->mailbox_mutex);
+	if (task->task_state != PVA_TASK_QUEUED) {
+		err = -EINVAL;
+		mutex_unlock(&task->pva->mailbox_mutex);
+		goto out;
+	}
+
+	mutex_unlock(&task->pva->mailbox_mutex);
+
 	/* Construct submit command */
 	flags = PVA_CMD_INT_ON_ERR | PVA_CMD_INT_ON_COMPLETE;
 	nregs = pva_cmd_submit_batch(&cmd, queue->id, task->dma_addr, batchsize,
@@ -1143,7 +1164,6 @@ static int pva_task_submit_mailbox(struct pva_submit_task *task, u8 batchsize)
 	}

 out:
-
 	return err;
 }
@@ -1184,6 +1204,7 @@ static int pva_task_submit(const struct pva_submit_tasks *task_header)
 #else
 	timestamp = arch_counter_get_cntvct();
 #endif
+	mutex_lock(&queue->list_lock);
 	for (i = 0; i < task_header->num_tasks; i++) {
 		struct pva_submit_task *task = task_header->tasks[i];
 		struct pva_hw_task *hw_task = task->va;
@@ -1194,13 +1215,13 @@ static int pva_task_submit(const struct pva_submit_tasks *task_header)
 		nvpva_syncpt_incr_max(queue, task->fence_num);
 		task->client->curr_sema_value += task->sem_num;

-		mutex_lock(&queue->list_lock);
 		list_add_tail(&task->node, &queue->tasklist);
-		mutex_unlock(&queue->list_lock);

 		hw_task->task.queued_time = timestamp;
+		task->task_state = PVA_TASK_QUEUED;
 	}

+	mutex_unlock(&queue->list_lock);

 	/*
 	 * TSC timestamp is same as CNTVCT. Task statistics are being
 	 * reported in TSC ticks.
@@ -1263,6 +1284,9 @@ out:

 err_submit:

+	if ((err == -ETIMEDOUT) || (err == -EINVAL))
+		goto out_err;
+
 	for (i = 0; i < task_header->num_tasks; i++) {
 		struct pva_submit_task *task = task_header->tasks[i];

@@ -1276,6 +1300,8 @@ err_submit:
 		kref_put(&task->ref, pva_task_free);
 	}

+out_err:
+
 	return err;
 }
@@ -1449,9 +1475,11 @@ static int pva_queue_submit(struct nvpva_queue *queue, void *args)
 	err = pva_task_submit(task_header);
 	if (err) {
 		dev_err(&queue->vm_pdev->dev, "failed to submit task");
-		mutex_lock(&queue->tail_lock);
-		queue->hw_task_tail = queue->old_tail;
-		mutex_unlock(&queue->tail_lock);
+		if (queue->hw_task_tail != NULL) {
+			mutex_lock(&queue->tail_lock);
+			queue->hw_task_tail = queue->old_tail;
+			mutex_unlock(&queue->tail_lock);
+		}
 	}
 unlock:
 	mutex_unlock(&client->sema_val_lock);
@@ -1553,6 +1581,7 @@ static int pva_queue_abort(struct nvpva_queue *queue)

 	list_for_each_entry_safe(task, n, &queue->tasklist, node) {
 		pva_queue_cleanup(queue, task);
+		task->task_state = PVA_TASK_INVALID;
 		list_del(&task->node);
 		kref_put(&task->ref, pva_task_free);
 	}
@@ -1560,6 +1589,7 @@ static int pva_queue_abort(struct nvpva_queue *queue)

 	/* Finish syncpoint increments to release waiters */
 	nvhost_syncpt_set_min_update(queue->vm_pdev, queue->syncpt_id,
 				     atomic_read(&queue->syncpt_maxval));
+	queue->hw_task_tail = NULL;
 	mutex_unlock(&queue->list_lock);

 	return 0;
@@ -1575,7 +1605,10 @@ pva_queue_dump_all(struct pva *pva,
 	nvpva_err(&pva->pdev->dev, "Queue %u, Tasks\n", queue->id);
 	mutex_lock(&queue->list_lock);
 	list_for_each_entry(task, &queue->tasklist, node) {
-		nvpva_err(&pva->pdev->dev, " #%u: exe_id = %u\n", i++, task->exe_id);
+		nvpva_err(&pva->pdev->dev,
+			  " #%u: exe_id1 = %u, exe_id2 = %u\n",
+			  i++, task->exe_id1, task->exe_id2);

 	}

 	mutex_unlock(&queue->list_lock);
@@ -38,6 +38,12 @@

 #define MAX_NUM_FRAMES NVPVA_TASK_MAX_HWSEQ_FRAME_COUNT_T26X

+#define PVA_TASK_FREE 0
+#define PVA_TASK_ASSIGNED 1
+#define PVA_TASK_QUEUED 2
+#define PVA_TASK_SUBMITTED 3
+#define PVA_TASK_INVALID 4
+
 struct dma_buf;

 extern struct nvpva_queue_ops pva_queue_ops;
@@ -195,6 +201,7 @@ struct pva_submit_task {
 	u64 dst_surf_base_addr;
 	bool is_system_app;
 	bool default_sem_update_method;
+	u8 task_state;
 };

 struct pva_submit_tasks {
@@ -220,7 +227,7 @@ struct pva_submit_tasks {
 	ALIGN(((NVPVA_TASK_MAX_PREFENCES * ACTION_LIST_FENCE_SIZE) + \
 	       ((NVPVA_TASK_MAX_FENCEACTIONS * 2U) * ACTION_LIST_FENCE_SIZE) + \
 	       NVPVA_TASK_MAX_INPUT_STATUS * \
 	       ACTION_LIST_STATUS_OPERATION_SIZE + \
 	       ACTION_LIST_TERMINATION_SIZE), \
 	      256)
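Taken together, the submit and abort hunks imply the following lifecycle for `task_state` (reconstructed from the code; the commit message does not spell it out):

```c
/*
 * Inferred task_state lifecycle:
 *
 *   PVA_TASK_QUEUED     set in pva_task_submit() when the task is added
 *                       to the queue's tasklist under list_lock;
 *   PVA_TASK_SUBMITTED  set in pva_ccq_send_cmd() once the command
 *                       words are written to CCQ under ccq_mutex[i];
 *   PVA_TASK_INVALID    set in pva_queue_abort() during recovery — a
 *                       later submit attempt then fails with -EINVAL
 *                       instead of touching the hardware.
 *
 * A submit path may only move QUEUED -> SUBMITTED; a hypothetical guard
 * expressing that check:
 */
static bool pva_task_submittable(const struct pva_submit_task *task)
{
	return task->task_state == PVA_TASK_QUEUED;
}
```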
@@ -27,7 +27,7 @@ static void read_linear(struct pva *pva, struct pva_trace_log *trace, u32 toff)
 	dt = bh->start_time;
 	for (i = 0 ; i < bh->n_entries ; i++) {
 		dt = dt + tp->delta_time;
-		nvpva_dbg_info(pva, "delta_time: %llu\t %s\t major: %u\t"
+		nvpva_dbg_fw_trace(pva, "delta_time: %llu\t %s\t major: %u\t"
 			       "minor: %u\t flags: %u\tsequence: %u\targ1:"
 			       " %u\targ2: %u\n",
 			       dt, name, tp->major, tp->minor, tp->flags,

@@ -6,6 +6,8 @@
 #ifndef _PVA_TRACE_H_
 #define _PVA_TRACE_H_

+#define NVPVA_DEFAULT_LG_MASK 0x00000000
+
 /*
 * Individual Trace point
 *