mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-22 09:11:26 +03:00)
pva: mirror from gitlab cv/pva-sys-sw
This commit has the below list of fixes:
- kmd: Remove unnecessary checks
- kmd: Fix DMA validation bug
- Add macros for verbosity controlled logging apis
- Add back pva_dbg_printf
- update copyrights for the modified files
- cleanup and fixes in nvlog support
- Implement debugfs node for setting fw debug log level
- Implement set debug log level cmd
- Add debug log verbosity level control support
- Implement circular buffer for debug logs
- kmd: fix error handling while creating context

Gitlab commit: a6acc89929f69d3aedff442068ee8e5725f03d5d
Change-Id: Ib0fc058324f19c076c3de7990b41c7415707ff28
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3318294
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Tested-by: Nan Wang <nanwa@nvidia.com>
Reviewed-by: Nan Wang <nanwa@nvidia.com>
Reviewed-by: Mohnish Jain <mohnishj@nvidia.com>
@@ -25,6 +25,7 @@ PVA_SYS_ABSDIR := $(srctree.nvidia-oot)/drivers/video/tegra/host/pva
###### Begin generated section ######
pva_objs += \
$(PVA_SYS_DIR)/src/kmd/common/pva_kmd_abort.o \
$(PVA_SYS_DIR)/src/kmd/common/pva_kmd_block_allocator.o \
$(PVA_SYS_DIR)/src/kmd/common/pva_kmd_cmdbuf.o \
$(PVA_SYS_DIR)/src/kmd/common/pva_kmd_context.o \
@@ -43,6 +44,7 @@ pva_objs += \
$(PVA_SYS_DIR)/src/kmd/common/pva_kmd_queue.o \
$(PVA_SYS_DIR)/src/kmd/common/pva_kmd_resource_table.o \
$(PVA_SYS_DIR)/src/kmd/common/pva_kmd_sha256.o \
$(PVA_SYS_DIR)/src/kmd/common/pva_kmd_shared_buffer.o \
$(PVA_SYS_DIR)/src/kmd/common/pva_kmd_silicon_boot.o \
$(PVA_SYS_DIR)/src/kmd/common/pva_kmd_silicon_elf_parser.o \
$(PVA_SYS_DIR)/src/kmd/common/pva_kmd_silicon_executable.o \
@@ -58,6 +60,7 @@ pva_objs += \
$(PVA_SYS_DIR)/src/kmd/common/pva_kmd_vpu_ocd.o \
$(PVA_SYS_DIR)/src/kmd/linux/pva_kmd_linux_debugfs.o \
$(PVA_SYS_DIR)/src/kmd/linux/pva_kmd_linux_device.o \
$(PVA_SYS_DIR)/src/kmd/linux/pva_kmd_linux_device_api.o \
$(PVA_SYS_DIR)/src/kmd/linux/pva_kmd_linux_device_memory.o \
$(PVA_SYS_DIR)/src/kmd/linux/pva_kmd_linux_driver.o \
$(PVA_SYS_DIR)/src/kmd/linux/pva_kmd_linux_ioctl.o \
@@ -1,19 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 */

/*
 * Unit: Utility Unit
 * SWUD Document:
 * p4sw-swarm.nvidia.com/view/sw/embedded/docs/projects/active/DRIVE_6.0/QNX/PLC_Work_Products/Element_WPs/Autonomous_Middleware/PVA/04_Unit_Design/PVA_FW/SWE-PVAFW-006-SWUD.pdf
 */
/* SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_BIT_H
#define PVA_BIT_H

@@ -1,19 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 */

/*
 * Unit: Utility Unit
 * SWUD Document:
 * p4sw-swarm.nvidia.com/view/sw/embedded/docs/projects/active/DRIVE_6.0/QNX/PLC_Work_Products/Element_WPs/Autonomous_Middleware/PVA/04_Unit_Design/PVA_FW/SWE-PVAFW-006-SWUD.pdf
 */
/* SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_CHECKPOINT_H
#define PVA_CHECKPOINT_H

@@ -1,19 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 */

/*
 * Unit: Utility Unit
 * SWUD Document:
 * p4sw-swarm.nvidia.com/view/sw/embedded/docs/projects/active/DRIVE_6.0/QNX/PLC_Work_Products/Element_WPs/Autonomous_Middleware/PVA/04_Unit_Design/PVA_FW/SWE-PVAFW-006-SWUD.pdf
 */
/* SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_CONFIG_H
#define PVA_CONFIG_H

@@ -1,19 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 */

/*
 * Unit: Utility Unit
 * SWUD Document:
 * p4sw-swarm.nvidia.com/view/sw/embedded/docs/projects/active/DRIVE_6.0/QNX/PLC_Work_Products/Element_WPs/Autonomous_Middleware/PVA/04_Unit_Design/PVA_FW/SWE-PVAFW-006-SWUD.pdf
 */
/* SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_ERRORS_H
#define PVA_ERRORS_H

@@ -1,19 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2022 NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

/*
 * Unit: Host Interface Unit
 * SWUD Document:
 * p4sw-swarm.nvidia.com/view/sw/embedded/docs/projects/active/DRIVE_6.0/QNX/PLC_Work_Products/Element_WPs/Autonomous_Middleware/PVA/04_Unit_Design/PVA_FW/SWE-PVAFW-006-SWUD.pdf
 */
/* SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_FW_VERSION_H
#define PVA_FW_VERSION_H

@@ -1,19 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

/*
 * Unit: Utility Unit
 * SWUD Document:
 * p4sw-swarm.nvidia.com/view/sw/embedded/docs/projects/active/DRIVE_6.0/QNX/PLC_Work_Products/Element_WPs/Autonomous_Middleware/PVA/04_Unit_Design/PVA_FW/SWE-PVAFW-006-SWUD.pdf
 */
/* SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_PACKED_H
#define PVA_PACKED_H
/**

@@ -1,25 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2020-2023 NVIDIA Corporation. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 */

/*
 * Unit: Direct Memory Access Driver Unit
 * SWUD Document:
 * p4sw-swarm.nvidia.com/view/sw/embedded/docs/projects/active/DRIVE_6.0/QNX/PLC_Work_Products/Element_WPs/Autonomous_Middleware/PVA/04_Unit_Design/PVA_FW/SWE-PVAFW-006-SWUD.pdf
 */
/**
 * @file pva-sys-dma.h
 *
 * @brief Types and constants related to PVA DMA setup and DMA
 * descriptors.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_SYS_DMA_H
#define PVA_SYS_DMA_H

@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2020-2023 NVIDIA Corporation. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 */

/*
 * Unit: Task Unit
 * SWUD Document:
 * p4sw-swarm.nvidia.com/view/sw/embedded/docs/projects/active/DRIVE_6.0/QNX/PLC_Work_Products/Element_WPs/Autonomous_Middleware/PVA/04_Unit_Design/PVA_FW/SWE-PVAFW-006-SWUD.pdf
 */
/**
 * @file pva-sys-params.h
 *
 * @brief Types and constants related to VPU application parameters.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_SYS_PARAMS_H
#define PVA_SYS_PARAMS_H

@@ -1,19 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 */

/*
 * Unit: Utility Unit
 * SWUD Document:
 * p4sw-swarm.nvidia.com/view/sw/embedded/docs/projects/active/DRIVE_6.0/QNX/PLC_Work_Products/Element_WPs/Autonomous_Middleware/PVA/04_Unit_Design/PVA_FW/SWE-PVAFW-006-SWUD.pdf
 */
/* SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_TYPES_H
#define PVA_TYPES_H
#include <stdint.h>

@@ -1,19 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2021 NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

/*
 * Unit: Host Interface Unit
 * SWUD Document:
 * p4sw-swarm.nvidia.com/view/sw/embedded/docs/projects/active/DRIVE_6.0/QNX/PLC_Work_Products/Element_WPs/Autonomous_Middleware/PVA/04_Unit_Design/PVA_FW/SWE-PVAFW-006-SWUD.pdf
 */
/* SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_VERSION_H
#define PVA_VERSION_H

@@ -1,165 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022-2023 NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

/*
 * Unit: VPU Unit
 * SWUD Document:
 * p4sw-swarm.nvidia.com/view/sw/embedded/docs/projects/active/DRIVE_6.0/QNX/PLC_Work_Products/Element_WPs/Autonomous_Middleware/PVA/04_Unit_Design/PVA_FW/SWE-PVAFW-006-SWUD.pdf
 */
/**
 * @file pva-vpu-syscall-interface.h
 *
 * @brief Syscall command specification
 *
 * VPU uses syscall commands to request services from R5. A syscall command is a
 * 32-bit value that consists of an 8-bit syscall ID and a 24-bit parameter. If more
 * information needs to be passed to R5, the parameter field will be a pointer
 * to a VMEM location.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_VPU_SYSCALL_INTERFACE_H
#define PVA_VPU_SYSCALL_INTERFACE_H

#include <stdint.h>

/**
 * @defgroup PVA_VPU_SYSCALL
 *
 * @brief PVA VPU SYS call IDs for each type of
 * SYS call.
 * @{
 */

//! @cond DISABLE_DOCUMENTATION

/**
 * @brief VPU Syscall id for vpu printf write.
 */
#define PVA_FW_PE_SYSCALL_ID_WRITE (1U)
//! @endcond
/**
 * @brief VPU Syscall id for Icache prefetch.
 */
#define PVA_FW_PE_SYSCALL_ID_ICACHE_PREFETCH (2U)

/**
 * @brief VPU Syscall id for masking exceptions.
 */
#define PVA_FW_PE_SYSCALL_ID_MASK_EXCEPTION (3U)

/**
 * @brief VPU Syscall id for unmasking exceptions.
 */
#define PVA_FW_PE_SYSCALL_ID_UNMASK_EXCEPTION (4U)
//! @cond DISABLE_DOCUMENTATION
/**
 * @brief VPU Syscall id for sampling VPU performance counters
 */
#define PVA_FW_PE_SYSCALL_ID_PERFMON_SAMPLE (5U)
//! @endcond
/** @} */

/**
 * @defgroup PVA_VPU_SYSCALL_WRITE_PARAM_GROUP
 *
 * @brief Parameter specification for syscall write
 */

/**
 * @defgroup PVA_VPU_SYSCALL_COMMAND_FIELDS_GROUP
 *
 * @brief The command format to be used while issuing a vpu syscall command from VPU kernel to R5.
 * The fields mentioned in this group are used for submitting the command
 * through the Signal_R5 interface from the VPU kernel.
 *
 * @{
 */

/**
 * @brief The most significant bit of the vpu syscall ID field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_ID_MSB (31U)

/**
 * @brief The least significant bit of the vpu syscall ID field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_ID_LSB (24U)

/**
 * @brief The most significant bit of the vpu syscall parameter field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_PARAM_MSB (23U)

/**
 * @brief The least significant bit of the vpu syscall parameter field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_PARAM_LSB (0U)
/** @} */
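
To make the field layout concrete, here is a minimal sketch of packing a command word from the defines above; pva_pack_syscall_cmd is illustrative and not part of this header:

static inline uint32_t pva_pack_syscall_cmd(uint8_t syscall_id, uint32_t param)
{
        /* bits [31:24] carry the syscall ID, bits [23:0] the parameter */
        uint32_t param_mask = (1U << (PVA_FW_PE_SYSCALL_PARAM_MSB + 1U)) - 1U;

        return ((uint32_t)syscall_id << PVA_FW_PE_SYSCALL_ID_LSB) |
               (param & param_mask);
}

/* e.g. pva_pack_syscall_cmd(PVA_FW_PE_SYSCALL_ID_ICACHE_PREFETCH, vmem_param) */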

/**
 * @defgroup PVA_VPU_SYSCALL_ICACHE_PREFETCH_PARAM_FIELDS_GROUP
 *
 * @brief The parameter format to be used while issuing a vpu syscall command from VPU kernel to R5 for syscall icache prefetch.
 * The fields mentioned in this group are used for submitting the icache prefetch command
 * through the Signal_R5 interface from the VPU kernel.
 *
 * @{
 */

/**
 * @brief The most significant bit of the prefetch cache line count field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_PREFETCH_CACHE_LINE_COUNT_MSB (23U)

/**
 * @brief The least significant bit of the prefetch cache line count field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_PREFETCH_CACHE_LINE_COUNT_LSB (16U)

/**
 * @brief The most significant bit of the prefetch address field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_PREFETCH_ADDR_MSB (15U)

/**
 * @brief The least significant bit of the prefetch address field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_PREFETCH_ADDR_LSB (0U)
/** @} */
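
Similarly, a hedged sketch of packing the icache-prefetch parameter itself (the helper name is illustrative): cache line count in bits [23:16], address in bits [15:0]:

static inline uint32_t pva_pack_prefetch_param(uint8_t line_count, uint16_t addr)
{
        return ((uint32_t)line_count
                << PVA_FW_PE_SYSCALL_PREFETCH_CACHE_LINE_COUNT_LSB) |
               (uint32_t)addr;
}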

/**
 * @defgroup PVA_VPU_SYSCALL_MASK_UNMASK_PARAM_FIELDS_GROUP
 *
 * @brief The parameter format to be used while issuing a vpu syscall command from VPU kernel
 * to R5 for masking or unmasking the FP NaN Exception.
 * The fields mentioned in this group are used for submitting the mask and unmask FP NaN exception command
 * through the Signal_R5 interface from the VPU kernel.
 *
 * @{
 */

/**
 * @brief Parameter specification for syscall mask/unmask exceptions
 */
#define PVA_FW_PE_MASK_FP_INV_NAN (1U << 2U)
/** @} */

/**
 * @brief Write syscall parameter will be a pointer to this struct
 * @{
@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_FW_H
#define PVA_FW_H
@@ -198,12 +190,15 @@ static inline uint32_t pva_fw_queue_space(uint32_t head, uint32_t tail,
#define PVA_FW_MSG_R5_READY_TIME_LO_IDX 3
#define PVA_FW_MSG_R5_READY_TIME_HI_IDX 4

#define PVA_MAX_DEBUG_LOG_MSG_CHARACTERS 100
/* Parameters for message FLUSH PRINT */
struct pva_fw_print_buffer_header {
#define PVA_FW_PRINT_BUFFER_OVERFLOWED (1 << 0)
#define PVA_FW_PRINT_FAILURE (1 << 1)
        uint32_t flags;
        uint32_t head;
        uint32_t tail;
        uint32_t size;
        /* Followed by print content */
};
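
Given the head/tail fields above, a consumer might drain the circular print buffer roughly as follows; this is a sketch, with 'payload' assumed to point just past the header (per the "Followed by print content" note) and 'emit' a hypothetical character sink:

static void pva_drain_print_buffer(struct pva_fw_print_buffer_header *hdr,
                                   const char *payload, void (*emit)(char))
{
        uint32_t tail = hdr->tail;

        if (hdr->flags & PVA_FW_PRINT_BUFFER_OVERFLOWED) {
                /* producer lapped the consumer; some output was lost */
        }
        while (tail != hdr->head) {
                emit(payload[tail]);
                tail = (tail + 1U) % hdr->size;
        }
        hdr->tail = tail;
}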
@@ -276,16 +271,32 @@ enum pva_fw_timestamp_t {
        TIMESTAMP_TYPE_TSE = 0,
        TIMESTAMP_TYPE_CYCLE_COUNT = 1
};

struct pva_fw_profiling_buffer_header {
#define PVA_FW_PROFILING_BUFFER_OVERFLOWED (1 << 0)
#define PVA_FW_PROFILING_FAILURE (1 << 1)
        uint32_t flags;
        uint32_t tail;
        /* Followed by print content */
};
/* End of PVA FW Event profiling definitions */

/*
 * The buffers shared between KMD and FW may contain a mixture of different
 * types of messages. Each message type may have a different packing and size.
 * However, to keep processing of messages simple and efficient, we will
 * enforce enqueuing and dequeuing of fixed size messages only. The size of
 * each element in the buffer would be equal to the size of the largest possible
 * message. KMD can further parse these messages to extract the exact size of the
 * message.
 */
#define PVA_KMD_FW_BUF_ELEMENT_SIZE (sizeof(uint32_t) + sizeof(uint64_t))

// TODO: remove element size and buffer size fields from this struct.
// This struct is shared between KMD and FW. FW should not be able to change
// buffer size properties as KMD might use this for validation of buffer accesses.
// If FW somehow corrupts 'size', KMD might end up accessing out of bounds.
struct pva_fw_shared_buffer_header {
#define PVA_KMD_FW_BUF_FLAG_OVERFLOW (1 << 0)
#define PVA_KMD_FW_BUF_FLAG_ERROR (1 << 1)
        uint32_t flags;
        uint32_t element_size;
        uint32_t head;
        uint32_t tail;
};
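
As a sketch of the fixed-size-element scheme, and of the validation concern in the TODO above, a KMD-side dequeue might look like this; 'kmd_buf_size' is a hypothetical KMD-owned capacity (assumed to be a multiple of the element size) used instead of any FW-writable field:

#include <string.h>

static uint32_t pva_kmd_dequeue(struct pva_fw_shared_buffer_header *hdr,
                                const uint8_t *payload, uint32_t kmd_buf_size,
                                uint8_t *out, uint32_t max_elems)
{
        uint32_t n = 0U;
        uint32_t tail = hdr->tail;
        uint32_t head = hdr->head;

        /* FW-written indices are untrusted; bound them before use */
        if (head >= kmd_buf_size || tail >= kmd_buf_size)
                return 0U;
        while (tail != head && n < max_elems) {
                memcpy(&out[n * PVA_KMD_FW_BUF_ELEMENT_SIZE], &payload[tail],
                       PVA_KMD_FW_BUF_ELEMENT_SIZE);
                tail = (tail + PVA_KMD_FW_BUF_ELEMENT_SIZE) % kmd_buf_size;
                n++;
        }
        hdr->tail = tail;
        return n;
}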

struct pva_kmd_fw_tegrastats {
        uint64_t window_start_time;
        uint64_t window_end_time;

@@ -1,19 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA Corporation is strictly prohibited.
 */

/*
 * Unit: Boot Unit
 * SWUD Document:
 * p4sw-swarm.nvidia.com/view/sw/embedded/docs/projects/active/DRIVE_6.0/QNX/PLC_Work_Products/Element_WPs/Autonomous_Middleware/PVA/04_Unit_Design/PVA_FW/SWE-PVAFW-006-SWUD.pdf
 */
/* SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_FW_ADDRESS_MAP_H
#define PVA_FW_ADDRESS_MAP_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_FW_HYP_H
#define PVA_FW_HYP_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_RESOURCE_H
#define PVA_RESOURCE_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_API_H
#define PVA_API_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_API_CMDBUF_H
#define PVA_API_CMDBUF_H
@@ -532,7 +524,12 @@ struct pva_cmd_retire_barrier_group {
        struct pva_cmd_header header;
};

#define PVA_CMD_OPCODE_COUNT 37U
struct pva_cmd_gr_check {
#define PVA_CMD_OPCODE_GR_CHECK 37U
        struct pva_cmd_header header;
};

#define PVA_CMD_OPCODE_COUNT 38U

struct pva_cmd_init_resource_table {
#define PVA_CMD_OPCODE_INIT_RESOURCE_TABLE (0U | PVA_CMD_PRIV_OPCODE_FLAG)
@@ -584,12 +581,8 @@ struct pva_cmd_deinit_queue {
struct pva_cmd_enable_fw_profiling {
#define PVA_CMD_OPCODE_ENABLE_FW_PROFILING (5U | PVA_CMD_PRIV_OPCODE_FLAG)
        struct pva_cmd_header header;
        uint8_t buffer_offset_hi;
        uint8_t timestamp_type;
        uint8_t pad[2];
        uint32_t buffer_resource_id;
        uint32_t buffer_size;
        uint32_t buffer_offset_lo;
        uint8_t pad[3];
        uint32_t filter;
};

@@ -619,7 +612,30 @@ struct pva_cmd_resume_fw {
        struct pva_cmd_header header;
};

#define PVA_CMD_PRIV_OPCODE_COUNT 10U
struct pva_cmd_init_shared_dram_buffer {
#define PVA_CMD_OPCODE_INIT_SHARED_DRAM_BUFFER (10U | PVA_CMD_PRIV_OPCODE_FLAG)
        struct pva_cmd_header header;
        uint8_t interface;
        uint8_t buffer_iova_hi;
        uint8_t pad[2];
        uint32_t buffer_iova_lo;
        uint32_t buffer_size;
};

struct pva_cmd_deinit_shared_dram_buffer {
#define PVA_CMD_OPCODE_DEINIT_SHARED_DRAM_BUFFER \
        (11U | PVA_CMD_PRIV_OPCODE_FLAG)
        struct pva_cmd_header header;
        uint8_t interface;
        uint8_t pad[3];
};
struct pva_cmd_set_debug_log_level {
#define PVA_CMD_OPCODE_SET_DEBUG_LOG_LEVEL (12U | PVA_CMD_PRIV_OPCODE_FLAG)
        struct pva_cmd_header header;
        uint32_t log_level;
};

#define PVA_CMD_PRIV_OPCODE_COUNT 13U

#define PVA_MAX_CMDBUF_CHUNK_LEN 1024
#define PVA_MAX_CMDBUF_CHUNK_SIZE (sizeof(uint32_t) * PVA_MAX_CMDBUF_CHUNK_LEN)

@@ -1,15 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
 *
 * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
 * property and proprietary rights in and to this material, related
 * documentation and any modifications thereto. Any use, reproduction,
 * disclosure or distribution of this material and related documentation
 * without an express license agreement from NVIDIA CORPORATION or
 * its affiliates is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_API_CUDA_H
#define PVA_API_CUDA_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_API_DMA_H
#define PVA_API_DMA_H
#include "pva_api_types.h"

drivers/video/tegra/host/pva/src/include/pva_api_hostmem.h (new file, 32 lines)
@@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_API_HOSTMEM_H
#define PVA_API_HOSTMEM_H

#ifdef __cplusplus
extern "C" {
#endif

#include "pva_api_types.h"

/**
 * @brief Create a PVA pointer from a libc-allocated, page-aligned host CPU pointer.
 * Linux only API.
 *
 * The caller is responsible for freeing the PVA memory object.
 *
 * @param[in] host_ptr Pointer to the host memory which needs to be imported to PVA.
 * @param[in] size Size of the buffer to be imported.
 * @param[in] access_mode Access mode for the buffer, determining the PVA's permissions for interaction.
 * @param[out] out_mem A pointer to the PVA memory object representing the imported buffer.
 */
enum pva_error pva_hostptr_import(void *host_ptr, size_t size,
                                  uint32_t access_mode,
                                  struct pva_memory **out_mem);

#ifdef __cplusplus
}
#endif

#endif // PVA_API_HOSTMEM_H
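
A hedged, user-space usage sketch (Linux only, per the note above); PVA_ACCESS_RW is a placeholder for whatever access-mode constant pva_api_types.h actually defines:

#include <stdlib.h>

static struct pva_memory *import_scratch(size_t bytes)
{
        struct pva_memory *mem = NULL;
        void *p = aligned_alloc(4096, bytes); /* must be page-aligned */

        if (p == NULL)
                return NULL;
        if (pva_hostptr_import(p, bytes, PVA_ACCESS_RW, &mem) != PVA_SUCCESS) {
                free(p);
                return NULL;
        }
        return mem; /* caller must later free the PVA memory object */
}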
@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_API_NVSCI_H
#define PVA_API_NVSCI_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_API_TYPES_H
#define PVA_API_TYPES_H
#if !defined(__KERNEL__)

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_API_VPU_H
#define PVA_API_VPU_H
#include "pva_api_types.h"
@@ -30,4 +22,190 @@ struct pva_vpu_instance_data {
        uint32_t l2ram_size;
};

/**
 * @defgroup PVA_VPU_SYSCALL
 *
 * @brief PVA VPU SYS call IDs for each type of
 * SYS call.
 * @{
 */

//! @cond DISABLE_DOCUMENTATION

/**
 * @brief VPU Syscall id for vpu printf write.
 */
#define PVA_FW_PE_SYSCALL_ID_WRITE (1U)
//! @endcond
/**
 * @brief VPU Syscall id for Icache prefetch.
 */
#define PVA_FW_PE_SYSCALL_ID_ICACHE_PREFETCH (2U)

/**
 * @brief VPU Syscall id for masking exceptions.
 */
#define PVA_FW_PE_SYSCALL_ID_MASK_EXCEPTION (3U)

/**
 * @brief VPU Syscall id for unmasking exceptions.
 */
#define PVA_FW_PE_SYSCALL_ID_UNMASK_EXCEPTION (4U)
//! @cond DISABLE_DOCUMENTATION
/**
 * @brief VPU Syscall id for sampling VPU performance counters
 */
#define PVA_FW_PE_SYSCALL_ID_PERFMON_SAMPLE (5U)
//! @endcond
/** @} */

/**
 * @defgroup PVA_PPE_SYSCALL
 *
 * @brief PVA PPE SYS call IDs for each type of
 * SYS call.
 * @{
 */

//! @cond DISABLE_DOCUMENTATION

/**
 * @brief PPE Syscall id for ppe printf write.
 */
#define PVA_FW_PPE_SYSCALL_ID_WRITE (1U)

/**
 * @brief PPE Syscall id for masking exceptions.
 */
#define PVA_FW_PPE_SYSCALL_ID_MASK_EXCEPTION (2U)

/**
 * @brief PPE Syscall id for unmasking exceptions.
 */
#define PVA_FW_PPE_SYSCALL_ID_UNMASK_EXCEPTION (3U)

/**
 * @brief PPE Syscall id for sampling VPU performance counters
 */
#define PVA_FW_PPE_SYSCALL_ID_PERFMON_SAMPLE (4U)
/**
 * @brief PPE Syscall id for Icache prefetch.
 */
#define PVA_FW_PPE_SYSCALL_ID_ICACHE_PREFETCH (5U)

//! @endcond
/** @} */

/**
 * @brief Lookup table to convert PPE syscall IDs to VPU syscall IDs
 * Index is PPE syscall ID, value is corresponding VPU syscall ID
 */
#define PVA_FW_PPE_TO_VPU_SYSCALL_LUT \
        { \
                0U, /* Index 0: Invalid */ \
                PVA_FW_PE_SYSCALL_ID_WRITE, /* Index 1: Write */ \
                PVA_FW_PE_SYSCALL_ID_MASK_EXCEPTION, /* Index 2: Mask Exception */ \
                PVA_FW_PE_SYSCALL_ID_UNMASK_EXCEPTION, /* Index 3: Unmask Exception */ \
                PVA_FW_PE_SYSCALL_ID_PERFMON_SAMPLE, /* Index 4: Perfmon Sample */ \
                PVA_FW_PE_SYSCALL_ID_ICACHE_PREFETCH /* Index 5: ICache Prefetch */ \
        }

/**
 * @brief Maximum valid PPE syscall ID
 */
#define PVA_FW_PPE_SYSCALL_ID_MAX PVA_FW_PPE_SYSCALL_ID_ICACHE_PREFETCH
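
A small sketch of how the LUT and bound above might be consulted when R5 services a PPE syscall; the helper name is illustrative:

static inline uint32_t pva_ppe_to_vpu_syscall_id(uint32_t ppe_id)
{
        static const uint32_t ppe_to_vpu[] = PVA_FW_PPE_TO_VPU_SYSCALL_LUT;

        if ((ppe_id == 0U) || (ppe_id > PVA_FW_PPE_SYSCALL_ID_MAX))
                return 0U; /* index 0 is marked Invalid in the LUT */
        return ppe_to_vpu[ppe_id];
}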

/**
 * @defgroup PVA_VPU_SYSCALL_WRITE_PARAM_GROUP
 *
 * @brief Parameter specification for syscall write
 */

/**
 * @defgroup PVA_VPU_SYSCALL_COMMAND_FIELDS_GROUP
 *
 * @brief The command format to be used while issuing a vpu syscall command from VPU kernel to R5.
 * The fields mentioned in this group are used for submitting the command
 * through the Signal_R5 interface from the VPU kernel.
 *
 * @{
 */

/**
 * @brief The most significant bit of the vpu syscall ID field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_ID_MSB (31U)

/**
 * @brief The least significant bit of the vpu syscall ID field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_ID_LSB (24U)

/**
 * @brief The most significant bit of the vpu syscall parameter field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_PARAM_MSB (23U)

/**
 * @brief The least significant bit of the vpu syscall parameter field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_PARAM_LSB (0U)
/** @} */

/**
 * @defgroup PVA_VPU_SYSCALL_ICACHE_PREFETCH_PARAM_FIELDS_GROUP
 *
 * @brief The parameter format to be used while issuing a vpu syscall command from VPU kernel to R5 for syscall icache prefetch.
 * The fields mentioned in this group are used for submitting the icache prefetch command
 * through the Signal_R5 interface from the VPU kernel.
 *
 * @{
 */

/**
 * @brief The most significant bit of the prefetch cache line count field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_PREFETCH_CACHE_LINE_COUNT_MSB (23U)

/**
 * @brief The least significant bit of the prefetch cache line count field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_PREFETCH_CACHE_LINE_COUNT_LSB (16U)

/**
 * @brief The most significant bit of the prefetch address field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_PREFETCH_ADDR_MSB (15U)

/**
 * @brief The least significant bit of the prefetch address field in
 * the vpu syscall command interface
 */
#define PVA_FW_PE_SYSCALL_PREFETCH_ADDR_LSB (0U)
/** @} */

/**
 * @defgroup PVA_VPU_SYSCALL_MASK_UNMASK_PARAM_FIELDS_GROUP
 *
 * @brief The parameter format to be used while issuing a vpu syscall command from VPU kernel
 * to R5 for masking or unmasking the FP NaN Exception.
 * The fields mentioned in this group are used for submitting the mask and unmask FP NaN exception command
 * through the Signal_R5 interface from the VPU kernel.
 *
 * @{
 */

/**
 * @brief Parameter specification for syscall mask/unmask exceptions
 */
#define PVA_FW_PE_MASK_FP_INV_NAN (1U << 2U)
/** @} */

#endif // PVA_API_VPU_H

drivers/video/tegra/host/pva/src/kmd/common/pva_kmd_abort.c (new file, 18 lines)
@@ -0,0 +1,18 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_abort.h"
#include "pva_kmd_shim_init.h"

void pva_kmd_abort(struct pva_kmd_device *pva)
{
        //TODO: Report to FSI first about the SW error code.
        pva_kmd_log_err("Abort: FW Reset Assert");
        /* Put the FW in reset ASSERT so that user space cannot access
           the CCQ, forcing it to destroy its contexts. Once all contexts
           are destroyed, KMD powers off the FW; on the first new context
           creation, KMD reloads the firmware image and powers the device
           back on. */
        pva_kmd_fw_reset_assert(pva);
        pva->recovery = true;
}
drivers/video/tegra/host/pva/src/kmd/common/pva_kmd_abort.h (new file, 10 lines)
@@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_ABORT_H
#define PVA_KMD_ABORT_H
#include "pva_kmd_device.h"
#include "pva_kmd_utils.h"

void pva_kmd_abort(struct pva_kmd_device *pva);

#endif //PVA_KMD_ABORT_H
@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "pva_kmd_block_allocator.h"
#include "pva_kmd_utils.h"
#include "pva_api.h"
@@ -34,7 +26,7 @@ pva_kmd_block_allocator_init(struct pva_kmd_block_allocator *allocator,
                err = PVA_NOMEM;
                goto err_out;
        }

        pva_kmd_mutex_init(&allocator->allocator_lock);
        return PVA_SUCCESS;
err_out:
        return err;
@@ -43,6 +35,7 @@ err_out:
void pva_kmd_block_allocator_deinit(struct pva_kmd_block_allocator *allocator)
{
        pva_kmd_free(allocator->slot_in_use);
        pva_kmd_mutex_deinit(&allocator->allocator_lock);
}

static inline void *get_block(struct pva_kmd_block_allocator *allocator,
@@ -66,6 +59,7 @@ void *pva_kmd_alloc_block(struct pva_kmd_block_allocator *allocator,
        void *block = NULL;
        uint32_t slot = INVALID_ID;

        pva_kmd_mutex_lock(&allocator->allocator_lock);
        if (allocator->free_slot_head != INVALID_ID) {
                slot = allocator->free_slot_head;
                allocator->free_slot_head =
@@ -75,15 +69,17 @@ void *pva_kmd_alloc_block(struct pva_kmd_block_allocator *allocator,
                        slot = allocator->next_free_slot;
                        allocator->next_free_slot++;
                } else {
                        goto err_out;
                        goto unlock;
                }
        }
        allocator->slot_in_use[slot] = true;
        pva_kmd_mutex_unlock(&allocator->allocator_lock);

        *out_id = slot + allocator->base_id;
        block = get_block(allocator, slot);
        return block;
err_out:
unlock:
        pva_kmd_mutex_unlock(&allocator->allocator_lock);
        return NULL;
}
@@ -97,13 +93,13 @@ static bool is_slot_valid(struct pva_kmd_block_allocator *allocator,
        return allocator->slot_in_use[slot];
}

void *pva_kmd_get_block(struct pva_kmd_block_allocator *allocator, uint32_t id)
void *pva_kmd_get_block_unsafe(struct pva_kmd_block_allocator *allocator,
                               uint32_t id)
{
        uint32_t slot = id - allocator->base_id;
        if (!is_slot_valid(allocator, slot)) {
                return NULL;
        }

        return get_block(allocator, slot);
}
@@ -112,8 +108,11 @@ enum pva_error pva_kmd_free_block(struct pva_kmd_block_allocator *allocator,
{
        uint32_t slot = id - allocator->base_id;
        uint32_t *next;
        enum pva_error err = PVA_SUCCESS;
        pva_kmd_mutex_lock(&allocator->allocator_lock);
        if (!is_slot_valid(allocator, slot)) {
                return PVA_INVAL;
                err = PVA_INVAL;
                goto unlock;
        }

        allocator->slot_in_use[slot] = false;
@@ -121,5 +120,7 @@ enum pva_error pva_kmd_free_block(struct pva_kmd_block_allocator *allocator,
        *next = allocator->free_slot_head;
        allocator->free_slot_head = slot;

        return PVA_SUCCESS;
unlock:
        pva_kmd_mutex_unlock(&allocator->allocator_lock);
        return err;
}
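
Taken together, the allocator's expected lifecycle is roughly the following sketch (backing memory and sizes are illustrative; pva_kmd_alloc_block() and pva_kmd_free_block() now take allocator_lock internally, per the changes above):

struct pva_kmd_block_allocator alloc;
uint32_t id;
void *blk;

pva_kmd_block_allocator_init(&alloc, backing_mem, 0 /* base_id */,
                             block_size, num_blocks);
blk = pva_kmd_alloc_block(&alloc, &id);
if (blk != NULL) {
        /* ... use blk ... */
        pva_kmd_free_block(&alloc, id);
}
pva_kmd_block_allocator_deinit(&alloc);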
@@ -1,17 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_BLOCK_ALLOCATOR_H
#define PVA_KMD_BLOCK_ALLOCATOR_H

#include "pva_api.h"
#include "pva_kmd_mutex.h"

struct pva_kmd_block_allocator {
        uint32_t free_slot_head;
@@ -21,6 +14,7 @@ struct pva_kmd_block_allocator {
        uint32_t block_size;
        void *blocks;
        bool *slot_in_use;
        pva_kmd_mutex_t allocator_lock;
};

enum pva_error
@@ -41,7 +35,16 @@ pva_kmd_zalloc_block(struct pva_kmd_block_allocator *allocator,
        return ptr;
}

void *pva_kmd_get_block(struct pva_kmd_block_allocator *allocator, uint32_t id);
/** This API is not thread safe and has to be explicitly locked around any use of the obtained block.
 * This ensures that a parallel free operation does not leave a dangling pointer to the obtained block.
 * Correct usage:
 * lock(allocator)
 * block = pva_kmd_get_block_unsafe();
 * use block
 * unlock(allocator)
 */
void *pva_kmd_get_block_unsafe(struct pva_kmd_block_allocator *allocator,
                               uint32_t id);
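
In sketch form, the required discipline (pva_kmd_destroy_all_queues() later in this commit follows exactly this pattern):

pva_kmd_mutex_lock(&alloc->allocator_lock);
blk = pva_kmd_get_block_unsafe(alloc, id);
if (blk != NULL) {
        /* safe: a concurrent pva_kmd_free_block() cannot reclaim
         * the slot while allocator_lock is held */
}
pva_kmd_mutex_unlock(&alloc->allocator_lock);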
enum pva_error pva_kmd_free_block(struct pva_kmd_block_allocator *allocator,
                                  uint32_t id);

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "pva_kmd_cmdbuf.h"
#include "pva_api_cmdbuf.h"
#include "pva_kmd_utils.h"
@@ -66,11 +58,13 @@ enum pva_error pva_kmd_cmdbuf_chunk_pool_init(
        err = pva_kmd_block_allocator_init(&cmdbuf_chunk_pool->block_allocator,
                                           mem_base_va, 0, chunk_size,
                                           num_chunks);
        pva_kmd_mutex_init(&cmdbuf_chunk_pool->chunk_state_lock);
        return err;
}

void pva_kmd_cmdbuf_chunk_pool_deinit(struct pva_kmd_cmdbuf_chunk_pool *pool)
{
        pva_kmd_mutex_deinit(&pool->chunk_state_lock);
        pva_kmd_block_allocator_deinit(&pool->block_allocator);
}
@@ -124,6 +118,7 @@ pva_kmd_alloc_cmdbuf_chunk(struct pva_kmd_cmdbuf_chunk_pool *pool,
        enum pva_error err = PVA_SUCCESS;
        void *chunk;

        pva_kmd_mutex_lock(&pool->chunk_state_lock);
        chunk = pva_kmd_alloc_block(&pool->block_allocator, out_chunk_id);
        if (chunk == NULL) {
                if (recycle_chunks(pool)) {
@@ -134,7 +129,7 @@ pva_kmd_alloc_cmdbuf_chunk(struct pva_kmd_cmdbuf_chunk_pool *pool,
                        err = PVA_NOMEM;
                }
        }

        pva_kmd_mutex_unlock(&pool->chunk_state_lock);
        return err;
}

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_CMDBUF_H
#define PVA_KMD_CMDBUF_H
#include "pva_fw.h"
@@ -37,6 +29,7 @@ struct pva_kmd_cmdbuf_chunk_pool {
        uint64_t chunk_states_offset;
        void *mem_base_va;
        struct pva_kmd_block_allocator block_allocator;
        pva_kmd_mutex_t chunk_state_lock;
};

static inline uint64_t
@@ -213,16 +206,10 @@ pva_kmd_set_cmd_unregister_resource(struct pva_cmd_unregister_resource *cmd,

static inline void
pva_kmd_set_cmd_enable_fw_profiling(struct pva_cmd_enable_fw_profiling *cmd,
                                    uint32_t buffer_resource_id,
                                    uint32_t buffer_size, uint64_t offset,
                                    uint32_t filter, uint8_t timestamp_type)
{
        cmd->header.opcode = PVA_CMD_OPCODE_ENABLE_FW_PROFILING;
        cmd->header.len = sizeof(*cmd) / sizeof(uint32_t);
        cmd->buffer_resource_id = buffer_resource_id;
        cmd->buffer_offset_hi = iova_hi(offset);
        cmd->buffer_offset_lo = iova_lo(offset);
        cmd->buffer_size = buffer_size;
        cmd->filter = filter;
        cmd->timestamp_type = timestamp_type;
}
@@ -247,6 +234,15 @@ static inline void pva_kmd_set_cmd_get_tegra_stats(
        cmd->enabled = enabled;
}

static inline void
pva_kmd_set_cmd_set_debug_log_level(struct pva_cmd_set_debug_log_level *cmd,
                                    uint32_t log_level)
{
        cmd->header.opcode = PVA_CMD_OPCODE_SET_DEBUG_LOG_LEVEL;
        cmd->header.len = sizeof(*cmd) / sizeof(uint32_t);
        cmd->log_level = log_level;
}
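
A hedged sketch of how the new debugfs node might drive this command end to end; pva_kmd_submit_priv_cmd() is a hypothetical stand-in for the driver's actual privileged-command submission path:

static enum pva_error pva_kmd_set_fw_log_level(struct pva_kmd_device *pva,
                                               uint32_t level)
{
        struct pva_cmd_set_debug_log_level cmd;

        pva_kmd_set_cmd_set_debug_log_level(&cmd, level);
        return pva_kmd_submit_priv_cmd(pva, &cmd.header); /* hypothetical */
}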

static inline void pva_kmd_set_cmd_suspend_fw(struct pva_cmd_suspend_fw *cmd)
{
        uint64_t len = (sizeof(*cmd) / sizeof(uint32_t));
@@ -262,4 +258,24 @@ static inline void pva_kmd_set_cmd_resume_fw(struct pva_cmd_resume_fw *cmd)
        ASSERT(len <= 255u);
        cmd->header.len = (uint8_t)(len);
}

static inline void pva_kmd_set_cmd_init_shared_dram_buffer(
        struct pva_cmd_init_shared_dram_buffer *cmd, uint8_t interface,
        uint32_t buffer_iova, uint32_t buffer_size)
{
        cmd->header.opcode = PVA_CMD_OPCODE_INIT_SHARED_DRAM_BUFFER;
        cmd->header.len = sizeof(*cmd) / sizeof(uint32_t);
        cmd->buffer_iova_hi = iova_hi(buffer_iova);
        cmd->buffer_iova_lo = iova_lo(buffer_iova);
        cmd->buffer_size = buffer_size;
        cmd->interface = interface;
}

static inline void pva_kmd_set_cmd_deinit_shared_dram_buffer(
        struct pva_cmd_deinit_shared_dram_buffer *cmd, uint8_t interface)
{
        cmd->header.opcode = PVA_CMD_OPCODE_DEINIT_SHARED_DRAM_BUFFER;
        cmd->header.len = sizeof(*cmd) / sizeof(uint32_t);
        cmd->interface = interface;
}
#endif // PVA_KMD_CMDBUF_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_CONSTANTS_H
#define PVA_KMD_CONSTANTS_H
@@ -38,8 +30,8 @@

#define PVA_KMD_TIMEOUT(val) (val * PVA_KMD_TIMEOUT_FACTOR)

#define PVA_KMD_TIMEOUT_RESOURCE_SEMA_MS PVA_KMD_TIMEOUT(100) /*< 100 ms */
#define PVA_KMD_WAIT_FW_TIMEOUT_US PVA_KMD_TIMEOUT(1000000) /*< 1 second*/
#define PVA_KMD_TIMEOUT_RESOURCE_SEMA_MS PVA_KMD_TIMEOUT(400) /*< 400 ms */
#define PVA_KMD_WAIT_FW_TIMEOUT_US PVA_KMD_TIMEOUT(100000) /*< 100 ms */
#define PVA_KMD_WAIT_FW_POLL_INTERVAL_US PVA_KMD_TIMEOUT(100) /*< 100 us*/
#define PVA_KMD_FW_BOOT_TIMEOUT_MS PVA_KMD_TIMEOUT(1000) /*< 1 second */
@@ -59,4 +51,6 @@
#endif
// clang-format on

#define PVA_KMD_MAX_NUM_USER_DMA_CONFIG 1024

#endif // PVA_KMD_CONSTANTS_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_utils.h"
#include "pva_constants.h"
@@ -17,15 +9,17 @@
#include "pva_kmd_queue.h"
#include "pva_kmd_context.h"
#include "pva_kmd_constants.h"
#include "pva_kmd_msg.h"

struct pva_kmd_context *pva_kmd_context_create(struct pva_kmd_device *pva)
{
        uint32_t alloc_id;
        enum pva_error err;
        struct pva_kmd_context *ctx;
        enum pva_error err = PVA_SUCCESS;
        struct pva_kmd_context *ctx = NULL;

        ctx = pva_kmd_zalloc_block(&pva->context_allocator, &alloc_id);
        if (ctx == NULL) {
                err = PVA_NOMEM;
                goto err_out;
        }
        ctx->ccq_id = alloc_id;
@@ -35,10 +29,11 @@ struct pva_kmd_context *pva_kmd_context_create(struct pva_kmd_device *pva)
        ctx->max_n_queues = PVA_MAX_NUM_QUEUES_PER_CONTEXT;
        ctx->ccq0_lock_ptr = &pva->ccq0_lock;
        pva_kmd_mutex_init(&ctx->ccq_lock);
        pva_kmd_mutex_init(&ctx->resource_table_lock);
        pva_kmd_mutex_init(&ctx->ocb_lock);
        ctx->queue_allocator_mem = pva_kmd_zalloc(sizeof(struct pva_kmd_queue) *
                                                  ctx->max_n_queues);
        if (ctx->queue_allocator_mem == NULL) {
                err = PVA_NOMEM;
                goto free_ctx;
        }
@@ -49,13 +44,24 @@ struct pva_kmd_context *pva_kmd_context_create(struct pva_kmd_device *pva)
        if (err != PVA_SUCCESS) {
                goto free_queue_mem;
        }
        /* Power on PVA if not already */
        err = pva_kmd_device_busy(ctx->pva);
        if (err != PVA_SUCCESS) {
                goto deinit_queue_allocator;
        }

        return ctx;

deinit_queue_allocator:
        pva_kmd_block_allocator_deinit(&ctx->queue_allocator);
free_queue_mem:
        pva_kmd_free(ctx->queue_allocator_mem);
free_ctx:
        pva_kmd_free(ctx);
        pva_kmd_mutex_deinit(&ctx->ccq_lock);
        pva_kmd_mutex_deinit(&ctx->ocb_lock);
        pva_kmd_free_block(&pva->context_allocator, alloc_id);
err_out:
        pva_kmd_log_err("Failed to create PVA context");
        return NULL;
}
@@ -186,12 +192,6 @@ enum pva_error pva_kmd_context_init(struct pva_kmd_context *ctx,
        struct pva_syncpt_rw_info *syncpts;
        uint64_t size;

        /* Power on PVA if not already */
        err = pva_kmd_device_busy(ctx->pva);
        if (err != PVA_SUCCESS) {
                goto err_out;
        }

        /* Allocate RW syncpoints for this context */
        syncpts = (struct pva_syncpt_rw_info *)pva_kmd_alloc_block(
                &ctx->pva->syncpt_allocator, &ctx->syncpt_block_index);
@@ -200,7 +200,7 @@ enum pva_error pva_kmd_context_init(struct pva_kmd_context *ctx,
        /* Init resource table for this context */
        err = pva_kmd_resource_table_init(&ctx->ctx_resource_table, ctx->pva,
                                          ctx->smmu_ctx_id, res_table_capacity,
                                          res_table_capacity);
                                          PVA_KMD_MAX_NUM_USER_DMA_CONFIG);
        if (err != PVA_SUCCESS) {
                goto drop_device;
        }
@@ -238,13 +238,12 @@ enum pva_error pva_kmd_context_init(struct pva_kmd_context *ctx,
        }

        /* Add submit memory to resource table */
        pva_kmd_mutex_lock(&ctx->pva->resource_table_lock);
        err = pva_kmd_add_dram_buffer_resource(&ctx->pva->dev_resource_table,
                                               ctx->submit_memory,
                                               &ctx->submit_memory_resource_id);
        pva_kmd_mutex_unlock(&ctx->pva->resource_table_lock);
        if (err != PVA_SUCCESS) {
                goto free_submit_memory;
                pva_kmd_device_memory_free(ctx->submit_memory);
                goto queue_deinit;
        }

        /* Init chunk pool */
@@ -277,6 +276,15 @@ enum pva_error pva_kmd_context_init(struct pva_kmd_context *ctx,
|
||||
if (err != PVA_SUCCESS) {
|
||||
goto deinit_submitter;
|
||||
}
|
||||
|
||||
err = pva_kmd_shared_buffer_init(
|
||||
ctx->pva, ctx->ccq_id, PVA_KMD_FW_BUF_ELEMENT_SIZE,
|
||||
res_table_capacity, pva_kmd_handle_msg_resource_unreg,
|
||||
pva_kmd_resource_table_lock, pva_kmd_resource_table_unlock);
|
||||
if (err != PVA_SUCCESS) {
|
||||
goto deinit_submitter;
|
||||
}
|
||||
|
||||
ctx->inited = true;
|
||||
|
||||
return PVA_SUCCESS;
|
||||
@@ -288,8 +296,6 @@ deinit_submitter:
|
||||
free_dram_buffer_resource:
|
||||
pva_kmd_drop_resource(&ctx->pva->dev_resource_table,
|
||||
ctx->submit_memory_resource_id);
|
||||
free_submit_memory:
|
||||
pva_kmd_device_memory_free(ctx->submit_memory);
|
||||
queue_deinit:
|
||||
pva_kmd_queue_deinit(&ctx->ctx_queue);
|
||||
pva_kmd_device_memory_free(ctx->ctx_queue_mem);
|
||||
@@ -297,7 +303,6 @@ deinit_table:
|
||||
pva_kmd_resource_table_deinit(&ctx->ctx_resource_table);
|
||||
drop_device:
|
||||
pva_kmd_device_idle(ctx->pva);
|
||||
err_out:
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -306,17 +311,20 @@ void pva_kmd_context_deinit(struct pva_kmd_context *ctx)
|
||||
enum pva_error err;
|
||||
|
||||
if (ctx->inited) {
|
||||
err = notify_fw_context_deinit(ctx);
|
||||
if (!ctx->pva->recovery) {
|
||||
err = notify_fw_context_deinit(ctx);
|
||||
ASSERT(err == PVA_SUCCESS);
|
||||
}
|
||||
|
||||
err = pva_kmd_shared_buffer_deinit(ctx->pva, ctx->ccq_id);
|
||||
ASSERT(err == PVA_SUCCESS);
|
||||
pva_kmd_verify_all_resources_free(&ctx->ctx_resource_table);
|
||||
|
||||
pva_kmd_device_idle(ctx->pva);
|
||||
pva_kmd_mutex_deinit(&ctx->submit_lock);
|
||||
pva_kmd_mutex_deinit(&ctx->chunk_pool_lock);
|
||||
pva_kmd_cmdbuf_chunk_pool_deinit(&ctx->chunk_pool);
|
||||
pva_kmd_mutex_lock(&ctx->pva->resource_table_lock);
|
||||
pva_kmd_drop_resource(&ctx->pva->dev_resource_table,
|
||||
ctx->submit_memory_resource_id);
|
||||
pva_kmd_mutex_unlock(&ctx->pva->resource_table_lock);
|
||||
pva_kmd_queue_deinit(&ctx->ctx_queue);
|
||||
pva_kmd_device_memory_free(ctx->ctx_queue_mem);
|
||||
pva_kmd_resource_table_deinit(&ctx->ctx_resource_table);
|
||||
@@ -330,14 +338,21 @@ static void pva_kmd_destroy_all_queues(struct pva_kmd_context *ctx)
|
||||
{
|
||||
enum pva_error err;
|
||||
struct pva_kmd_queue_destroy_in_args args;
|
||||
struct pva_kmd_queue *queue;
|
||||
|
||||
for (uint32_t queue_id = 0u; queue_id < ctx->max_n_queues; queue_id++) {
|
||||
struct pva_kmd_queue *queue =
|
||||
pva_kmd_get_block(&ctx->queue_allocator, queue_id);
|
||||
pva_kmd_mutex_lock(&ctx->queue_allocator.allocator_lock);
|
||||
queue = pva_kmd_get_block_unsafe(&ctx->queue_allocator,
|
||||
queue_id);
|
||||
if (queue != NULL) {
|
||||
pva_kmd_mutex_unlock(
|
||||
&ctx->queue_allocator.allocator_lock);
|
||||
args.queue_id = queue_id;
|
||||
err = pva_kmd_queue_destroy(ctx, &args);
|
||||
ASSERT(err == PVA_SUCCESS);
|
||||
} else {
|
||||
pva_kmd_mutex_unlock(
|
||||
&ctx->queue_allocator.allocator_lock);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -351,13 +366,13 @@ void pva_kmd_context_destroy(struct pva_kmd_context *ctx)
|
||||
pva_kmd_block_allocator_deinit(&ctx->queue_allocator);
|
||||
pva_kmd_free(ctx->queue_allocator_mem);
|
||||
pva_kmd_mutex_deinit(&ctx->ccq_lock);
|
||||
pva_kmd_mutex_deinit(&ctx->resource_table_lock);
|
||||
err = pva_kmd_free_block(&ctx->pva->context_allocator, ctx->ccq_id);
|
||||
pva_kmd_mutex_deinit(&ctx->ocb_lock);
|
||||
ASSERT(err == PVA_SUCCESS);
|
||||
}
|
||||
|
||||
struct pva_kmd_context *pva_kmd_get_context(struct pva_kmd_device *pva,
|
||||
uint8_t alloc_id)
|
||||
{
|
||||
return pva_kmd_get_block(&pva->context_allocator, alloc_id);
|
||||
return pva_kmd_get_block_unsafe(&pva->context_allocator, alloc_id);
|
||||
}
|
||||
|
||||
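[Editorial note: the create/init paths above use the kernel-style goto-unwind pattern, where each successfully acquired resource adds one cleanup label at the tail of the function, and a failure jumps to the label that releases everything acquired so far, in reverse order. A minimal self-contained sketch of the idiom, with generic names rather than the driver's API:

#include <stdlib.h>

struct widget { void *a; void *b; };

/* Acquire resources in order; on failure, unwind only what was acquired. */
struct widget *widget_create(void)
{
        struct widget *w = calloc(1, sizeof(*w));
        if (w == NULL)
                goto err_out;

        w->a = malloc(64);
        if (w->a == NULL)
                goto free_widget;

        w->b = malloc(64);
        if (w->b == NULL)
                goto free_a;      /* releases a, then the widget itself */

        return w;

free_a:
        free(w->a);
free_widget:
        free(w);
err_out:
        return NULL;
}

The labels read bottom-up as the exact reverse of the acquisition order, which is what makes the pattern easy to audit.]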
@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_CONTEXT_H
#define PVA_KMD_CONTEXT_H
@@ -36,7 +28,6 @@ struct pva_kmd_context {

        bool inited;

        pva_kmd_mutex_t resource_table_lock;
        struct pva_kmd_resource_table ctx_resource_table;

        struct pva_kmd_submitter submitter;
@@ -75,6 +66,7 @@ struct pva_kmd_context {
        /** Index of block of syncpoints allocated for this context */
        uint32_t syncpt_block_index;
        uint32_t syncpt_ids[PVA_NUM_RW_SYNCPTS_PER_CONTEXT];
        pva_kmd_mutex_t ocb_lock;
};

/**

@@ -1,13 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2025, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_device.h"
#include "pva_kmd_debugfs.h"
#include "pva_kmd_fw_profiler.h"
@@ -15,6 +8,7 @@
#include "pva_kmd_vpu_ocd.h"
#include "pva_kmd_tegra_stats.h"
#include "pva_kmd_vpu_app_auth.h"
#include "pva_kmd_shared_buffer.h"

void pva_kmd_debugfs_create_nodes(struct pva_kmd_device *pva)
{
@@ -47,11 +41,19 @@ void pva_kmd_debugfs_create_nodes(struct pva_kmd_device *pva)
                                            &pva->debugfs_context.vpu_ocd_fops[i]);
        }

        pva->debugfs_context.allowlist_fops.read = &get_vpu_allowlist_enabled;
        pva->debugfs_context.allowlist_fops.write = &update_vpu_allowlist;
        pva->debugfs_context.allowlist_fops.pdev = pva;
        pva_kmd_debugfs_create_file(pva, "vpu_app_authentication",
                                    &pva->debugfs_context.allowlist_fops);

        pva->debugfs_context.fw_debug_log_level_fops.write =
                &update_fw_debug_log_level;
        pva->debugfs_context.fw_debug_log_level_fops.pdev = pva;
        pva_kmd_debugfs_create_file(
                pva, "fw_debug_log_level",
                &pva->debugfs_context.fw_debug_log_level_fops);

        pva_kmd_device_init_profiler(pva);
        pva_kmd_device_init_tegra_stats(pva);
}
@@ -63,8 +65,26 @@ void pva_kmd_debugfs_destroy_nodes(struct pva_kmd_device *pva)
        pva_kmd_debugfs_remove_nodes(pva);
}

static uint64_t read_from_buffer_to_user(void *to, uint64_t count,
                                         uint64_t offset, const void *from,
                                         uint64_t available)
{
        if (offset >= available || !count) {
                return 0;
        }
        if (count > available - offset) {
                count = available - offset;
        }
        if (pva_kmd_copy_data_to_user(to, (uint8_t *)from + offset, count)) {
                pva_kmd_log_err("failed to copy read buffer to user");
                return 0;
        }
        return count;
}

static int64_t print_vpu_stats(struct pva_kmd_tegrastats *kmd_tegra_stats,
                               uint8_t *out_buffer, uint64_t len)
                               uint8_t *out_buffer, uint64_t offset,
                               uint64_t len)
{
        char kernel_buffer[256];
        int64_t formatted_len;
@@ -90,19 +110,13 @@ static int64_t print_vpu_stats(struct pva_kmd_tegrastats *kmd_tegra_stats,
        }

        // Copy the formatted string from kernel buffer to user buffer
        if (pva_kmd_copy_data_to_user(out_buffer, kernel_buffer,
                                      formatted_len)) {
                pva_kmd_log_err("failed to copy read buffer to user");
                return 0;
        }

        return formatted_len;
        return read_from_buffer_to_user(out_buffer, len, offset, kernel_buffer,
                                        formatted_len);
}

int64_t update_vpu_stats(struct pva_kmd_device *dev, void *file_data,
                         uint8_t *out_buffer, uint64_t offset, uint64_t size)
{
        uint64_t size_read = 0U;
        struct pva_kmd_tegrastats kmd_tegra_stats;

        kmd_tegra_stats.window_start_time = 0;
@@ -113,9 +127,23 @@ int64_t update_vpu_stats(struct pva_kmd_device *dev, void *file_data,
        pva_kmd_log_err("Reading VPU stats");
        pva_kmd_notify_fw_get_tegra_stats(dev, &kmd_tegra_stats);

        size_read = print_vpu_stats(&kmd_tegra_stats, out_buffer, size);
        return print_vpu_stats(&kmd_tegra_stats, out_buffer, offset, size);
}

        return size_read;
int64_t get_vpu_allowlist_enabled(struct pva_kmd_device *pva, void *file_data,
                                  uint8_t *out_buffer, uint64_t offset,
                                  uint64_t size)
{
        // 1 byte for '0' or '1' and another 1 byte for the Null character
        char out_str[2];
        pva_kmd_mutex_lock(&(pva->pva_auth->allow_list_lock));
        snprintf(out_str, sizeof(out_str), "%d",
                 (int)pva->pva_auth->pva_auth_enable);
        pva_kmd_mutex_unlock(&(pva->pva_auth->allow_list_lock));

        // Copy the formatted string from kernel buffer to user buffer
        return read_from_buffer_to_user(out_buffer, size, offset, out_str,
                                        sizeof(out_str));
}

int64_t update_vpu_allowlist(struct pva_kmd_device *pva, void *file_data,
@@ -123,20 +151,70 @@ int64_t update_vpu_allowlist(struct pva_kmd_device *pva, void *file_data,
                             uint64_t size)
{
        char strbuf[2]; // 1 byte for '0' or '1' and another 1 byte for the Null character
        uint32_t base = 10;
        uint32_t pva_auth_enable;
        unsigned long retval;

        if (size == 0) {
                pva_kmd_log_err("Write failed, no data provided");
                return -1;
        }

        // Copy a single character, ignore the rest
        retval = pva_kmd_copy_data_from_user(strbuf, in_buffer, 1);
        if (retval != 0u) {
                pva_kmd_log_err("Failed to copy write buffer from user");
                return -1;
        }

        // Explicitly null terminate the string for conversion
        strbuf[1] = '\0';
        pva_auth_enable = pva_kmd_strtol(strbuf, base);

        pva_kmd_mutex_lock(&(pva->pva_auth->allow_list_lock));
        pva->pva_auth->pva_auth_enable = (pva_auth_enable == 1) ? true : false;

        if (pva->pva_auth->pva_auth_enable)
                pva->pva_auth->pva_auth_allow_list_parsed = false;

        pva_kmd_mutex_unlock(&(pva->pva_auth->allow_list_lock));
        return size;
}

int64_t update_fw_debug_log_level(struct pva_kmd_device *pva, void *file_data,
                                  const uint8_t *in_buffer, uint64_t offset,
                                  uint64_t size)
{
        uint32_t log_level;
        unsigned long retval;
        char strbuf[11]; // 10 bytes for the highest 32bit value and another 1 byte for the Null character
        uint32_t base = 10;

        retval = pva_kmd_copy_data_from_user(strbuf, in_buffer, sizeof(strbuf));
        if (retval != 0u) {
                pva_kmd_log_err("Failed to copy write buffer from user");
                return -1;
        }

        pva_auth_enable = pva_kmd_strtol(strbuf, 16);
        log_level = pva_kmd_strtol(strbuf, base);

        pva->pva_auth->pva_auth_enable = (pva_auth_enable == 1) ? true : false;
        pva_kmd_print_str_u64("Setting debug log level to", log_level);
        pva->fw_debug_log_level = log_level;

        if (pva->pva_auth->pva_auth_enable)
                pva->pva_auth->pva_auth_allow_list_parsed = false;
        /* If device is on, busy the device and set the debug log level */
        if (pva_kmd_device_maybe_on(pva) == true) {
                enum pva_error err;
                err = pva_kmd_device_busy(pva);
                if (err != PVA_SUCCESS) {
                        pva_kmd_log_err(
                                "pva_kmd_device_busy failed when submitting set debug log level cmd");
                        goto err_end;
                }

                return 2;
                pva_kmd_notify_fw_set_debug_log_level(pva, log_level);

                pva_kmd_device_idle(pva);
        }
err_end:
        return strlen(strbuf);
}

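[Editorial note: read_from_buffer_to_user above implements the usual debugfs read contract: clamp the requested count to the bytes available past the caller's offset, copy that much, and return the byte count (0 at end of data). A standalone userspace model of the same clamping logic, with illustrative names:

#include <stdint.h>
#include <string.h>

/* Model of the clamp-then-copy contract: returns bytes copied. */
static uint64_t read_from_buffer(void *to, uint64_t count, uint64_t offset,
                                 const void *from, uint64_t available)
{
        if (offset >= available || count == 0)
                return 0;                    /* nothing left past offset */
        if (count > available - offset)
                count = available - offset;  /* clamp to what remains */
        memcpy(to, (const uint8_t *)from + offset, count);
        return count;
}

Returning the clamped count lets a reader issue repeated reads with increasing offsets until it sees 0, which is exactly how the vpu stats and allowlist read handlers above are consumed.]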
@@ -1,13 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2025, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_DEBUGFS_H
#define PVA_KMD_DEBUGFS_H
#include "pva_kmd.h"
@@ -44,6 +37,7 @@ struct pva_kmd_debugfs_context {
        void *data_hwpm;
        struct pva_kmd_file_ops vpu_ocd_fops[NUM_VPU_BLOCKS];
        struct pva_kmd_fw_profiling_config g_fw_profiling_config;
        struct pva_kmd_file_ops fw_debug_log_level_fops;
};

void pva_kmd_debugfs_create_nodes(struct pva_kmd_device *dev);
@@ -53,4 +47,11 @@ int64_t update_vpu_stats(struct pva_kmd_device *dev, void *file_data,
int64_t update_vpu_allowlist(struct pva_kmd_device *pva, void *file_data,
                             const uint8_t *in_buffer, uint64_t offset,
                             uint64_t size);
int64_t get_vpu_allowlist_enabled(struct pva_kmd_device *pva, void *file_data,
                                  uint8_t *out_buffer, uint64_t offset,
                                  uint64_t size);
int64_t update_fw_debug_log_level(struct pva_kmd_device *dev, void *file_data,
                                  const uint8_t *in_buffer, uint64_t offset,
                                  uint64_t size);

#endif //PVA_KMD_DEBUGFS_H

@@ -1,15 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_api_types.h"
#include "pva_kmd_fw_debug.h"
#include "pva_kmd_utils.h"
#include "pva_api_cmdbuf.h"
#include "pva_api.h"
@@ -25,12 +17,15 @@
#include "pva_kmd_regs.h"
#include "pva_kmd_device_memory.h"
#include "pva_kmd_fw_profiler.h"
#include "pva_kmd_fw_debug.h"
#include "pva_kmd_vpu_app_auth.h"
#include "pva_utils.h"
#include "pva_kmd_debugfs.h"
#include "pva_kmd_tegra_stats.h"
#include "pva_kmd_shim_silicon.h"
#include "pva_kmd_shared_buffer.h"

#include "pva_kmd_abort.h"
/**
 * @brief Send address and size of the resource table to FW through CCQ.
 *
@@ -192,7 +187,6 @@ struct pva_kmd_device *pva_kmd_device_create(enum pva_chip_id chip_id,
        pva->max_n_contexts = PVA_MAX_NUM_USER_CONTEXTS;
        pva_kmd_mutex_init(&pva->powercycle_lock);
        pva_kmd_mutex_init(&pva->ccq0_lock);
        pva_kmd_mutex_init(&pva->resource_table_lock);
        pva_kmd_sema_init(&pva->fw_boot_sema, 0);
        size = safe_mulu32((uint32_t)sizeof(struct pva_kmd_context),
                           pva->max_n_contexts);
@@ -229,6 +223,7 @@ struct pva_kmd_device *pva_kmd_device_create(enum pva_chip_id chip_id,
        ASSERT(err == PVA_SUCCESS);

        pva->is_suspended = false;
        pva->fw_debug_log_level = 0U;

        return pva;
}
@@ -260,9 +255,8 @@ void pva_kmd_device_destroy(struct pva_kmd_device *pva)
        pva_kmd_block_allocator_deinit(&pva->context_allocator);
        pva_kmd_free(pva->context_mem);
        pva_kmd_mutex_deinit(&pva->ccq0_lock);
        pva_kmd_mutex_deinit(&pva->resource_table_lock);
        pva_kmd_mutex_deinit(&pva->powercycle_lock);
        pva_kmd_free(pva->pva_auth);
        pva_kmd_deinit_vpu_app_auth(pva);
        pva_kmd_free(pva);
}

@@ -290,7 +284,22 @@ enum pva_error pva_kmd_device_busy(struct pva_kmd_device *pva)
                pva_kmd_send_resource_table_info_by_ccq(
                        pva, &pva->dev_resource_table);
                pva_kmd_send_queue_info_by_ccq(pva, &pva->dev_queue);

                // TODO: need better error handling here
                err = pva_kmd_shared_buffer_init(
                        pva, PVA_PRIV_CCQ_ID, PVA_KMD_FW_BUF_ELEMENT_SIZE,
                        PVA_KMD_FW_PROFILING_BUF_NUM_ELEMENTS,
                        pva_kmd_process_fw_profiling_message, NULL, NULL);
                if (err != PVA_SUCCESS) {
                        pva_kmd_log_err_u64(
                                "pva kmd buffer initialization failed for interface ",
                                PVA_PRIV_CCQ_ID);
                        goto unlock;
                }
                pva_kmd_notify_fw_enable_profiling(pva);
                /* Set FW debug log level */
                pva_kmd_notify_fw_set_debug_log_level(pva,
                                                      pva->fw_debug_log_level);
        }
        pva->refcount = safe_addu32(pva->refcount, 1U);

@@ -301,15 +310,22 @@ unlock:

void pva_kmd_device_idle(struct pva_kmd_device *pva)
{
        enum pva_error err = PVA_SUCCESS;

        pva_kmd_mutex_lock(&pva->powercycle_lock);
        ASSERT(pva->refcount > 0);
        pva->refcount--;
        if (pva->refcount == 0) {
                /* Disable FW profiling */
                /* TODO: once debugfs is up, move these calls */
                // pva_kmd_notify_fw_disable_profiling(pva);
                // pva_kmd_drain_fw_profiling_buffer(pva,
                //                                   &pva->fw_profiling_buffer);
                if (!pva->recovery) {
                        /* Disable FW profiling */
                        /* TODO: once debugfs is up, move these calls */
                        pva_kmd_notify_fw_disable_profiling(pva);
                }
                // TODO: need better error handling here
                err = pva_kmd_shared_buffer_deinit(pva, PVA_PRIV_CCQ_ID);
                if (err != PVA_SUCCESS) {
                        pva_kmd_log_err("pva_kmd_shared_buffer_deinit failed");
                }
                pva_kmd_deinit_fw(pva);
                pva_kmd_power_off(pva);
        }
@@ -326,6 +342,7 @@ enum pva_error pva_kmd_ccq_push_with_timeout(struct pva_kmd_device *pva,
                        if (timeout_us == 0) {
                                pva_kmd_log_err(
                                        "pva_kmd_ccq_push_with_timeout Timed out");
                                pva_kmd_abort(pva);
                                return PVA_TIMEDOUT;
                        }
                        pva_kmd_sleep_us(sleep_interval_us);
@@ -336,3 +353,15 @@ enum pva_error pva_kmd_ccq_push_with_timeout(struct pva_kmd_device *pva,

        return PVA_SUCCESS;
}

bool pva_kmd_device_maybe_on(struct pva_kmd_device *pva)
{
        bool device_on = false;

        pva_kmd_mutex_lock(&pva->powercycle_lock);
        if (pva->refcount > 0) {
                device_on = true;
        }
        pva_kmd_mutex_unlock(&pva->powercycle_lock);
        return device_on;
}

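[Editorial note: pva_kmd_device_busy/pva_kmd_device_idle implement refcounted power gating, where the first user powers the device on, the last one powers it off, and pva_kmd_device_maybe_on is just a locked refcount probe whose answer can go stale the moment the lock drops. A minimal pthread-based model of the idiom, with illustrative names rather than the driver's API:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t powercycle_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned refcount;

static void power_on(void)  { /* bring hardware up */ }
static void power_off(void) { /* shut hardware down */ }

void device_busy(void)
{
        pthread_mutex_lock(&powercycle_lock);
        if (refcount == 0)
                power_on();        /* first user powers the device on */
        refcount++;
        pthread_mutex_unlock(&powercycle_lock);
}

void device_idle(void)
{
        pthread_mutex_lock(&powercycle_lock);
        if (--refcount == 0)
                power_off();       /* last user powers it off */
        pthread_mutex_unlock(&powercycle_lock);
}

bool device_maybe_on(void)
{
        pthread_mutex_lock(&powercycle_lock);
        bool on = refcount > 0;    /* may change right after unlock */
        pthread_mutex_unlock(&powercycle_lock);
        return on;
}

The "maybe" in the name is honest: the debugfs write path above still calls device_busy before touching firmware, using the probe only to avoid powering the device up just to set a log level.]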
@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_DEVICE_H
#define PVA_KMD_DEVICE_H
@@ -25,6 +17,7 @@
#include "pva_kmd_shim_init.h"
#include "pva_kmd_shim_ccq.h"
#include "pva_kmd_fw_profiler.h"
#include "pva_kmd_fw_debug.h"
#include "pva_kmd_constants.h"
#include "pva_kmd_debugfs.h"

@@ -76,7 +69,6 @@ struct pva_kmd_device {
        void *context_mem;
        struct pva_kmd_block_allocator context_allocator;

        pva_kmd_mutex_t resource_table_lock;
        struct pva_kmd_resource_table dev_resource_table;

        struct pva_kmd_submitter submitter;
@@ -100,12 +92,24 @@ struct pva_kmd_device {

        /** ISR post this semaphore when FW completes boot */
        pva_kmd_sema_t fw_boot_sema;
        bool recovery;

        struct pva_kmd_device_memory *fw_debug_mem;
        struct pva_kmd_device_memory *fw_bin_mem;
        struct pva_kmd_device_memory *fw_profiling_buffer_memory;
        uint32_t fw_profiling_buffer_resource_id;
        struct pva_kmd_fw_profiling_buffer fw_profiling_buffer;

        // 'kmd_fw_buffers' holds DRAM buffers shared between KMD and FW
        // - Today, we have 1 buffer per CCQ. This may need to be extended in future
        //   to support buffered communication through mailbox
        // - Buffers will be used for the following purposes
        //   - CCQ 0: Communications common to a VM
        //     -- example, FW profiling data and NSIGHT data
        //   - CCQ 1-8: Communications specific to each context
        //     -- example, resource unregistration requests
        // In the future, we may want to extend this to support communications between
        // FW and Hypervisor
        struct pva_kmd_shared_buffer kmd_fw_buffers[PVA_MAX_NUM_CCQ];

        uint32_t fw_debug_log_level;
        struct pva_kmd_fw_print_buffer fw_print_buffer;

        struct pva_kmd_device_memory *tegra_stats_memory;
@@ -155,4 +159,6 @@ void pva_kmd_send_resource_table_info_by_ccq(

void pva_kmd_send_queue_info_by_ccq(struct pva_kmd_device *pva,
                                    struct pva_kmd_queue *queue);

bool pva_kmd_device_maybe_on(struct pva_kmd_device *pva);
#endif // PVA_KMD_DEVICE_H

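[Editorial note: kmd_fw_buffers above gives each CCQ interface its own KMD/FW shared buffer whose fixed-size elements are handed to a per-interface callback: profiling events on CCQ 0, resource-unregister messages on the per-context CCQs. A generic model of that callback-per-interface dispatch, with illustrative types and the interface count as an assumption:

#include <stdint.h>
#include <stddef.h>

#define MAX_NUM_IFACE 9  /* assumption: CCQ 0 plus 8 context CCQs */

/* One handler per interface; called once per fixed-size element. */
typedef int (*element_handler)(void *context, uint8_t interface,
                               uint8_t *element);

struct shared_buffer {
        element_handler handler;
        void *context;
        size_t element_size;
};

static struct shared_buffer buffers[MAX_NUM_IFACE];

/* Drain: feed each queued element to the interface's handler. */
static int drain(uint8_t iface, uint8_t *elements, size_t n)
{
        struct shared_buffer *buf = &buffers[iface];
        for (size_t i = 0; i < n; i++) {
                int err = buf->handler(buf->context, iface,
                                       elements + i * buf->element_size);
                if (err != 0)
                        return err;
        }
        return 0;
}

This shape matches the callback signatures introduced elsewhere in the commit (pva_kmd_process_fw_profiling_message, pva_kmd_handle_msg_resource_unreg).]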
@@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_DEVICE_MEMORY_POOL_H
#define PVA_KMD_DEVICE_MEMORY_POOL_H
#include "pva_api_types.h"

struct pva_kmd_device;

struct pva_kmd_devmem_view {
        uint64_t iova;
        void *va;
};

struct pva_kmd_devmem_pool {
};

enum pva_error pva_kmd_devmem_pool_init(struct pva_kmd_device *dev,
                                        uint32_t smmu_context_id,
                                        uint32_t block_size,
                                        uint32_t alloc_step,
                                        struct pva_kmd_devmem_pool *pool);

enum pva_error pva_kmd_devmem_pool_acquire(struct pva_kmd_devmem_pool *pool,
                                           struct pva_kmd_devmem_view *view);

enum pva_error pva_kmd_devmem_pool_release(struct pva_kmd_devmem_pool *pool,
                                           struct pva_kmd_devmem_view *view);

enum pva_error pva_kmd_devmem_pool_deinit(struct pva_kmd_devmem_pool *pool);

#endif
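[Editorial note: the new header above only declares the pool interface; the struct body is still empty, so the semantics below are inferred purely from the declarations and should be read as a hedged sketch, not the driver's documented contract. The apparent call sequence is init, acquire/release pairs, deinit, with each view exposing a CPU virtual address and the IOVA that device DMA would use:

/* Sketch of intended usage, inferred from the declarations above.
 * Error handling abbreviated; the smmu context, block size, and
 * grow step values are illustrative only. */
enum pva_error example(struct pva_kmd_device *dev)
{
        struct pva_kmd_devmem_pool pool;
        struct pva_kmd_devmem_view view;
        enum pva_error err;

        err = pva_kmd_devmem_pool_init(dev, 0 /* smmu ctx */,
                                       4096 /* block size */,
                                       8 /* blocks per grow step */, &pool);
        if (err != PVA_SUCCESS)
                return err;

        err = pva_kmd_devmem_pool_acquire(&pool, &view);
        if (err == PVA_SUCCESS) {
                /* view.va is CPU-visible; view.iova is what FW/DMA sees */
                pva_kmd_devmem_pool_release(&pool, &view);
        }
        return pva_kmd_devmem_pool_deinit(&pool);
}
]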
@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "pva_kmd_dma_cfg.h"
#include "pva_utils.h"
#include "pva_kmd_resource_table.h"
@@ -15,18 +7,18 @@

#define PVA_KMD_INVALID_CH_IDX 0xFF

void pva_kmd_unload_dma_config(struct pva_kmd_dma_resource_aux *dma_aux)
void pva_kmd_unload_dma_config_unsafe(struct pva_kmd_dma_resource_aux *dma_aux)
{
        uint32_t i;

        for (i = 0; i < dma_aux->dram_res_count; i++) {
                pva_kmd_drop_resource(dma_aux->res_table,
                                      dma_aux->static_dram_res_ids[i]);
                pva_kmd_drop_resource_unsafe(dma_aux->res_table,
                                             dma_aux->static_dram_res_ids[i]);
        }

        if (dma_aux->vpu_bin_res_id != PVA_RESOURCE_ID_INVALID) {
                pva_kmd_drop_resource(dma_aux->res_table,
                                      dma_aux->vpu_bin_res_id);
                pva_kmd_drop_resource_unsafe(dma_aux->res_table,
                                             dma_aux->vpu_bin_res_id);
        }
}

@@ -142,7 +134,7 @@ pva_kmd_load_dma_config(struct pva_kmd_resource_table *resource_table,

        return PVA_SUCCESS;
drop_res:
        pva_kmd_unload_dma_config(dma_aux);
        pva_kmd_unload_dma_config_unsafe(dma_aux);
err_out:
        return err;
}

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_DMA_CFG_H
#define PVA_KMD_DMA_CFG_H

@@ -135,5 +127,5 @@ pva_kmd_load_dma_config(struct pva_kmd_resource_table *resource_table,
                        struct pva_kmd_dma_resource_aux *dma_aux,
                        void *fw_dma_cfg, uint32_t *out_fw_fetch_size);

void pva_kmd_unload_dma_config(struct pva_kmd_dma_resource_aux *dma_aux);
void pva_kmd_unload_dma_config_unsafe(struct pva_kmd_dma_resource_aux *dma_aux);
#endif // PVA_KMD_DMA_CFG_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_resource_table.h"
#include "pva_kmd_device_memory.h"

@@ -1,13 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_resource_table.h"
#include "pva_kmd_device_memory.h"
#include "pva_kmd_hwseq_validate.h"
@@ -341,7 +334,7 @@ pva_kmd_dma_use_resources(struct pva_dma_config const *dma_cfg,
        if (dma_cfg->header.vpu_exec_resource_id != PVA_RESOURCE_ID_INVALID) {
                struct pva_kmd_resource_record *vpu_bin_rec;

                vpu_bin_rec = pva_kmd_use_resource(
                vpu_bin_rec = pva_kmd_use_resource_unsafe(
                        dma_aux->res_table,
                        dma_cfg->header.vpu_exec_resource_id);
                if (vpu_bin_rec == NULL) {
@@ -371,8 +364,8 @@ pva_kmd_dma_use_resources(struct pva_dma_config const *dma_cfg,
                if (slot_buf->type == PVA_DMA_STATIC_BINDING_DRAM) {
                        struct pva_kmd_resource_record *rec;

                        rec = pva_kmd_use_resource(dma_aux->res_table,
                                                   slot_buf->dram.resource_id);
                        rec = pva_kmd_use_resource_unsafe(
                                dma_aux->res_table, slot_buf->dram.resource_id);
                        if (rec == NULL) {
                                pva_kmd_log_err(
                                        "DRAM buffers used by DMA config do not exist");
@@ -415,13 +408,13 @@ pva_kmd_dma_use_resources(struct pva_dma_config const *dma_cfg,
        return PVA_SUCCESS;
drop_dram:
        for (i = 0; i < dma_aux->dram_res_count; i++) {
                pva_kmd_drop_resource(dma_aux->res_table,
                                      dma_aux->static_dram_res_ids[i]);
                pva_kmd_drop_resource_unsafe(dma_aux->res_table,
                                             dma_aux->static_dram_res_ids[i]);
        }
drop_vpu_bin:
        if (dma_aux->vpu_bin_res_id != PVA_RESOURCE_ID_INVALID) {
                pva_kmd_drop_resource(dma_aux->res_table,
                                      dma_aux->vpu_bin_res_id);
                pva_kmd_drop_resource_unsafe(dma_aux->res_table,
                                             dma_aux->vpu_bin_res_id);
        }
err_out:
        return err;
@@ -630,6 +623,7 @@ static enum pva_error get_access_size(struct pva_dma_descriptor *desc,
        int32_t dim_offset = 0;
        uint32_t dim_offset_U = 0U;
        uint32_t num_bytes = 0U;
        int64_t offset_to_add = 0;
        enum pva_error err = PVA_SUCCESS;
        pva_math_error math_err = MATH_OP_SUCCESS;

@@ -658,6 +652,7 @@ static enum pva_error get_access_size(struct pva_dma_descriptor *desc,
                pva_kmd_log_err("Offset is too large");
                goto err_out;
        }
        offset_to_add = convert_to_signed_s64(attr->offset);

        dim_offset_U = mulu32((uint32_t)(attr->line_pitch),
                              subu32(ty, 1U, &math_err), &math_err);
@@ -674,6 +669,7 @@ static enum pva_error get_access_size(struct pva_dma_descriptor *desc,
                }
                start = 0LL;
                end = (int64_t)attr->cb_size;
                offset_to_add = 0;
                goto end;
        }

@@ -706,11 +702,8 @@ static enum pva_error get_access_size(struct pva_dma_descriptor *desc,

end:
        entry->start_addr =
                adds64(mins64(start, end), convert_to_signed_s64(attr->offset),
                       &math_err);
        entry->end_addr =
                adds64(maxs64(start, end), convert_to_signed_s64(attr->offset),
                       &math_err);
                adds64(mins64(start, end), offset_to_add, &math_err);
        entry->end_addr = adds64(maxs64(start, end), offset_to_add, &math_err);

        if (is_dst) {
                dst2->start_addr =
@@ -743,7 +736,7 @@ pva_kmd_compute_dma_access(struct pva_dma_config const *dma_cfg,
         * Check if DMA descriptor has been used in HW Sequencer.
         * If used, skip_swseq_size_compute = true
         * else skip_swseq_size_compute = false
         *
         *
         * If skip_swseq_size_compute == true then set access_sizes to 0
         * else go ahead with access_sizes calculation.access_sizes
         */

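[Editorial note: the _unsafe suffix adopted throughout this commit marks resource-table operations whose callers must already hold the table's lock, so the lock is taken once around a whole batch (here, every buffer referenced by a DMA config) instead of once per resource. A generic illustration of the convention, not the driver's API:

#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* _unsafe: caller must hold table_lock. */
static void drop_resource_unsafe(int id)
{
        /* ... remove entry id; no locking here ... */
        (void)id;
}

/* Locked convenience wrapper for one-off drops. */
static void drop_resource(int id)
{
        pthread_mutex_lock(&table_lock);
        drop_resource_unsafe(id);
        pthread_mutex_unlock(&table_lock);
}

/* Batch path: one lock round-trip for many drops. */
static void drop_many(const int *ids, int n)
{
        pthread_mutex_lock(&table_lock);
        for (int i = 0; i < n; i++)
                drop_resource_unsafe(ids[i]);
        pthread_mutex_unlock(&table_lock);
}

Besides the performance angle, the split also prevents self-deadlock when a locked caller (such as the shared-buffer message handler later in this commit) needs to drop resources while already holding the lock.]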
@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "pva_kmd_resource_table.h"
#include "pva_kmd_device_memory.h"
#include "pva_api.h"

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_EXECUTABLE_H
#define PVA_KMD_EXECUTABLE_H
#include "pva_kmd.h"

@@ -1,45 +1,104 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_fw_debug.h"
#include "pva_kmd_utils.h"
#include "pva_api.h"
#include "pva_api_cmdbuf.h"
#include "pva_api_types.h"
#include "pva_bit.h"
#include "pva_fw.h"
#include "pva_kmd_cmdbuf.h"
#include "pva_kmd_device.h"
#include "pva_kmd_fw_debug.h"
#include "pva_kmd_constants.h"
#include "pva_utils.h"

enum pva_error pva_kmd_notify_fw_set_debug_log_level(struct pva_kmd_device *pva,
                                                     uint32_t log_level)
{
        struct pva_kmd_submitter *submitter = &pva->submitter;
        struct pva_kmd_cmdbuf_builder builder;
        struct pva_cmd_set_debug_log_level *cmd;
        uint32_t fence_val;
        enum pva_error err;

        err = pva_kmd_submitter_prepare(submitter, &builder);
        if (err != PVA_SUCCESS) {
                goto err_out;
        }

        cmd = pva_kmd_reserve_cmd_space(&builder, sizeof(*cmd));
        ASSERT(cmd != NULL);

        pva_kmd_set_cmd_set_debug_log_level(cmd, log_level);
        pva_kmd_print_str_u64("set debug log level cmd:", cmd->log_level);

        err = pva_kmd_submitter_submit(submitter, &builder, &fence_val);
        if (err != PVA_SUCCESS) {
                pva_kmd_log_err("set debug log level cmd submission failed");
                goto cancel_builder;
        }

        err = pva_kmd_submitter_wait(submitter, fence_val,
                                     PVA_KMD_WAIT_FW_POLL_INTERVAL_US,
                                     PVA_KMD_WAIT_FW_TIMEOUT_US);
        if (err != PVA_SUCCESS) {
                pva_kmd_log_err(
                        "Waiting for FW timed out when setting debug log level");
                goto err_out;
        }

cancel_builder:
        pva_kmd_cmdbuf_builder_cancel(&builder);

err_out:
        return err;
}

void pva_kmd_drain_fw_print(struct pva_kmd_fw_print_buffer *print_buffer)
{
        uint32_t tail = print_buffer->buffer_info->tail;
        struct pva_fw_print_buffer_header *buf_info = print_buffer->buffer_info;
        uint32_t tail = buf_info->tail;

        if (tail > print_buffer->size) {
        if (tail > buf_info->size) {
                pva_kmd_log_err(
                        "Firmware print tail is out of bounds! Refusing to print\n");
                pva_dbg_printf("Tail %u vs size %u\n", tail,
                               print_buffer->size);
                return;
        }

        while (print_buffer->head < tail) {
                uint32_t max_len = tail - print_buffer->head;
                const char *str = print_buffer->content + print_buffer->head;
        if (buf_info->head > buf_info->size) {
                pva_kmd_log_err(
                        "Firmware print head is out of bounds! Refusing to print\n");
                return;
        }

        while (buf_info->head != tail) {
                uint32_t max_len;
                uint32_t head = buf_info->head;
                const char *str = print_buffer->content + head;
                uint32_t print_size;

                /* It must be null terminated */
                if (print_buffer->content[tail - 1] != '\0') {
                        pva_kmd_log_err(
                                "Firmware print is not null terminated! Refusing to print");
                if ((head + PVA_MAX_DEBUG_LOG_MSG_CHARACTERS) >
                    buf_info->size) {
                        buf_info->head = 0;
                        continue;
                }

                if (head < tail) {
                        max_len = tail - head;
                } else {
                        max_len = buf_info->size - head;
                }

                print_size = strnlen(str, max_len);
                pva_kmd_print_str(str);

                /* +1 for null terminator */
                print_buffer->head += print_size + 1;
                head = (head + print_size + 1);
                if (head >= buf_info->size) {
                        head = 0;
                }
                buf_info->head = head;
        }

        if (print_buffer->buffer_info->flags & PVA_FW_PRINT_BUFFER_OVERFLOWED) {

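[Editorial note: the rewritten drain loop above turns the firmware print buffer into a true circular buffer: head chases tail, wraps to 0 whenever a maximum-size message would not fit before the end of the buffer, and each record is a NUL-terminated string. A standalone model of that wrap-aware consumer, with illustrative constants:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 4096
#define MAX_MSG  128   /* assumed per-message size cap */

struct ring {
        char buf[BUF_SIZE];
        uint32_t head;   /* consumer cursor */
        uint32_t tail;   /* producer cursor, written by the other side */
};

/* Consume NUL-terminated records until head catches up with tail. */
static void drain(struct ring *r)
{
        while (r->head != r->tail) {
                uint32_t head = r->head;

                /* Producer wraps instead of splitting a message. */
                if (head + MAX_MSG > BUF_SIZE) {
                        r->head = 0;
                        continue;
                }

                uint32_t max_len = (head < r->tail) ? r->tail - head
                                                    : BUF_SIZE - head;
                printf("%s\n", r->buf + head);

                head += (uint32_t)strnlen(r->buf + head, max_len) + 1;
                r->head = (head >= BUF_SIZE) ? 0 : head;
        }
}

The two bounds checks before the loop in the driver code guard against a misbehaving firmware writing head or tail values outside the buffer, since both cursors live in memory shared with the firmware.]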
@@ -1,26 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_FW_DEBUG_H
#define PVA_KMD_FW_DEBUG_H
#include "pva_api.h"
#include "pva_fw.h"
#include "pva_kmd_device.h"

struct pva_kmd_fw_print_buffer {
        struct pva_fw_print_buffer_header *buffer_info;
        char const *content;
        uint32_t size;
        uint32_t head;
};

enum pva_error pva_kmd_notify_fw_set_debug_log_level(struct pva_kmd_device *pva,
                                                     uint32_t log_level);

void pva_kmd_drain_fw_print(struct pva_kmd_fw_print_buffer *print_buffer);

#endif // PVA_KMD_FW_DEBUG_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "pva_api_cmdbuf.h"
#include "pva_api_types.h"
#include "pva_bit.h"
@@ -17,6 +9,7 @@
#include "pva_kmd_constants.h"
#include "pva_utils.h"
#include "pva_kmd_fw_profiler.h"
#include "pva_kmd_shared_buffer.h"

// TODO: This is here temporarily just for testing. Should be moved to a common header
#define CMD_ID(x) PVA_EXTRACT(x, 6, 0, uint8_t)
@@ -97,47 +90,12 @@ static inline const char *pva_fw_get_cmd_name(uint32_t opcode)

void pva_kmd_device_init_profiler(struct pva_kmd_device *pva)
{
        enum pva_error err = PVA_SUCCESS;
        const uint32_t profiling_buffer_size = PVA_KMD_FW_PROFILING_BUFFER_SIZE;

        struct pva_kmd_fw_profiling_buffer *fw_profiling_buffer =
                &pva->fw_profiling_buffer;

        // Event message should be 32-bit to keep logging latency low
        ASSERT(sizeof(struct pva_fw_event_message) == sizeof(uint32_t));

        pva->fw_profiling_buffer_memory =
                pva_kmd_device_memory_alloc_map(profiling_buffer_size, pva,
                                                PVA_ACCESS_RW,
                                                PVA_R5_SMMU_CONTEXT_ID);
        ASSERT(pva->fw_profiling_buffer_memory != NULL);

        /* Add profiling memory to resource table */
        err = pva_kmd_add_dram_buffer_resource(
                &pva->dev_resource_table, pva->fw_profiling_buffer_memory,
                &pva->fw_profiling_buffer_resource_id);
        ASSERT(err == PVA_SUCCESS);
        pva_kmd_update_fw_resource_table(&pva->dev_resource_table);

        fw_profiling_buffer->buffer_info =
                (struct pva_fw_profiling_buffer_header *)
                        pva->fw_profiling_buffer_memory->va;
        fw_profiling_buffer->content =
                pva_offset_pointer(pva->fw_profiling_buffer_memory->va,
                                   sizeof(*fw_profiling_buffer->buffer_info));
        fw_profiling_buffer->size = pva->fw_profiling_buffer_memory->size;
        fw_profiling_buffer->head = 0U;
        fw_profiling_buffer->buffer_info->flags = 0U;
        fw_profiling_buffer->buffer_info->tail = 0U;

        pva->debugfs_context.g_fw_profiling_config.enabled = false;
        pva->debugfs_context.g_fw_profiling_config.filter = 0x0;
}

void pva_kmd_device_deinit_profiler(struct pva_kmd_device *pva)
{
        pva_kmd_drop_resource(&pva->dev_resource_table,
                              pva->fw_profiling_buffer_resource_id);
        pva->debugfs_context.g_fw_profiling_config.enabled = false;
}

@@ -146,12 +104,18 @@ enum pva_error pva_kmd_notify_fw_enable_profiling(struct pva_kmd_device *pva)
        struct pva_kmd_cmdbuf_builder builder;
        struct pva_kmd_submitter *dev_submitter = &pva->submitter;
        struct pva_cmd_enable_fw_profiling *cmd;
        uint64_t buffer_offset = 0U;
        uint32_t filter = 0U;
        uint8_t timestamp_type = TIMESTAMP_TYPE_CYCLE_COUNT;
        uint32_t fence_val;
        enum pva_error err;

        struct pva_kmd_shared_buffer *profiling_buffer =
                &pva->kmd_fw_buffers[PVA_PRIV_CCQ_ID];

        // Ensure that the DRAM buffer that backs FW profiling was allocated
        if (profiling_buffer->resource_memory == NULL) {
                return PVA_INVALID_RESOURCE;
        }
        // filter |= PVA_FW_EVENT_DO_CMD;
        filter |= PVA_FW_EVENT_RUN_VPU;

@@ -159,20 +123,13 @@ enum pva_error pva_kmd_notify_fw_enable_profiling(struct pva_kmd_device *pva)
                return PVA_SUCCESS;
        }

        pva->fw_profiling_buffer.head = 0U;
        pva->fw_profiling_buffer.buffer_info->flags = 0U;
        pva->fw_profiling_buffer.buffer_info->tail = 0U;

        err = pva_kmd_submitter_prepare(dev_submitter, &builder);
        if (err != PVA_SUCCESS) {
                goto err_out;
        }
        cmd = pva_kmd_reserve_cmd_space(&builder, sizeof(*cmd));
        ASSERT(cmd != NULL);
        pva_kmd_set_cmd_enable_fw_profiling(
                cmd, pva->fw_profiling_buffer_resource_id,
                pva->fw_profiling_buffer.size, buffer_offset, filter,
                timestamp_type);
        pva_kmd_set_cmd_enable_fw_profiling(cmd, filter, timestamp_type);

        err = pva_kmd_submitter_submit(dev_submitter, &builder, &fence_val);
        if (err != PVA_SUCCESS) {
@@ -281,58 +238,43 @@ static void decode_and_print_event(unsigned long walltime,
        }
}

void pva_kmd_drain_fw_profiling_buffer(
        struct pva_kmd_device *pva,
        struct pva_kmd_fw_profiling_buffer *profiling_buffer)
enum pva_error pva_kmd_process_fw_profiling_message(void *context,
                                                    uint8_t interface,
                                                    uint8_t *element)
{
        struct pva_kmd_device *pva = (struct pva_kmd_device *)context;

        uint64_t timestamp = 0;
        char msg_string[200] = { '\0' };
        struct pva_fw_event_message message;
        uint64_t prev_walltime = 0U;
        uint64_t timestamp = 0U;
        static uint64_t prev_walltime = 0U;
        uint64_t relative_time = 0U;
        uint32_t buffer_space;

        // TODO: R5 frequency is hard-coded for now. Get this at runtime.
        static const uint32_t r5_freq = 716800000U;
        static const unsigned long r5_cycle_duration = 1000000000000 / r5_freq;
        unsigned long walltime = 0U; // in nanoseconds
        uint64_t walltime_diff;
        static const uint64_t r5_cycle_duration = 1000000000000 / r5_freq;
        uint64_t walltime = 0U; // in nanoseconds

        const uint32_t message_size =
                sizeof(message) +
                pva->debugfs_context.g_fw_profiling_config.timestamp_size;
        uint32_t *profiling_buffer_head = &profiling_buffer->head;
        uint32_t profiling_buffer_tail = profiling_buffer->buffer_info->tail;
        while (*profiling_buffer_head < profiling_buffer_tail) {
                buffer_space = safe_addu32(*profiling_buffer_head,
                                           safe_subu32(message_size, 1U));
                ASSERT(buffer_space <= profiling_buffer_tail);
                memcpy(&message,
                       &profiling_buffer->content[*profiling_buffer_head],
                       sizeof(message));
                memcpy(&timestamp,
                       &profiling_buffer->content[*profiling_buffer_head +
                                                  sizeof(message)],
                       pva->debugfs_context.g_fw_profiling_config
                               .timestamp_size);
        memcpy(&message, element, sizeof(message));
        memcpy(&timestamp, &element[sizeof(message)],
               pva->debugfs_context.g_fw_profiling_config.timestamp_size);

                if (pva->debugfs_context.g_fw_profiling_config.timestamp_type ==
                    TIMESTAMP_TYPE_TSE) {
                        walltime = (timestamp << 5);
                } else if (pva->debugfs_context.g_fw_profiling_config
                                   .timestamp_type ==
                           TIMESTAMP_TYPE_CYCLE_COUNT) {
                        timestamp = PVA_LOW32(timestamp);
                        walltime = (r5_cycle_duration * timestamp) / 1000U;
                }
                walltime_diff = safe_subu64((uint64_t)walltime, prev_walltime);
                relative_time = (prev_walltime == 0U) ? 0U : walltime_diff;
                decode_and_print_event(walltime, relative_time, message,
                                       &msg_string[0]);
                pva_kmd_print_str(msg_string);
                *profiling_buffer_head = *profiling_buffer_head + message_size;
                prev_walltime = walltime;
        if (pva->debugfs_context.g_fw_profiling_config.timestamp_type ==
            TIMESTAMP_TYPE_TSE) {
                walltime = (timestamp << 5);
        } else if (pva->debugfs_context.g_fw_profiling_config.timestamp_type ==
                   TIMESTAMP_TYPE_CYCLE_COUNT) {
                timestamp = PVA_LOW32(timestamp);
                walltime = safe_mulu64(r5_cycle_duration, timestamp);
                walltime = walltime / 1000U;
        }
        relative_time = (prev_walltime > walltime) ?
                                0U :
                                safe_subu64(walltime, prev_walltime);
        decode_and_print_event(walltime, relative_time, message,
                               &msg_string[0]);
        pva_kmd_print_str(msg_string);
        prev_walltime = walltime;

        return;
        return PVA_SUCCESS;
}

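[Editorial note: the cycle-count path above converts R5 cycles to nanoseconds through a picoseconds-per-cycle constant so that the integer division happens at picosecond granularity: 10^12 / 716,800,000 Hz = 1395 ps per cycle, and walltime_ns = (1395 * cycles) / 1000. A checkable standalone version of that arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint32_t r5_freq = 716800000U;            /* Hz */
        /* Picoseconds per cycle; dividing 10^12 instead of 10^9 keeps
         * three extra digits of precision in the integer constant. */
        const uint64_t ps_per_cycle = 1000000000000ULL / r5_freq;
        assert(ps_per_cycle == 1395);

        uint64_t cycles = 716800000ULL;                 /* one second */
        uint64_t walltime_ns = (ps_per_cycle * cycles) / 1000U;
        /* Truncation to 1395 ps/cycle undershoots slightly:
         * ~0.999936 s reported for one real second of cycles. */
        assert(walltime_ns == 999936000ULL);
        return 0;
}

The switch from open-coded multiplication to safe_mulu64 in the new code matters here: 1395 * cycles overflows 32-bit math quickly, and even 64-bit products deserve the checked helper given the multi-hour uptimes a cycle counter can accumulate.]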
@@ -1,24 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_FW_PROFILER_H
#define PVA_KMD_FW_PROFILER_H
#include "pva_kmd_device.h"
#include "pva_kmd_shared_buffer.h"

struct pva_kmd_fw_profiling_buffer {
#define PVA_KMD_FW_PROFILING_BUFFER_SIZE (512 * 1024)
        struct pva_fw_profiling_buffer_header *buffer_info;
        char const *content;
        uint32_t size;
        uint32_t head;
};
#define PVA_KMD_FW_PROFILING_BUF_NUM_ELEMENTS (4096)

struct pva_kmd_fw_profiling_config {
        uint32_t filter;
@@ -31,9 +18,9 @@ void pva_kmd_device_init_profiler(struct pva_kmd_device *pva);

void pva_kmd_device_deinit_profiler(struct pva_kmd_device *pva);

void pva_kmd_drain_fw_profiling_buffer(
        struct pva_kmd_device *pva,
        struct pva_kmd_fw_profiling_buffer *profiling_buffer);
enum pva_error pva_kmd_process_fw_profiling_message(void *context,
                                                    uint8_t interface,
                                                    uint8_t *element);

enum pva_error pva_kmd_notify_fw_enable_profiling(struct pva_kmd_device *pva);

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_hwseq_validate.h"
#include "pva_api_dma.h"
@@ -228,17 +220,6 @@ static enum pva_error validate_cb_tiles(struct pva_hwseq_priv *hwseq,
                           subs64((int64_t)ty, 1LL, &math_err), &math_err),
                    (int64_t)tx, &math_err);

        end_addr = adds64(end_addr,
                          muls64((int64_t)head_desc->src.rpt1,
                                 head_desc->dst.adv1, &math_err),
                          &math_err);

        if ((head_desc->dst.adv2 > 0) && (end_addr > head_desc->dst.adv2)) {
                pva_kmd_log_err(
                        "Tile voxel size exceeds destination advance amount on dim2");
                return PVA_INVAL;
        }

        end_addr = muls64(end_addr,
                          convert_to_signed_s64(1ULL
                                                << (head_desc->log2_pixel_size &
@@ -412,7 +393,7 @@ static enum pva_error validate_dst_vmem(struct pva_hwseq_priv *hwseq,

        num_bytes = convert_to_signed_s64(
                1ULL << (head_desc->log2_pixel_size & MAX_BYTES_PER_PIXEL));
        offset = convert_to_signed_s64(head_desc->src.offset);
        offset = convert_to_signed_s64(head_desc->dst.offset);

        *vmem_tile_count = get_vmem_tile_count(&head_desc->dst, has_dim3);

@@ -578,18 +559,7 @@ static enum pva_error validate_src_vmem(struct pva_hwseq_priv *hwseq,
                    (int64_t)tx, &math_err);

        if (0U != head_desc->src.cb_enable) {
                end_addr = adds64(muls64((int64_t)head_desc->dst.rpt1,
                                         head_desc->src.adv1, &math_err),
                                  end_addr, &math_err);

                if ((head_desc->src.adv2 > 0) &&
                    (end_addr > head_desc->src.adv2)) {
                        pva_kmd_log_err(
                                "Tile voxel size exceeds source advance amount on dim2");
                        return PVA_INVAL;
                }
                end_addr = muls64(end_addr, num_bytes, &math_err);

                hwseq->access_sizes[head_desc_id].src.start_addr =
                        mins64(end_addr, 0LL);
                hwseq->access_sizes[head_desc_id].src.end_addr =

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_HWSEQ_VALIDATE_H
#define PVA_KMD_HWSEQ_VALIDATE_H


@@ -0,0 +1,69 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_id_allocator.h"
#include "pva_api_types.h"
#include "pva_kmd_utils.h"

enum pva_error pva_kmd_id_allocator_init(struct pva_kmd_id_allocator *allocator,
                                         uint32_t base_id, uint32_t n_entries)
{
        enum pva_error err = PVA_SUCCESS;

        allocator->n_entries = n_entries;
        allocator->n_free_ids = n_entries;
        allocator->n_used_ids = 0;

        // Allocate space for both free and used IDs
        allocator->free_ids = pva_kmd_zalloc(sizeof(uint32_t) * n_entries * 2);
        if (allocator->free_ids == NULL) {
                err = PVA_NOMEM;
                goto err_out;
        }

        allocator->used_ids = allocator->free_ids + n_entries;

        // Put free IDs in reverse order so that we allocate in ascending order
        for (uint32_t i = 0; i < n_entries; i++) {
                allocator->free_ids[i] = base_id + n_entries - i - 1;
        }

        return PVA_SUCCESS;

err_out:
        return err;
}

enum pva_error
pva_kmd_id_allocator_deinit(struct pva_kmd_id_allocator *allocator)
{
        pva_kmd_free(allocator->free_ids);
        return PVA_SUCCESS;
}

enum pva_error pva_kmd_alloc_id(struct pva_kmd_id_allocator *allocator,
                                uint32_t *id)
{
        if (allocator->n_free_ids == 0) {
                return PVA_NOENT;
        }

        allocator->n_free_ids--;
        *id = allocator->free_ids[allocator->n_free_ids];

        allocator->used_ids[allocator->n_used_ids] = *id;
        allocator->n_used_ids++;

        return PVA_SUCCESS;
}

void pva_kmd_free_id(struct pva_kmd_id_allocator *allocator, uint32_t id)
{
        ASSERT(allocator->n_used_ids > 0);
        ASSERT(allocator->n_free_ids < allocator->n_entries);

        allocator->free_ids[allocator->n_free_ids] = id;
        allocator->n_free_ids++;

        allocator->n_used_ids--;
}
@@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_ID_ALLOCATOR_H
#define PVA_KMD_ID_ALLOCATOR_H
#include "pva_api_types.h"

struct pva_kmd_id_allocator {
        uint32_t n_entries;
        uint32_t *free_ids;
        uint32_t *used_ids;
        uint32_t n_free_ids;
        uint32_t n_used_ids;
};

enum pva_error pva_kmd_id_allocator_init(struct pva_kmd_id_allocator *allocator,
                                         uint32_t base_id, uint32_t n_entries);

enum pva_error
pva_kmd_id_allocator_deinit(struct pva_kmd_id_allocator *allocator);

enum pva_error pva_kmd_alloc_id(struct pva_kmd_id_allocator *allocator,
                                uint32_t *id);

void pva_kmd_free_id(struct pva_kmd_id_allocator *allocator, uint32_t id);

#endif /* PVA_KMD_ID_ALLOCATOR_H */
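[Editorial note: the new ID allocator is a LIFO free-list built over a single array split into free_ids and used_ids halves, seeded in reverse so the first allocations come out in ascending order while frees are reused most-recently-first. A standalone demonstration of exactly that behavior:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
        const uint32_t base_id = 100, n = 4;
        uint32_t *free_ids = calloc(n, sizeof(*free_ids));
        uint32_t n_free = n;

        /* Seed in reverse so pops come out ascending: 100, 101, ... */
        for (uint32_t i = 0; i < n; i++)
                free_ids[i] = base_id + n - i - 1;

        /* First two allocations are the lowest IDs. */
        assert(free_ids[--n_free] == 100);
        assert(free_ids[--n_free] == 101);

        /* Freeing pushes back; the freed ID is reused next (LIFO). */
        free_ids[n_free++] = 100;
        assert(free_ids[--n_free] == 100);

        free(free_ids);
        return 0;
}

The used_ids half does not affect allocation order; it exists so the allocator can enumerate outstanding IDs, which the ASSERTs in pva_kmd_free_id also lean on for underflow checking.]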
@@ -1,21 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_msg.h"
#include "pva_fw.h"
#include "pva_kmd_utils.h"
#include "pva_kmd_thread_sema.h"
#include "pva_kmd_fw_debug.h"
#include "pva_kmd_device.h"
#include "pva_kmd_context.h"
#include "pva_kmd_abort.h"

static uint8_t get_msg_type(uint32_t hdr)
{
@@ -58,6 +50,7 @@ void pva_kmd_handle_hyp_msg(void *pva_dev, uint32_t const *data, uint8_t len)
                memcpy(abort_msg + 2, &data[1], size);
                abort_msg[PVA_FW_MSG_ABORT_STR_MAX_LEN] = '\0';
                pva_kmd_log_err(abort_msg);
                pva_kmd_abort(pva);
        } break;
        case PVA_FW_MSG_TYPE_FLUSH_PRINT:
                pva_kmd_drain_fw_print(&pva->fw_print_buffer);
@@ -68,31 +61,31 @@ void pva_kmd_handle_hyp_msg(void *pva_dev, uint32_t const *data, uint8_t len)
        }
}

void pva_kmd_handle_msg(void *pva_dev, uint32_t const *data, uint8_t len)
enum pva_error pva_kmd_handle_msg_resource_unreg(void *context,
                                                 uint8_t interface,
                                                 uint8_t *element)
{
        struct pva_kmd_device *pva = pva_dev;
        // TODO: if the mapping of CCQ_ID to interface is not 1:1, we need to
        // find the CCQ_ID/table_id from interface
        uint8_t table_id = interface;
        struct pva_kmd_device *pva;
        struct pva_kmd_context *ctx;
        uint32_t resource_id;

        uint8_t type = get_msg_type(data[0]);
        switch (type) {
        case PVA_FW_MSG_TYPE_RESOURCE_UNREGISTER: {
                uint8_t table_id =
                        PVA_EXTRACT(data[0], PVA_FW_MSG_RESOURCE_TABLE_ID_MSB,
                                    PVA_FW_MSG_RESOURCE_TABLE_ID_LSB, uint8_t);
                /* Resource table ID equals context id */
                struct pva_kmd_context *ctx =
                        pva_kmd_get_context(pva, table_id);
                uint32_t i;
        ASSERT(context != NULL);
        pva = (struct pva_kmd_device *)context;
        ctx = pva_kmd_get_context(pva, table_id);

                pva_kmd_mutex_lock(&ctx->resource_table_lock);
                for (i = 1; i < len; i++) {
                        pva_kmd_drop_resource(&ctx->ctx_resource_table,
                                              data[i]);
                }
                pva_kmd_mutex_unlock(&ctx->resource_table_lock);
                break;
        }
        default:
                FAULT("Unexpected CCQ msg type from FW");
                break;
        }
        ASSERT(ctx != NULL);
        ASSERT(element != NULL);

        /* Resource table ID equals context id */
        memcpy(&resource_id, element, sizeof(resource_id));

        // We do not lock the resource table here because this function is intended
        // to be called from the shared buffer processing function which should acquire
        // the required lock.
        pva_kmd_drop_resource_unsafe(&ctx->ctx_resource_table, resource_id);

        return PVA_SUCCESS;
}

@@ -1,13 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_MSG_H
#define PVA_KMD_MSG_H

#include "pva_api.h"

/**
@@ -24,3 +20,9 @@ void pva_kmd_handle_hyp_msg(void *pva_dev, uint32_t const *data, uint8_t len);
 * These messages come from CCQ0 status registers.
 */
void pva_kmd_handle_msg(void *pva_dev, uint32_t const *data, uint8_t len);

// TODO: move to a better location OR consolidate handling of all message types here
enum pva_error pva_kmd_handle_msg_resource_unreg(void *context,
                                                 uint8_t interface,
                                                 uint8_t *element);
#endif // PVA_KMD_MSG_H
@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_MUTEX_H
#define PVA_KMD_MUTEX_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "pva_kmd_op_handler.h"
#include "pva_kmd_resource_table.h"
#include "pva_kmd_device_memory.h"
@@ -101,18 +93,8 @@ pva_kmd_op_memory_register_async(struct pva_kmd_context *ctx,
                goto release;
        }

        if ((smmu_ctx_id == PVA_R5_SMMU_CONTEXT_ID) &&
            (dev_mem->iova < FW_SHARED_MEMORY_START)) {
                pva_kmd_log_err(
                        "Not able to map memory in the R5 shared region");
                err = PVA_NOMEM;
                goto unmap;
        }

        pva_kmd_mutex_lock(&ctx->resource_table_lock);
        err = pva_kmd_add_dram_buffer_resource(&ctx->ctx_resource_table,
                                               dev_mem, &resource_id);
        pva_kmd_mutex_unlock(&ctx->resource_table_lock);
        if (err != PVA_SUCCESS) {
                goto unmap;
        }
@@ -195,7 +177,6 @@ static enum pva_error pva_kmd_op_executable_register_async(
                goto err_out;
        }

        pva_kmd_mutex_lock(&ctx->resource_table_lock);
        err = pva_kmd_add_vpu_bin_resource(&ctx->ctx_resource_table, exec_data,
                                           args->size, &resource_id);
        if (err == PVA_SUCCESS) {
@@ -205,7 +186,6 @@ static enum pva_error pva_kmd_op_executable_register_async(
                num_symbols = rec->vpu_bin.symbol_table.n_symbols;
                pva_kmd_drop_resource(&ctx->ctx_resource_table, resource_id);
        }
        pva_kmd_mutex_unlock(&ctx->resource_table_lock);
        if (err != PVA_SUCCESS) {
                goto err_out;
        }
@@ -272,14 +252,12 @@ pva_kmd_op_dma_register_async(struct pva_kmd_context *ctx,
        // Discard the data we are about to pass to pva_kmd_add_dma_config_resource
        read_data(in_buffer, dma_cfg_payload_size);

        pva_kmd_mutex_lock(&ctx->resource_table_lock);
        dma_config_size =
                safe_addu32(dma_cfg_payload_size,
                            (uint32_t)sizeof(args->dma_config_header));
        err = pva_kmd_add_dma_config_resource(&ctx->ctx_resource_table,
                                              dma_cfg_data, dma_config_size,
                                              &resource_id);
        pva_kmd_mutex_unlock(&ctx->resource_table_lock);
        if (err != PVA_SUCCESS) {
                goto err_out;
        }
@@ -467,10 +445,8 @@ pva_kmd_op_syncpt_register_async(struct pva_kmd_context *ctx,
        dev_mem.size = ctx->pva->syncpt_offset * ctx->pva->num_syncpts;
        dev_mem.pva = ctx->pva;
        dev_mem.smmu_ctx_idx = PVA_R5_SMMU_CONTEXT_ID;
        pva_kmd_mutex_lock(&ctx->resource_table_lock);
        err = pva_kmd_add_syncpt_resource(&ctx->ctx_resource_table, &dev_mem,
                                          &resource_id);
        pva_kmd_mutex_unlock(&ctx->resource_table_lock);
        if (err != PVA_SUCCESS) {
                goto err_out;
        }
@@ -486,7 +462,8 @@ pva_kmd_op_syncpt_register_async(struct pva_kmd_context *ctx,
                update_cmd, ctx->resource_table_id, resource_id, &entry);

        /* Register RW syncpts */
        syncpts = (struct pva_syncpt_rw_info *)pva_kmd_get_block(
        pva_kmd_mutex_lock(&ctx->pva->syncpt_allocator.allocator_lock);
        syncpts = (struct pva_syncpt_rw_info *)pva_kmd_get_block_unsafe(
                &ctx->pva->syncpt_allocator, ctx->syncpt_block_index);
        ASSERT(syncpts != NULL);

@@ -496,20 +473,18 @@ pva_kmd_op_syncpt_register_async(struct pva_kmd_context *ctx,
        }

        dev_mem.iova = syncpts[0].syncpt_iova;
        pva_kmd_mutex_unlock(&ctx->pva->syncpt_allocator.allocator_lock);
        dev_mem.va = 0;
        dev_mem.size = ctx->pva->syncpt_offset * PVA_NUM_RW_SYNCPTS_PER_CONTEXT;
        dev_mem.pva = ctx->pva;
        dev_mem.smmu_ctx_idx = PVA_R5_SMMU_CONTEXT_ID;
        pva_kmd_mutex_lock(&ctx->resource_table_lock);
        err = pva_kmd_add_syncpt_resource(&ctx->ctx_resource_table, &dev_mem,
                                          &resource_id);
        pva_kmd_mutex_unlock(&ctx->resource_table_lock);
        if (err != PVA_SUCCESS) {
                goto err_out;
        }
        syncpt_register_out.syncpt_rw_res_id = resource_id;
        syncpt_register_out.synpt_size = ctx->pva->syncpt_offset;
        ctx->ctx_resource_table.syncpt_allocator = &ctx->pva->syncpt_allocator;
        update_cmd =
                pva_kmd_reserve_cmd_space(cmdbuf_builder, sizeof(*update_cmd));
        ASSERT(update_cmd != NULL);
@@ -556,6 +531,7 @@ static enum pva_error pva_kmd_op_queue_create(struct pva_kmd_context *ctx,
                err = PVA_INVAL;
                goto err_out;
        }

        pva_kmd_read_syncpt_val(ctx->pva, ctx->syncpt_ids[queue_id],
                                &queue_out_args.syncpt_fence_counter);

@@ -616,7 +592,6 @@ pva_kmd_op_executable_get_symbols(struct pva_kmd_context *ctx,

        sym_in_args = read_data(
                in_arg, sizeof(struct pva_kmd_executable_get_symbols_in_args));

        rec = pva_kmd_use_resource(&ctx->ctx_resource_table,
                                   sym_in_args->exec_resource_id);
        if (rec == NULL) {
@@ -693,8 +668,10 @@ pva_kmd_op_synced_submit(struct pva_kmd_context *ctx,
        err = pva_kmd_submitter_wait(&ctx->submitter, fence_val,
                                     PVA_KMD_WAIT_FW_POLL_INTERVAL_US,
                                     PVA_KMD_WAIT_FW_TIMEOUT_US);
        /* TODO: handle this error when FW reboot is supported */
        ASSERT(err == PVA_SUCCESS);

        if (err != PVA_SUCCESS) {
                goto err_out;
        }

        return PVA_SUCCESS;
cancel_submit:
@@ -710,12 +687,19 @@ static enum pva_error pva_kmd_sync_ops_handler(struct pva_kmd_context *ctx,
        enum pva_error err = PVA_SUCCESS;
        struct pva_kmd_op_header *header;

        if (ctx->pva->recovery) {
                pva_kmd_log_err("In Recovery state, do not accept ops");
                err = PVA_INVAL;
                goto out;
        }

        if (!access_ok(in_arg, sizeof(struct pva_kmd_op_header))) {
                err = PVA_INVAL;
                goto out;
        }

        header = read_data(in_arg, sizeof(struct pva_kmd_op_header));

        switch (header->op_type) {
        case PVA_KMD_OP_CONTEXT_INIT:
                err = pva_kmd_op_context_init(ctx, in_arg, out_arg);
@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_OP_HANDLER_H
#define PVA_KMD_OP_HANDLER_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "pva_kmd_utils.h"
#include "pva_fw.h"
#include "pva_kmd_device_memory.h"
@@ -159,8 +151,10 @@ enum pva_error pva_kmd_complete_resume(struct pva_kmd_device *pva)

                        /** Initialize resource table */
                        for (uint32_t j = 0; j < ctx->max_n_queues; j++) {
                                queue = pva_kmd_get_block(&ctx->queue_allocator,
                                                          j);
                                pva_kmd_mutex_lock(
                                        &ctx->queue_allocator.allocator_lock);
                                queue = pva_kmd_get_block_unsafe(
                                        &ctx->queue_allocator, j);
                                if (queue != NULL) {
                                        pva_dbg_printf(
                                                "PVA: Resume queue for context %d, queue %d\n",
@@ -180,6 +174,8 @@ enum pva_error pva_kmd_complete_resume(struct pva_kmd_device *pva)
                                                queue->queue_memory->iova,
                                                queue->max_num_submit);
                                }
                                pva_kmd_mutex_unlock(
                                        &ctx->queue_allocator.allocator_lock);
                        }
                }
        }

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_PM_H
#define PVA_KMD_PM_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "pva_kmd_utils.h"
#include "pva_fw.h"
#include "pva_kmd_device_memory.h"
@@ -117,7 +109,9 @@ static enum pva_error notify_fw_queue_deinit(struct pva_kmd_context *ctx,
        err = pva_kmd_submitter_wait(&ctx->submitter, fence_val,
                                     PVA_KMD_WAIT_FW_POLL_INTERVAL_US,
                                     PVA_KMD_WAIT_FW_TIMEOUT_US);
        ASSERT(err == PVA_SUCCESS);
        if (err != PVA_SUCCESS) {
                goto end;
        }
        return PVA_SUCCESS;
cancel_submitter:
        pva_kmd_cmdbuf_builder_cancel(&builder);
@@ -166,13 +160,6 @@ pva_kmd_queue_create(struct pva_kmd_context *ctx,
                goto err_free_kmd_memory;
        }

        if (submission_mem_kmd->iova < FW_SHARED_MEMORY_START) {
                pva_kmd_log_err(
                        "Not able to map memory in the R5 shared region");
                err = PVA_NOMEM;
                goto unmap_iova;
        }

        err = pva_kmd_submitter_prepare(&ctx->submitter, &builder);
        if (err != PVA_SUCCESS) {
                goto unmap_iova;
@@ -230,14 +217,20 @@ pva_kmd_queue_destroy(struct pva_kmd_context *ctx,
         * Send command to FW to stop queue usage. Wait for ack.
         * This call needs to be added after syncpoint and ccq functions are ready.
         */
        queue = pva_kmd_get_block(&ctx->queue_allocator, in_args->queue_id);
        pva_kmd_mutex_lock(&ctx->queue_allocator.allocator_lock);
        queue = pva_kmd_get_block_unsafe(&ctx->queue_allocator,
                                         in_args->queue_id);
        if (queue == NULL) {
                pva_kmd_mutex_unlock(&ctx->queue_allocator.allocator_lock);
                return PVA_INVAL;
        }

        err = notify_fw_queue_deinit(ctx, queue);
        if (err != PVA_SUCCESS) {
                return err;
        if (!ctx->pva->recovery) {
                err = notify_fw_queue_deinit(ctx, queue);
                if (err != PVA_SUCCESS) {
                        pva_kmd_mutex_unlock(
                                &ctx->queue_allocator.allocator_lock);
                        return err;
                }
        }

        pva_kmd_device_memory_iova_unmap(queue->queue_memory);
@@ -245,6 +238,7 @@ pva_kmd_queue_destroy(struct pva_kmd_context *ctx,
        pva_kmd_device_memory_free(queue->queue_memory);

        pva_kmd_queue_deinit(queue);
        pva_kmd_mutex_unlock(&ctx->queue_allocator.allocator_lock);

        err = pva_kmd_free_block(&ctx->queue_allocator, in_args->queue_id);
        ASSERT(err == PVA_SUCCESS);

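The queue hunks above consistently replace bare pva_kmd_get_block() lookups with an allocator_lock critical section around pva_kmd_get_block_unsafe(), unlocking on every exit path. A minimal sketch of that pattern, using a hypothetical helper name:

static enum pva_error
example_with_queue_locked(struct pva_kmd_context *ctx, uint8_t queue_id)
{
        struct pva_kmd_queue *queue;

        pva_kmd_mutex_lock(&ctx->queue_allocator.allocator_lock);
        queue = pva_kmd_get_block_unsafe(&ctx->queue_allocator, queue_id);
        if (queue == NULL) {
                /* Unlock on the early-exit path as well. */
                pva_kmd_mutex_unlock(&ctx->queue_allocator.allocator_lock);
                return PVA_INVAL;
        }

        /* ... operate on the queue while the lock is held ... */

        pva_kmd_mutex_unlock(&ctx->queue_allocator.allocator_lock);
        return PVA_SUCCESS;
}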
@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_QUEUE_H
#define PVA_KMD_QUEUE_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_REGS_H
#define PVA_KMD_REGS_H


@@ -1,15 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "pva_kmd_resource_table.h"
#include "pva_kmd_device.h"
#include "pva_kmd_context.h"
#include "pva_kmd_constants.h"

static uint32_t get_max_dma_config_size(struct pva_kmd_device *pva)
@@ -71,6 +64,7 @@ pva_kmd_resource_table_init(struct pva_kmd_resource_table *res_table,
        ASSERT(res_table->table_mem != NULL);

        pva_kmd_sema_init(&res_table->resource_semaphore, n_entries);
        pva_kmd_mutex_init(&res_table->resource_table_lock);

        size = (uint64_t)safe_mulu32(sizeof(struct pva_kmd_resource_record),
                                     n_entries);
@@ -103,20 +97,9 @@ pva_kmd_resource_table_init(struct pva_kmd_resource_table *res_table,
        return PVA_SUCCESS;
}

void pva_kmd_resource_table_deinit(struct pva_kmd_resource_table *res_table)
{
        pva_kmd_free(res_table->dma_aux);
        pva_kmd_block_allocator_deinit(&res_table->dma_config_allocator);
        pva_kmd_device_memory_free(res_table->dma_config_mem);
        pva_kmd_block_allocator_deinit(&res_table->resource_record_allocator);
        pva_kmd_free(res_table->records_mem);
        pva_kmd_sema_deinit(&res_table->resource_semaphore);
        pva_kmd_device_memory_free(res_table->table_mem);
}

static struct pva_kmd_resource_record *
pva_kmd_alloc_resource(struct pva_kmd_resource_table *resource_table,
                       uint32_t *out_resource_id)
pva_kmd_alloc_resource_id(struct pva_kmd_resource_table *resource_table,
                          uint32_t *out_resource_id)
{
        enum pva_error err;
        struct pva_kmd_resource_record *rec = NULL;
@@ -128,7 +111,10 @@ pva_kmd_alloc_resource(struct pva_kmd_resource_table *resource_table,
        }

        if (err != PVA_SUCCESS) {
                pva_kmd_log_err("Failed to wait for resource IDs");
                pva_dbg_printf(
                        "kmd: allocation failed. ctx_id = %u, n_entries = %u\n",
                        resource_table->user_smmu_ctx_id,
                        resource_table->n_entries);
                goto out;
        }

@@ -140,8 +126,9 @@ out:
        return rec;
}

static void pva_kmd_free_resource(struct pva_kmd_resource_table *resource_table,
                                  uint32_t resource_id)
static void
pva_kmd_free_resource_id(struct pva_kmd_resource_table *resource_table,
                         uint32_t resource_id)
{
        enum pva_error err;

@@ -152,22 +139,62 @@ static void pva_kmd_free_resource(struct pva_kmd_resource_table *resource_table,
        pva_kmd_sema_post(&resource_table->resource_semaphore);
}

static void
pva_kmd_release_resource(struct pva_kmd_resource_table *resource_table,
                         uint32_t resource_id)
{
        enum pva_error err;
        struct pva_kmd_resource_record *rec = pva_kmd_get_block_unsafe(
                &resource_table->resource_record_allocator, resource_id);

        ASSERT(rec != NULL);

        switch (rec->type) {
        case PVA_RESOURCE_TYPE_DRAM:
                if (rec->dram.syncpt != true) {
                        pva_kmd_device_memory_free(rec->dram.mem);
                }
                break;
        case PVA_RESOURCE_TYPE_EXEC_BIN:
                pva_kmd_unload_executable(&rec->vpu_bin.symbol_table,
                                          rec->vpu_bin.metainfo_mem,
                                          rec->vpu_bin.sections_mem);
                break;
        case PVA_RESOURCE_TYPE_DMA_CONFIG: {
                struct pva_kmd_dma_resource_aux *dma_aux;
                dma_aux = &resource_table->dma_aux[rec->dma_config.block_index];
                pva_kmd_unload_dma_config_unsafe(dma_aux);
                err = pva_kmd_free_block(&resource_table->dma_config_allocator,
                                         rec->dma_config.block_index);
                ASSERT(err == PVA_SUCCESS);
                break;
        }

        default:
                FAULT("Unsupported resource type");
        }

        pva_kmd_free_resource_id(resource_table, resource_id);
}

enum pva_error
pva_kmd_add_syncpt_resource(struct pva_kmd_resource_table *resource_table,
                            struct pva_kmd_device_memory *dev_mem,
                            uint32_t *out_resource_id)
{
        struct pva_kmd_resource_record *rec =
                pva_kmd_alloc_resource(resource_table, out_resource_id);
                pva_kmd_alloc_resource_id(resource_table, out_resource_id);

        if (rec == NULL) {
                pva_kmd_log_err("No more resource id");
                return PVA_NO_RESOURCE_ID;
        }

        pva_kmd_mutex_lock(&resource_table->resource_table_lock);
        if (*out_resource_id > resource_table->curr_max_resource_id) {
                resource_table->curr_max_resource_id = *out_resource_id;
        }
        pva_kmd_mutex_unlock(&resource_table->resource_table_lock);

        rec->type = PVA_RESOURCE_TYPE_DRAM;
        rec->dram.mem = dev_mem;
@@ -183,16 +210,18 @@ pva_kmd_add_dram_buffer_resource(struct pva_kmd_resource_table *resource_table,
                                 uint32_t *out_resource_id)
{
        struct pva_kmd_resource_record *rec =
                pva_kmd_alloc_resource(resource_table, out_resource_id);
                pva_kmd_alloc_resource_id(resource_table, out_resource_id);

        if (rec == NULL) {
                pva_kmd_log_err("No more resource id");
                return PVA_NO_RESOURCE_ID;
        }

        pva_kmd_mutex_lock(&resource_table->resource_table_lock);
        if (*out_resource_id > resource_table->curr_max_resource_id) {
                resource_table->curr_max_resource_id = *out_resource_id;
        }
        pva_kmd_mutex_unlock(&resource_table->resource_table_lock);

        rec->type = PVA_RESOURCE_TYPE_DRAM;
        rec->dram.mem = dev_mem;
@@ -213,15 +242,22 @@ get_fw_resource(struct pva_kmd_resource_table *res_table, uint32_t resource_id)
        return &entries[index];
}

/** Since this API is called only at init time, there is no need to lock each resource entry */
void pva_kmd_update_fw_resource_table(struct pva_kmd_resource_table *res_table)
{
        uint32_t id;
        struct pva_kmd_resource_record *rec;
        uint32_t max_resource_id;

        for (id = PVA_RESOURCE_ID_BASE; id <= res_table->curr_max_resource_id;
             id++) {
        /** This lock is unnecessary, but it is added to avoid the painful process of proving a false positive to Coverity */
        pva_kmd_mutex_lock(&res_table->resource_table_lock);
        max_resource_id = res_table->curr_max_resource_id;
        pva_kmd_mutex_unlock(&res_table->resource_table_lock);

        for (id = PVA_RESOURCE_ID_BASE; id <= max_resource_id; id++) {
                struct pva_resource_entry *entry =
                        get_fw_resource(res_table, id);
                struct pva_kmd_resource_record *rec = pva_kmd_get_block(
                rec = pva_kmd_get_block_unsafe(
                        &res_table->resource_record_allocator, id);
                if (rec == NULL) {
                        continue;
@@ -246,10 +282,10 @@ void pva_kmd_update_fw_resource_table(struct pva_kmd_resource_table *res_table)
        }

struct pva_kmd_resource_record *
pva_kmd_use_resource(struct pva_kmd_resource_table *res_table,
                     uint32_t resource_id)
pva_kmd_use_resource_unsafe(struct pva_kmd_resource_table *res_table,
                            uint32_t resource_id)
{
        struct pva_kmd_resource_record *rec = pva_kmd_get_block(
        struct pva_kmd_resource_record *rec = pva_kmd_get_block_unsafe(
                &res_table->resource_record_allocator, resource_id);

        if (rec == NULL) {
@@ -260,11 +296,31 @@ pva_kmd_use_resource(struct pva_kmd_resource_table *res_table,
        return rec;
}

struct pva_kmd_resource_record *
pva_kmd_use_resource(struct pva_kmd_resource_table *res_table,
                     uint32_t resource_id)
{
        struct pva_kmd_resource_record *rec;
        pva_kmd_mutex_lock(&res_table->resource_table_lock);
        rec = pva_kmd_get_block_unsafe(&res_table->resource_record_allocator,
                                       resource_id);

        if (rec == NULL) {
                pva_kmd_mutex_unlock(&res_table->resource_table_lock);
                return NULL;
        }

        rec->ref_count = safe_addu32(rec->ref_count, 1U);
        pva_kmd_mutex_unlock(&res_table->resource_table_lock);
        return rec;
}

/** This API is not thread-safe, but it is only used inside pva_kmd_load_dma_config, which is already protected */
struct pva_kmd_resource_record *
pva_kmd_peek_resource(struct pva_kmd_resource_table *res_table,
                      uint32_t resource_id)
{
        struct pva_kmd_resource_record *rec = pva_kmd_get_block(
        struct pva_kmd_resource_record *rec = pva_kmd_get_block_unsafe(
                &res_table->resource_record_allocator, resource_id);

        return rec;
@@ -272,47 +328,27 @@ pva_kmd_peek_resource(struct pva_kmd_resource_table *res_table,

void pva_kmd_drop_resource(struct pva_kmd_resource_table *resource_table,
                           uint32_t resource_id)
{
        pva_kmd_mutex_lock(&resource_table->resource_table_lock);
        pva_kmd_drop_resource_unsafe(resource_table, resource_id);
        pva_kmd_mutex_unlock(&resource_table->resource_table_lock);
}

void pva_kmd_drop_resource_unsafe(struct pva_kmd_resource_table *resource_table,
                                  uint32_t resource_id)
{
        struct pva_kmd_resource_record *rec;

        rec = pva_kmd_get_block(&resource_table->resource_record_allocator,
                                resource_id);
        rec = pva_kmd_get_block_unsafe(
                &resource_table->resource_record_allocator, resource_id);

        ASSERT(rec != NULL);
        if (rec == NULL) {
                return;
        }

        rec->ref_count = safe_subu32(rec->ref_count, 1U);
        if (rec->ref_count == 0) {
                pva_dbg_printf("Dropping resource %u of type %u\n", resource_id,
                               rec->type);
                switch (rec->type) {
                case PVA_RESOURCE_TYPE_DRAM:
                        if (rec->dram.syncpt != true) {
                                pva_kmd_device_memory_free(rec->dram.mem);
                        }
                        break;
                case PVA_RESOURCE_TYPE_EXEC_BIN:
                        pva_kmd_unload_executable(&rec->vpu_bin.symbol_table,
                                                  rec->vpu_bin.metainfo_mem,
                                                  rec->vpu_bin.sections_mem);
                        break;
                case PVA_RESOURCE_TYPE_DMA_CONFIG: {
                        struct pva_kmd_dma_resource_aux *dma_aux;
                        dma_aux =
                                &resource_table
                                         ->dma_aux[rec->dma_config.block_index];
                        pva_kmd_unload_dma_config(dma_aux);
                        pva_kmd_free_block(
                                &resource_table->dma_config_allocator,
                                rec->dma_config.block_index);
                        break;
                }

                default:
                        pva_kmd_log_err("Unsupported resource type");
                        pva_kmd_fault();
                }

                pva_kmd_free_resource(resource_table, resource_id);
                pva_kmd_release_resource(resource_table, resource_id);
        }
}

@@ -323,7 +359,7 @@ pva_kmd_add_vpu_bin_resource(struct pva_kmd_resource_table *resource_table,
{
        uint32_t res_id;
        struct pva_kmd_resource_record *rec =
                pva_kmd_alloc_resource(resource_table, &res_id);
                pva_kmd_alloc_resource_id(resource_table, &res_id);
        enum pva_error err;
        struct pva_kmd_vpu_bin_resource *vpu_bin;

@@ -341,9 +377,11 @@ pva_kmd_add_vpu_bin_resource(struct pva_kmd_resource_table *resource_table,
                goto free_block;
        }

        pva_kmd_mutex_lock(&resource_table->resource_table_lock);
        if (res_id > resource_table->curr_max_resource_id) {
                resource_table->curr_max_resource_id = res_id;
        }
        pva_kmd_mutex_unlock(&resource_table->resource_table_lock);

        rec->type = PVA_RESOURCE_TYPE_EXEC_BIN;
        rec->ref_count = 1;
@@ -351,7 +389,7 @@ pva_kmd_add_vpu_bin_resource(struct pva_kmd_resource_table *resource_table,

        return PVA_SUCCESS;
free_block:
        pva_kmd_free_resource(resource_table, res_id);
        pva_kmd_free_resource_id(resource_table, res_id);
err_out:
        return err;
}
@@ -361,8 +399,8 @@ pva_kmd_make_resource_entry(struct pva_kmd_resource_table *resource_table,
                            uint32_t resource_id,
                            struct pva_resource_entry *entry)
{
        struct pva_kmd_resource_record *rec =
                pva_kmd_use_resource(resource_table, resource_id);
        struct pva_kmd_resource_record *rec;
        rec = pva_kmd_use_resource(resource_table, resource_id);
        if (rec == NULL) {
                return PVA_NO_RESOURCE_ID;
        }
@@ -426,22 +464,26 @@ enum pva_error pva_kmd_add_dma_config_resource(

        dma_aux = &resource_table->dma_aux[block_idx];

        pva_kmd_mutex_lock(&resource_table->resource_table_lock);
        err = pva_kmd_load_dma_config(resource_table, dma_config_payload,
                                      dma_config_size, dma_aux, fw_dma_cfg,
                                      &fw_fetch_size);
        pva_kmd_mutex_unlock(&resource_table->resource_table_lock);
        if (err != PVA_SUCCESS) {
                goto free_block;
        }

        rec = pva_kmd_alloc_resource(resource_table, &res_id);
        rec = pva_kmd_alloc_resource_id(resource_table, &res_id);
        if (rec == NULL) {
                err = PVA_NO_RESOURCE_ID;
                goto unload_dma;
        }

        pva_kmd_mutex_lock(&resource_table->resource_table_lock);
        if (res_id > resource_table->curr_max_resource_id) {
                resource_table->curr_max_resource_id = res_id;
        }
        pva_kmd_mutex_unlock(&resource_table->resource_table_lock);

        rec->type = PVA_RESOURCE_TYPE_DMA_CONFIG;
        rec->ref_count = 1;
@@ -457,21 +499,60 @@ enum pva_error pva_kmd_add_dma_config_resource(

        return PVA_SUCCESS;
unload_dma:
        pva_kmd_unload_dma_config(dma_aux);
        pva_kmd_mutex_lock(&resource_table->resource_table_lock);
        pva_kmd_unload_dma_config_unsafe(dma_aux);
        pva_kmd_mutex_unlock(&resource_table->resource_table_lock);
free_block:
        pva_kmd_free_block(&resource_table->dma_config_allocator, block_idx);
err_out:
        return err;
}

void pva_kmd_verify_all_resources_free(
        struct pva_kmd_resource_table *resource_table)
static enum pva_error
pva_kmd_release_all_resources(struct pva_kmd_resource_table *res_table)
{
        enum pva_error err;
        for (uint32_t i = 0; i < resource_table->n_entries; i++) {
                err = pva_kmd_sema_wait_timeout(
                        &resource_table->resource_semaphore,
                        PVA_KMD_TIMEOUT_RESOURCE_SEMA_MS);
                ASSERT(err == PVA_SUCCESS);
        uint32_t id;

        pva_kmd_mutex_lock(&res_table->resource_table_lock);

        // Iterate through all possible resource IDs
        for (id = PVA_RESOURCE_ID_BASE; id <= res_table->curr_max_resource_id;
             id++) {
                struct pva_kmd_resource_record *rec =
                        pva_kmd_peek_resource(res_table, id);
                if (rec != NULL) {
                        pva_kmd_release_resource(res_table, id);
                }
        }
        pva_kmd_mutex_unlock(&res_table->resource_table_lock);
        return PVA_SUCCESS;
}

void pva_kmd_resource_table_deinit(struct pva_kmd_resource_table *res_table)
{
        pva_kmd_release_all_resources(res_table);
        pva_kmd_free(res_table->dma_aux);
        pva_kmd_block_allocator_deinit(&res_table->dma_config_allocator);
        pva_kmd_device_memory_free(res_table->dma_config_mem);
        pva_kmd_block_allocator_deinit(&res_table->resource_record_allocator);
        pva_kmd_free(res_table->records_mem);
        pva_kmd_mutex_deinit(&res_table->resource_table_lock);
        pva_kmd_sema_deinit(&res_table->resource_semaphore);
        pva_kmd_device_memory_free(res_table->table_mem);
}

void pva_kmd_resource_table_lock(struct pva_kmd_device *pva,
                                 uint8_t res_table_id)
{
        struct pva_kmd_context *ctx = pva_kmd_get_context(pva, res_table_id);

        pva_kmd_mutex_lock(&ctx->ctx_resource_table.resource_table_lock);
}

void pva_kmd_resource_table_unlock(struct pva_kmd_device *pva,
                                   uint8_t res_table_id)
{
        struct pva_kmd_context *ctx = pva_kmd_get_context(pva, res_table_id);

        pva_kmd_mutex_unlock(&ctx->ctx_resource_table.resource_table_lock);
}

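Record lifetime in this file follows a simple reference-counting contract: pva_kmd_use_resource() takes resource_table_lock and increments ref_count, and pva_kmd_drop_resource() takes the same lock, decrements it, and releases the backing memory once the count reaches zero. A sketch of the intended pairing (the helper name example_use_then_drop is hypothetical):

static void example_use_then_drop(struct pva_kmd_context *ctx,
                                  uint32_t resource_id)
{
        struct pva_kmd_resource_record *rec;

        /* Locks the table internally and bumps ref_count on success. */
        rec = pva_kmd_use_resource(&ctx->ctx_resource_table, resource_id);
        if (rec == NULL) {
                return;
        }

        /* ... read rec; the record cannot be released while it is held ... */

        /* Locks the table, decrements ref_count, and frees the backing
         * memory once the count reaches zero. */
        pva_kmd_drop_resource(&ctx->ctx_resource_table, resource_id);
}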
@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_RESOURCE_TABLE_H
#define PVA_KMD_RESOURCE_TABLE_H
#include "pva_fw.h"
@@ -85,13 +77,11 @@ struct pva_kmd_resource_table {
         * allocation shared by all DMA configs */
        struct pva_kmd_dma_resource_aux *dma_aux;

        /** Pointer to syncpt_allocator in pva_kmd_device created during kmd boot */
        struct pva_kmd_block_allocator *syncpt_allocator;

        /** Memory for resource records */
        void *records_mem;
        struct pva_kmd_block_allocator resource_record_allocator;
        struct pva_kmd_device *pva;
        pva_kmd_mutex_t resource_table_lock;
};

enum pva_error
@@ -132,6 +122,10 @@ pva_kmd_add_dma_config_resource(struct pva_kmd_resource_table *resource_table,
 * TODO: make use and drop thread safe.
 * */
struct pva_kmd_resource_record *
pva_kmd_use_resource_unsafe(struct pva_kmd_resource_table *resource_table,
                            uint32_t resource_id);

struct pva_kmd_resource_record *
pva_kmd_use_resource(struct pva_kmd_resource_table *resource_table,
                     uint32_t resource_id);

@@ -142,12 +136,17 @@ pva_kmd_peek_resource(struct pva_kmd_resource_table *resource_table,
void pva_kmd_drop_resource(struct pva_kmd_resource_table *resource_table,
                           uint32_t resource_id);

void pva_kmd_drop_resource_unsafe(struct pva_kmd_resource_table *resource_table,
                                  uint32_t resource_id);

enum pva_error
pva_kmd_make_resource_entry(struct pva_kmd_resource_table *resource_table,
                            uint32_t resource_id,
                            struct pva_resource_entry *entry);

void pva_kmd_verify_all_resources_free(
        struct pva_kmd_resource_table *resource_table);
void pva_kmd_resource_table_lock(struct pva_kmd_device *pva,
                                 uint8_t res_table_id);

void pva_kmd_resource_table_unlock(struct pva_kmd_device *pva,
                                   uint8_t res_table_id);
#endif // PVA_KMD_RESOURCE_TABLE_H

@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2023, NVIDIA Corporation. All rights reserved.
 */
// SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_sha256.h"


@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2021-2023, NVIDIA Corporation. All rights reserved.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_SHA256_H
#define PVA_KMD_SHA256_H

@@ -0,0 +1,248 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_abort.h"
#include "pva_kmd_device.h"
#include "pva_kmd_shared_buffer.h"

static void
setup_cmd_init_shared_dram_buffer(void *cmd, uint8_t interface,
                                  struct pva_kmd_shared_buffer *fw_buffer)
{
        struct pva_cmd_init_shared_dram_buffer *init_cmd =
                (struct pva_cmd_init_shared_dram_buffer *)cmd;

        pva_kmd_set_cmd_init_shared_dram_buffer(
                init_cmd, interface, fw_buffer->resource_memory->iova,
                fw_buffer->resource_memory->size);
}

static void
setup_cmd_deinit_shared_dram_buffer(void *cmd, uint8_t interface,
                                    struct pva_kmd_shared_buffer *fw_buffer)
{
        struct pva_cmd_deinit_shared_dram_buffer *deinit_cmd =
                (struct pva_cmd_deinit_shared_dram_buffer *)cmd;

        pva_kmd_set_cmd_deinit_shared_dram_buffer(deinit_cmd, interface);
}

static enum pva_error
notify_fw(struct pva_kmd_device *pva, uint8_t interface,
          void (*setup_cmd_cb)(void *cmd, uint8_t interface,
                               struct pva_kmd_shared_buffer *fw_buffer),
          size_t cmd_size)
{
        enum pva_error err;
        struct pva_kmd_cmdbuf_builder builder;
        struct pva_kmd_submitter *dev_submitter = &pva->submitter;
        struct pva_kmd_shared_buffer *fw_buffer;
        void *cmd_space;
        uint32_t fence_val;

        ASSERT(interface < PVA_MAX_NUM_CCQ);

        fw_buffer = &pva->kmd_fw_buffers[interface];

        err = pva_kmd_submitter_prepare(dev_submitter, &builder);
        if (err != PVA_SUCCESS) {
                goto err_out;
        }

        // Make sure FW buffer was allocated
        ASSERT(fw_buffer->header != NULL);

        cmd_space = pva_kmd_reserve_cmd_space(&builder, cmd_size);
        ASSERT(cmd_space != NULL);

        // Let the setup callback configure the specific command
        setup_cmd_cb(cmd_space, interface, fw_buffer);

        err = pva_kmd_submitter_submit(dev_submitter, &builder, &fence_val);
        if (err != PVA_SUCCESS) {
                // Error is either QUEUE_FULL or TIMEDOUT
                goto cancel_builder;
        }

        err = pva_kmd_submitter_wait(dev_submitter, fence_val,
                                     PVA_KMD_WAIT_FW_POLL_INTERVAL_US,
                                     PVA_KMD_WAIT_FW_TIMEOUT_US);
        if (err != PVA_SUCCESS) {
                pva_kmd_log_err(
                        "Waiting for FW timed out while processing buffer command");
                goto err_out;
        }

        return PVA_SUCCESS;

cancel_builder:
        pva_kmd_cmdbuf_builder_cancel(&builder);
err_out:
        return err;
}

enum pva_error pva_kmd_shared_buffer_init(
        struct pva_kmd_device *pva, uint8_t interface, uint32_t element_size,
        uint32_t num_entries, shared_buffer_process_element_cb process_cb,
        shared_buffer_lock_cb lock_cb, shared_buffer_lock_cb unlock_cb)
{
        enum pva_error err = PVA_SUCCESS;

        struct pva_kmd_device_memory *device_memory;
        struct pva_kmd_shared_buffer *buffer;
        uint64_t buffer_size;

        ASSERT(interface < PVA_MAX_NUM_CCQ);
        buffer = &pva->kmd_fw_buffers[interface];

        // Ensure that the buffer body is a multiple of 'element size'
        buffer_size = safe_mulu64(num_entries, element_size);
        buffer_size = safe_addu64(buffer_size,
                                  sizeof(struct pva_fw_shared_buffer_header));

        device_memory = pva_kmd_device_memory_alloc_map(
                buffer_size, pva, PVA_ACCESS_RW, PVA_R5_SMMU_CONTEXT_ID);
        if (device_memory == NULL) {
                return PVA_NOMEM;
        }

        buffer->header =
                (struct pva_fw_shared_buffer_header *)device_memory->va;
        buffer->header->flags = 0U;
        buffer->header->element_size = element_size;
        buffer->header->head = 0U;
        buffer->header->tail = 0U;
        buffer->body =
                (pva_offset_pointer(buffer->header, sizeof(*buffer->header)));
        buffer->process_cb = process_cb;
        buffer->lock_cb = lock_cb;
        buffer->unlock_cb = unlock_cb;
        buffer->resource_offset = 0U;
        buffer->resource_memory = device_memory;

        err = pva_kmd_bind_shared_buffer_handler(pva, interface, pva);
        if (err != PVA_SUCCESS) {
                goto free_buffer_memory;
        }

        err = notify_fw(pva, interface, setup_cmd_init_shared_dram_buffer,
                        sizeof(struct pva_cmd_init_shared_dram_buffer));
        if (err != PVA_SUCCESS) {
                goto release_handler;
        }

        return err;

release_handler:
        pva_kmd_release_shared_buffer_handler(pva, interface);
free_buffer_memory:
        pva_kmd_device_memory_free(device_memory);
        return err;
}

enum pva_error pva_kmd_shared_buffer_deinit(struct pva_kmd_device *pva,
                                            uint8_t interface)
{
        enum pva_error err = PVA_SUCCESS;
        struct pva_kmd_shared_buffer *buffer;

        ASSERT(interface < PVA_MAX_NUM_CCQ);
        buffer = &pva->kmd_fw_buffers[interface];

        if (!pva->recovery) {
                err = notify_fw(
                        pva, interface, setup_cmd_deinit_shared_dram_buffer,
                        sizeof(struct pva_cmd_deinit_shared_dram_buffer));
                if (err != PVA_SUCCESS) {
                        pva_kmd_log_err("Failed to deinit FW buffer");
                }
        }
        pva_kmd_release_shared_buffer_handler(pva, interface);

        pva_kmd_shared_buffer_process(pva, interface);

        pva_kmd_device_memory_free(buffer->resource_memory);
        buffer->resource_memory = NULL;

        return err;
}

void pva_kmd_shared_buffer_process(void *pva_dev, uint8_t interface)
{
        struct pva_kmd_device *pva = (struct pva_kmd_device *)pva_dev;
        struct pva_kmd_shared_buffer *fw_buffer =
                &pva->kmd_fw_buffers[interface];
        uint32_t *buffer_head;
        uint32_t buffer_tail;
        uint32_t buffer_size;
        uint8_t *buffer_body;
        uint32_t element_size;
        uint8_t *current_element = NULL;

        ASSERT(fw_buffer->resource_memory->size > sizeof(*fw_buffer->header));

        buffer_head = &fw_buffer->header->head;
        buffer_tail = fw_buffer->header->tail;
        buffer_size =
                fw_buffer->resource_memory->size - sizeof(*fw_buffer->header);
        buffer_body = fw_buffer->body;
        element_size = fw_buffer->header->element_size;

        ASSERT(buffer_body != NULL);

        // Ensure element size fits within the buffer
        ASSERT(buffer_size % element_size == 0);

        // Check the buffer header to see if there was an overflow
        if (fw_buffer->header->flags & PVA_KMD_FW_BUF_FLAG_OVERFLOW) {
                // Clear the overflow flag.
                // Note: this might be error-prone. We are writing the flag
                // here and, at the same time, the FW might be updating the
                // flag too. Since the flag is only being used to detect
                // overflow today, we will ignore this issue for now.
                fw_buffer->header->flags &= ~PVA_KMD_FW_BUF_FLAG_OVERFLOW;

                // Log the overflow
                pva_kmd_log_err_u64("Buffer overflow detected on interface",
                                    interface);

                if (interface >= PVA_USER_CCQ_BASE) {
                        // Buffers corresponding to user CCQs are used only for
                        // sending resource unregistration requests to KMD.
                        // If there is an overflow on this interface, we should
                        // abort the associated user context in order to
                        // prevent a further memory leak.
                        // Note that ideally this should never happen, as the
                        // buffer is expected to be the same size as the
                        // resource table.
                        // TODO: abort only the user context, not the device.
                        pva_kmd_abort(pva);
                }

                // The buffer corresponding to CCQ 0 is used for sending
                // messages common to a VM. Today, these messages are only FW
                // profiling and NSIGHT profiling messages. Even if there is
                // an overflow, we can continue processing the buffer.
                // We will drop the overflowed messages.
        }

        if (fw_buffer->lock_cb != NULL) {
                fw_buffer->lock_cb(pva, interface);
        }

        // Loop while `head` has not yet caught up to `tail`
        while (*buffer_head != buffer_tail) {
                // Ensure current position is valid
                ASSERT(*buffer_head < buffer_size);

                // Retrieve the current element in the buffer
                current_element = (void *)&buffer_body[*buffer_head];

                // Call the user-provided callback with the current element and context
                fw_buffer->process_cb(pva, interface, current_element);

                // Advance the head pointer in a circular buffer fashion
                *buffer_head = (*buffer_head + element_size) % buffer_size;
        }

        if (fw_buffer->unlock_cb != NULL) {
                fw_buffer->unlock_cb(pva, interface);
        }
}
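The consumer loop above relies on the buffer body being an exact multiple of element_size: the head index then always lands on an element boundary and wraps to zero precisely after the last element. A standalone model of that arithmetic (illustrative only; advance_head is not a function in this change):

#include <stdint.h>

/* Model of the head advance used by pva_kmd_shared_buffer_process(). */
static uint32_t advance_head(uint32_t head, uint32_t element_size,
                             uint32_t buffer_size)
{
        /* Requires buffer_size % element_size == 0, as asserted above. */
        return (head + element_size) % buffer_size;
}

/* For example, with element_size = 4 and buffer_size = 12, head visits
 * 0 -> 4 -> 8 -> 0 and never straddles an element boundary. */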
@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_SHARED_BUFFER_H
#define PVA_KMD_SHARED_BUFFER_H

#include "pva_kmd_device.h"

typedef enum pva_error (*shared_buffer_process_element_cb)(void *context,
                                                           uint8_t interface,
                                                           uint8_t *element);

typedef void (*shared_buffer_lock_cb)(struct pva_kmd_device *pva,
                                      uint8_t interface);

struct pva_kmd_shared_buffer {
        // Only 'header' is in shared DRAM memory.
        // The other fields are local to KMD and should be used for internal
        // bookkeeping.
        struct pva_fw_shared_buffer_header *header;
        // 'body' tracks the beginning of the buffer contents in DRAM
        uint8_t *body;
        // 'process_cb' callback is used to process elements in the buffer
        shared_buffer_process_element_cb process_cb;
        // 'lock_cb' callback is used to lock the buffer
        shared_buffer_lock_cb lock_cb;
        // 'unlock_cb' callback is used to unlock the buffer
        shared_buffer_lock_cb unlock_cb;
        // 'resource_memory' is used to track the memory allocated for the buffer
        struct pva_kmd_device_memory *resource_memory;
        // 'resource_offset' is used to track the offset of the buffer in 'resource_id'
        uint32_t resource_offset;
};

enum pva_error pva_kmd_shared_buffer_init(
        struct pva_kmd_device *pva, uint8_t interface, uint32_t element_size,
        uint32_t num_entries, shared_buffer_process_element_cb process_cb,
        shared_buffer_lock_cb lock_cb, shared_buffer_lock_cb unlock_cb);

enum pva_error pva_kmd_shared_buffer_deinit(struct pva_kmd_device *pva,
                                            uint8_t interface);

void pva_kmd_shared_buffer_process(void *pva_dev, uint8_t interface);

enum pva_error pva_kmd_bind_shared_buffer_handler(void *pva_dev,
                                                  uint8_t interface,
                                                  void *data);

void pva_kmd_release_shared_buffer_handler(void *pva_dev, uint8_t interface);
#endif
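Given the matching callback signatures introduced elsewhere in this change (pva_kmd_handle_msg_resource_unreg for elements, pva_kmd_resource_table_lock/unlock for locking), a plausible wiring for a user CCQ could look like the sketch below. The element size, entry count, and helper name are assumptions for illustration, not values taken from this change:

#include "pva_kmd_msg.h"
#include "pva_kmd_resource_table.h"
#include "pva_kmd_shared_buffer.h"

static enum pva_error example_init_unreg_buffer(struct pva_kmd_device *pva,
                                                uint8_t interface)
{
        /* Each element carries one 32-bit resource ID to unregister. */
        return pva_kmd_shared_buffer_init(
                pva, interface, (uint32_t)sizeof(uint32_t),
                64U, /* assumed entry count */
                pva_kmd_handle_msg_resource_unreg,
                pva_kmd_resource_table_lock,
                pva_kmd_resource_table_unlock);
}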
@@ -1,13 +1,5 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
|
||||
*
|
||||
* NVIDIA Corporation and its licensors retain all intellectual property and
|
||||
* proprietary rights in and to this software and related documentation. Any
|
||||
* use, reproduction, disclosure or distribution of this software and related
|
||||
* documentation without an express license agreement from NVIDIA Corporation
|
||||
* is strictly prohibited.
|
||||
*/
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
|
||||
#include "pva_kmd_device.h"
|
||||
#include "pva_fw_address_map.h"
|
||||
@@ -32,9 +24,11 @@ static void init_fw_print_buffer(struct pva_kmd_fw_print_buffer *print_buffer,
|
||||
print_buffer->buffer_info = pva_offset_pointer(
|
||||
debug_buffer_va,
|
||||
FW_TRACE_BUFFER_SIZE + FW_CODE_COVERAGE_BUFFER_SIZE);
|
||||
print_buffer->size =
|
||||
print_buffer->buffer_info->size =
|
||||
FW_DEBUG_LOG_BUFFER_SIZE - sizeof(*print_buffer->buffer_info);
|
||||
print_buffer->head = 0;
|
||||
print_buffer->buffer_info->head = 0;
|
||||
print_buffer->buffer_info->tail = 0;
|
||||
print_buffer->buffer_info->flags = 0;
|
||||
print_buffer->content = pva_offset_pointer(
|
||||
print_buffer->buffer_info, sizeof(*print_buffer->buffer_info));
|
||||
}
|
||||
@@ -165,6 +159,36 @@ void pva_kmd_config_sid(struct pva_kmd_device *pva)
|
||||
}
|
||||
}
|
||||
|
||||
static uint32_t pva_kmd_get_syncpt_ro_offset(struct pva_kmd_device *pva)
|
||||
{
|
||||
if (pva->num_syncpts > 0U) {
|
||||
uint64_t offset;
|
||||
offset = safe_subu64(pva->syncpt_ro_iova,
|
||||
pva_kmd_get_r5_iova_start());
|
||||
|
||||
ASSERT(offset <= UINT32_MAX);
|
||||
return (uint32_t)offset;
|
||||
} else {
|
||||
// This is only for SIM mode where syncpoints are not supported.
|
||||
return PVA_R5_SYNCPT_REGION_IOVA_OFFSET_NOT_SET;
|
||||
}
|
||||
}
|
||||
|
||||
static uint32_t pva_kmd_get_syncpt_rw_offset(struct pva_kmd_device *pva)
|
||||
{
|
||||
if (pva->num_syncpts > 0U) {
|
||||
uint64_t offset;
|
||||
offset = safe_subu64(pva->syncpt_rw_iova,
|
||||
pva_kmd_get_r5_iova_start());
|
||||
|
||||
ASSERT(offset <= UINT32_MAX);
|
||||
return (uint32_t)offset;
|
||||
} else {
|
||||
// This is only for SIM mode where syncpoints are not supported.
|
||||
return PVA_R5_SYNCPT_REGION_IOVA_OFFSET_NOT_SET;
|
||||
}
|
||||
}
|
||||
|
||||
enum pva_error pva_kmd_init_fw(struct pva_kmd_device *pva)
|
||||
{
|
||||
uint64_t seg_reg_value;
|
||||
@@ -220,12 +244,8 @@ enum pva_error pva_kmd_init_fw(struct pva_kmd_device *pva)
|
||||
/* Write shared memory allocation start address to mailbox and FW will
|
||||
* program user segment register accordingly so that virtual address
|
||||
* PVA_SHARED_MEMORY_START will point to the allocation start address.
|
||||
*
|
||||
* We deliberately also choose PVA_SHARED_MEMORY_START as the allocation
|
||||
* start address so that the net result is that user segment register
|
||||
* will be programmed to 0.
|
||||
*/
|
||||
seg_reg_value = FW_SHARED_MEMORY_START;
|
||||
seg_reg_value = pva_kmd_get_r5_iova_start();
|
||||
pva_kmd_write_mailbox(pva, PVA_MBOXID_USERSEG_L,
|
||||
iova_lo(seg_reg_value));
|
||||
pva_kmd_write_mailbox(pva, PVA_MBOXID_USERSEG_H,
|
||||
@@ -248,7 +268,7 @@ enum pva_error pva_kmd_init_fw(struct pva_kmd_device *pva)
|
||||
pva_kmd_write(pva, pva->regspec.sec_lic_intr_enable,
|
||||
PVA_BIT(0) /*Watchdog*/
|
||||
| PVA_INSERT(0x1, 4, 1) /* HSP1 */
|
||||
| PVA_INSERT(0x7, 7, 5) /* All H1X errors */);
|
||||
| PVA_INSERT(0x3, 7, 5) /* All H1X errors */);
|
||||
|
||||
/* Bind interrupts */
|
||||
err = pva_kmd_bind_intr_handler(pva, PVA_KMD_INTR_LINE_SEC_LIC,
|
||||
@@ -256,11 +276,6 @@ enum pva_error pva_kmd_init_fw(struct pva_kmd_device *pva)
|
||||
if (err != PVA_SUCCESS) {
|
||||
goto free_fw_debug_mem;
|
||||
}
|
||||
err = pva_kmd_bind_intr_handler(pva, PVA_KMD_INTR_LINE_CCQ0,
|
||||
pva_kmd_isr, pva);
|
||||
if (err != PVA_SUCCESS) {
|
||||
goto free_sec_lic;
|
||||
}
|
||||
|
||||
/* Take R5 out of reset */
|
||||
pva_kmd_write(pva, PVA_REG_PROC_CPUHALT_ADDR, 0x1);
|
||||
@@ -271,13 +286,12 @@ enum pva_error pva_kmd_init_fw(struct pva_kmd_device *pva)
|
||||
|
||||
if (err != PVA_SUCCESS) {
|
||||
pva_kmd_log_err("Waiting for FW boot timed out.");
|
||||
goto free_ccq0;
|
||||
goto free_sec_lic;
|
||||
}
|
||||
pva->recovery = false;
|
||||
|
||||
return err;
|
||||
|
||||
free_ccq0:
|
||||
pva_kmd_free_intr(pva, PVA_KMD_INTR_LINE_CCQ0);
|
||||
free_sec_lic:
|
||||
pva_kmd_free_intr(pva, PVA_KMD_INTR_LINE_SEC_LIC);
|
||||
free_fw_debug_mem:
|
||||
@@ -293,7 +307,6 @@ out:
|
||||
|
||||
void pva_kmd_deinit_fw(struct pva_kmd_device *pva)
|
||||
{
|
||||
pva_kmd_free_intr(pva, PVA_KMD_INTR_LINE_CCQ0);
|
||||
pva_kmd_free_intr(pva, PVA_KMD_INTR_LINE_SEC_LIC);
|
||||
pva_kmd_drain_fw_print(&pva->fw_print_buffer);
|
||||
|
||||
|
||||
@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2025, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_SILICON_BOOT_H
#define PVA_KMD_SILICON_BOOT_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_silicon_elf_parser.h"
#include "pva_kmd_utils.h"

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_SILICON_ELF_PARSER_H
#define PVA_KMD_SILICON_ELF_PARSER_H
#include "pva_api.h"

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_executable.h"
#include "pva_kmd_silicon_elf_parser.h"

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2025, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "pva_kmd_device.h"
#include "pva_kmd_silicon_hwpm.h"
#include "pva_kmd_silicon_utils.h"

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2025, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_SILICON_HWPM_H
#define PVA_KMD_SILICON_HWPM_H
#include "pva_kmd.h"

@@ -1,18 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_silicon_isr.h"
#include "pva_kmd_device.h"
#include "pva_fw_hyp.h"
#include "pva_kmd_msg.h"
#include "pva_kmd_abort.h"

struct pva_fw_msg {
	uint8_t len;
@@ -33,12 +26,14 @@ static void read_hyp_msg(struct pva_kmd_device *pva, struct pva_fw_msg *msg)
	}
}

void pva_kmd_hyp_isr(void *data)
void pva_kmd_hyp_isr(void *data, enum pva_kmd_intr_line intr_line)
{
	struct pva_kmd_device *pva = data;
	uint32_t intr_status;
	uint32_t wdt_val, hsp_val, h1x_val;

	(void)intr_line;

	intr_status = pva_kmd_read(pva, pva->regspec.sec_lic_intr_status);

	wdt_val = PVA_EXTRACT(intr_status, PVA_REG_SEC_LIC_INTR_WDT_MSB,
@@ -54,8 +49,8 @@ void pva_kmd_hyp_isr(void *data)
			      intr_status &
			      PVA_MASK(PVA_REG_SEC_LIC_INTR_WDT_MSB,
				       PVA_REG_SEC_LIC_INTR_WDT_LSB));
		/* TODO: reboot firmware when we can */
		FAULT("PVA watchdog timeout!");
		pva_kmd_log_err("PVA watchdog timeout!");
		pva_kmd_abort(pva);
	}

	if (h1x_val != 0) {
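The watchdog branch above shows the decode-and-acknowledge pattern used throughout this handler: extract a field from the latched status, then write the same bits back to clear them (write-1-to-clear). A sketch of that pattern in isolation, with WDT_MSB/WDT_LSB standing in for the real PVA_REG_SEC_LIC_INTR_WDT_* constants:

/* Sketch only: acknowledge the watchdog field of a W1C status register. */
static void ack_wdt_sketch(struct pva_kmd_device *pva)
{
	uint32_t status = pva_kmd_read(pva, pva->regspec.sec_lic_intr_status);

	if (PVA_EXTRACT(status, WDT_MSB, WDT_LSB, uint32_t) != 0U) {
		/* Writing the field back clears exactly those bits. */
		pva_kmd_write(pva, pva->regspec.sec_lic_intr_status,
			      status & PVA_MASK(WDT_MSB, WDT_LSB));
	}
}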
@@ -65,6 +60,7 @@ void pva_kmd_hyp_isr(void *data)
			      intr_status &
			      PVA_MASK(PVA_REG_SEC_LIC_INTR_H1X_MSB,
				       PVA_REG_SEC_LIC_INTR_H1X_LSB));
		pva_kmd_abort(pva);
	}

	if (hsp_val != 0) {
@@ -81,55 +77,50 @@ void pva_kmd_hyp_isr(void *data)
	}
}

static uint32_t read_ccq0_status(struct pva_kmd_device *pva, uint8_t status_id)
static uint32_t read_ccq_status(struct pva_kmd_device *pva, uint8_t ccq_id,
				uint8_t status_id)
{
	return pva_kmd_read(pva, pva->regspec.ccq_regs[0].status[status_id]);
	return pva_kmd_read(pva,
			    pva->regspec.ccq_regs[ccq_id].status[status_id]);
}

static void write_ccq0_status(struct pva_kmd_device *pva, uint8_t status_id,
			      uint32_t value)
static void write_ccq_status(struct pva_kmd_device *pva, uint8_t ccq_id,
			     uint8_t status_id, uint32_t value)
{
	pva_kmd_write(pva, pva->regspec.ccq_regs[0].status[status_id], value);
}

static void read_ccq_msg(struct pva_kmd_device *pva, struct pva_fw_msg *msg)
{
	uint32_t i;

	msg->data[0] = read_ccq0_status(pva, PVA_FW_MSG_STATUS_LAST);
	msg->len = PVA_EXTRACT(msg->data[0], PVA_FW_MSG_LEN_MSB,
			       PVA_FW_MSG_LEN_LSB, uint8_t);
	ASSERT(msg->len <= PVA_ARRAY_SIZE(msg->data));
	for (i = 1; i < msg->len; i++) {
		msg->data[i] =
			read_ccq0_status(pva, PVA_FW_MSG_STATUS_BASE + i - 1);
	}
	pva_kmd_write(pva, pva->regspec.ccq_regs[ccq_id].status[status_id],
		      value);
}

/* Handle interrupt from CCQ0 */
void pva_kmd_isr(void *data)
void pva_kmd_isr(void *data, enum pva_kmd_intr_line intr_line)
{
	struct pva_kmd_device *pva = data;
	uint32_t intr_status;
	uint8_t intr_interface = intr_line - PVA_KMD_INTR_LINE_CCQ0;

	intr_status = read_ccq_status(pva, intr_interface, 2) &
		      PVA_REG_CCQ_STATUS2_INTR_ALL_BITS;

	intr_status =
		read_ccq0_status(pva, 2) & PVA_REG_CCQ_STATUS2_INTR_ALL_BITS;
	pva_dbg_printf("CCQ0_INTR_STATUS 0x%x\n", intr_status);
	/* Clear interrupt status. This must be done prior to acking CCQ
	 * messages, otherwise we risk losing CCQ messages.
	 */
	write_ccq0_status(pva, 2, intr_status);
	write_ccq_status(pva, intr_interface, 2, intr_status);

	if (intr_status & PVA_REG_CCQ_STATUS2_INTR_STATUS8_BIT) {
		struct pva_fw_msg msg;

		read_ccq_msg(pva, &msg);

		pva_kmd_handle_msg(pva, &msg.data[0], msg.len);

		/* Ack through status1 write. */
		write_ccq0_status(pva, 1, 0 /* Value doesn't matter for now */);
		pva_kmd_shared_buffer_process(pva, intr_interface);
	}

	/* We don't care about Status7 or CCQ overflow interrupt */
}

enum pva_error pva_kmd_bind_shared_buffer_handler(void *pva_dev,
						  uint8_t interface, void *data)
{
	struct pva_kmd_device *pva = (struct pva_kmd_device *)pva_dev;
	return pva_kmd_bind_intr_handler(
		pva, PVA_KMD_INTR_LINE_CCQ0 + interface, pva_kmd_isr, data);
}

void pva_kmd_release_shared_buffer_handler(void *pva_dev, uint8_t interface)
{
	struct pva_kmd_device *pva = (struct pva_kmd_device *)pva_dev;
	pva_kmd_free_intr(pva, PVA_KMD_INTR_LINE_CCQ0 + interface);
}

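With the interface index now derived from intr_line, a single pva_kmd_isr can serve every CCQ interface. A sketch of how the contiguous lines might be bound in a loop (PVA_NUM_CCQ is a hypothetical count, not a name from this diff):

/* Sketch: bind one handler to N contiguous CCQ lines; each invocation
 * recovers its interface index as (intr_line - PVA_KMD_INTR_LINE_CCQ0).
 */
static enum pva_error bind_all_ccq_sketch(struct pva_kmd_device *pva)
{
	enum pva_error err = PVA_SUCCESS;
	uint8_t i;

	for (i = 0; i < PVA_NUM_CCQ; i++) {
		err = pva_kmd_bind_intr_handler(
			pva, PVA_KMD_INTR_LINE_CCQ0 + i, pva_kmd_isr, pva);
		if (err != PVA_SUCCESS)
			break;		/* caller unwinds already-bound lines */
	}
	return err;
}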
@@ -1,20 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_SILICON_ISR_H
#define PVA_KMD_SILICON_ISR_H
#include "pva_kmd_silicon_utils.h"
#include "pva_kmd_device.h"

void pva_kmd_hyp_isr(void *data);
void pva_kmd_hyp_isr(void *data, enum pva_kmd_intr_line intr_line);

void pva_kmd_isr(void *data);
/* CCQ interrupt handler */
void pva_kmd_isr(void *data, enum pva_kmd_intr_line intr_line);

#endif // PVA_KMD_SILICON_ISR_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_silicon_utils.h"
#include "pva_kmd_device.h"

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_SILICON_UTILS_H
#define PVA_KMD_SILICON_UTILS_H

@@ -1,16 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_submitter.h"
#include "pva_kmd_utils.h"
#include "pva_kmd_abort.h"

void pva_kmd_submitter_init(struct pva_kmd_submitter *submitter,
			    struct pva_kmd_queue *queue,
@@ -121,13 +114,15 @@ enum pva_error pva_kmd_submitter_submit(struct pva_kmd_submitter *submitter,
			   PVA_CMDBUF_FLAGS_ENGINE_AFFINITY_LSB);

	pva_kmd_mutex_lock(submitter->submit_lock);
	submitter->fence_future_value += 1U;
	submitter->fence_future_value =
		safe_wraparound_inc_u32(submitter->fence_future_value);
	submit_info.postfences[0].value = submitter->fence_future_value;
	err = pva_kmd_queue_submit(submitter->queue, &submit_info);
	if (err == PVA_SUCCESS) {
		*out_fence_val = submitter->fence_future_value;
	} else {
		submitter->fence_future_value -= 1U;
		submitter->fence_future_value =
			safe_wraparound_dec_u32(submitter->fence_future_value);
		pva_kmd_cmdbuf_builder_cancel(builder);
	}
	pva_kmd_mutex_unlock(submitter->submit_lock);
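The bare `+= 1U` / `-= 1U` on the fence counter are replaced with explicit wraparound helpers. Unsigned wraparound is already well defined in C, so these presumably exist to make the modulo-2^32 intent explicit for safety analysis; a plausible sketch, assuming that purpose:

#include <stdint.h>

/* Sketch: increment/decrement with explicit, documented wraparound. */
static inline uint32_t safe_wraparound_inc_u32(uint32_t v)
{
	return (v == UINT32_MAX) ? 0U : (v + 1U);
}

static inline uint32_t safe_wraparound_dec_u32(uint32_t v)
{
	return (v == 0U) ? UINT32_MAX : (v - 1U);
}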
@@ -148,6 +143,7 @@ enum pva_error pva_kmd_submitter_wait(struct pva_kmd_submitter *submitter,
		time_spent = safe_addu32(time_spent, poll_interval_us);
		if (time_spent >= timeout_us) {
			pva_kmd_log_err("pva_kmd_submitter_wait Timed out");
			pva_kmd_abort(submitter->queue->pva);
			return PVA_TIMEDOUT;
		}
	}

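For context, the hunk above sits inside a poll loop that now aborts the device on timeout instead of merely returning. A sketch of the surrounding loop shape (`fence_reached` and `pva_kmd_sleep_us` are illustrative stand-ins, not names from this diff):

/* Sketch of the wait loop shape around the change above. */
static enum pva_error wait_sketch(struct pva_kmd_submitter *submitter,
				  uint32_t fence_val, uint32_t timeout_us,
				  uint32_t poll_interval_us)
{
	uint32_t time_spent = 0U;

	while (!fence_reached(submitter, fence_val)) {
		pva_kmd_sleep_us(poll_interval_us);
		/* Saturating add avoids overflow on long timeouts. */
		time_spent = safe_addu32(time_spent, poll_interval_us);
		if (time_spent >= timeout_us) {
			pva_kmd_log_err("pva_kmd_submitter_wait Timed out");
			pva_kmd_abort(submitter->queue->pva);
			return PVA_TIMEDOUT;
		}
	}
	return PVA_SUCCESS;
}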
@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_SUBMITTER_H
#define PVA_KMD_SUBMITTER_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "pva_kmd_t23x.h"
#include "pva_kmd_constants.h"


@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_T23X_H
#define PVA_KMD_T23X_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "pva_kmd_t26x.h"
#include "pva_kmd_constants.h"


@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_T26X_H
#define PVA_KMD_T26X_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2025, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "pva_api_cmdbuf.h"
#include "pva_api_types.h"
#include "pva_bit.h"

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2025, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_TEGRA_STATS_H
#define PVA_KMD_TEGRA_STATS_H
#include "pva_kmd_device.h"

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "pva_kmd_mutex.h"
#include "pva_kmd_utils.h"
#include "pva_kmd_thread_sema.h"

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_utils.h"


@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_UTILS_H
#define PVA_KMD_UTILS_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_api_types.h"
#include "pva_kmd_vpu_app_auth.h"
@@ -23,7 +15,8 @@ enum pva_error pva_kmd_init_vpu_app_auth(struct pva_kmd_device *pva, bool ena)
	struct pva_vpu_auth *pva_auth = pva_kmd_zalloc(sizeof(*pva_auth));
	if (pva_auth == NULL) {
		pva_kmd_log_err("Unable to allocate memory");
		return PVA_NOMEM;
		err = PVA_NOMEM;
		goto error;
	}

	pva->pva_auth = pva_auth;
@@ -37,12 +30,24 @@ enum pva_error pva_kmd_init_vpu_app_auth(struct pva_kmd_device *pva, bool ena)
	 * Either of the two conditions, if satisfied, will enable authentication
	 */
	pva_auth->pva_auth_enable = ena;
	err = pva_kmd_mutex_init(&pva_auth->allow_list_lock);
	if (err != PVA_SUCCESS) {
		pva_kmd_log_err("Failed to initialize allow list lock");
		goto free;
	}

	default_path_len = strnlen(default_path, ALLOWLIST_FILE_LEN);
	if (default_path_len > 0U) {
		(void)memcpy(pva_auth->pva_auth_allowlist_path, default_path,
			     default_path_len);
	}

	return PVA_SUCCESS;

free:
	pva_kmd_free(pva_auth);
error:
	pva->pva_auth = NULL;
	return err;
}

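A note on the bounded copy above: because `pva_auth` comes from `pva_kmd_zalloc()`, the destination buffer is zero-filled, so copying exactly `default_path_len` bytes leaves the path NUL-terminated as long as `default_path` is shorter than `ALLOWLIST_FILE_LEN`. The pattern in isolation:

/* Sketch: bounded copy into a zero-filled buffer; no explicit terminator
 * write is needed while len < ALLOWLIST_FILE_LEN.
 */
size_t len = strnlen(default_path, ALLOWLIST_FILE_LEN);
if (len > 0U)
	(void)memcpy(pva_auth->pva_auth_allowlist_path, default_path, len);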
@@ -365,4 +370,21 @@ enum pva_error pva_kmd_allowlist_parse(struct pva_kmd_device *pva)

fail:
	return err;
}

void pva_kmd_deinit_vpu_app_auth(struct pva_kmd_device *pva)
{
	struct pva_vpu_auth *pva_auth;

	if (pva == NULL)
		return;

	pva_auth = pva->pva_auth;
	if (pva_auth == NULL)
		return;

	pva_kmd_allowlist_destroy(pva_auth);
	pva_kmd_mutex_deinit(&pva_auth->allow_list_lock);
	pva_kmd_free(pva_auth);
	pva->pva_auth = NULL;
}

@@ -1,4 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_VPU_APP_AUTH_H
#define PVA_KMD_VPU_APP_AUTH_H

@@ -68,6 +70,7 @@ struct pva_vpu_auth {
};

enum pva_error pva_kmd_init_vpu_app_auth(struct pva_kmd_device *pva, bool ena);
void pva_kmd_deinit_vpu_app_auth(struct pva_kmd_device *pva);

enum pva_error pva_kmd_verify_exectuable_hash(struct pva_kmd_device *pva,
					      uint8_t *dataptr, size_t size);

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "pva_kmd_device.h"
#include "pva_math_utils.h"

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_VPU_OCD_H
#define PVA_KMD_VPU_OCD_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_PLAT_FAULTS_H
#define PVA_PLAT_FAULTS_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef PVA_KMD_DEVICE_MEMORY_H
#define PVA_KMD_DEVICE_MEMORY_H

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_SHIM_CCQ_H
#define PVA_KMD_SHIM_CCQ_H
#include "pva_api.h"

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2025, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_SHIM_DEBUGFS_H
#define PVA_KMD_SHIM_DEBUGFS_H
#include "pva_api.h"

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_SHIM_INIT_H
#define PVA_KMD_SHIM_INIT_H
#include "pva_api.h"
@@ -37,6 +29,13 @@ enum pva_error pva_kmd_power_on(struct pva_kmd_device *pva);
 */
void pva_kmd_power_off(struct pva_kmd_device *pva);

/**
 * @brief Assert FW reset so the firmware enters recovery and user
 * submission is halted. This is required for host1x watchdog or KMD
 * submission timeout failures.
 */
void pva_kmd_fw_reset_assert(struct pva_kmd_device *pva);

/**
 * @brief Initialize firmware.
 *

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA Corporation. All Rights Reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation. Any
 * use, reproduction, disclosure or distribution of this software and related
 * documentation without an express license agreement from NVIDIA Corporation
 * is strictly prohibited.
 */
/* SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef PVA_KMD_SHIM_SILICON_H
#define PVA_KMD_SHIM_SILICON_H
#include "pva_api.h"
@@ -70,7 +62,8 @@ enum pva_kmd_intr_line {
/**
 * @brief Interrupt handler function prototype.
 */
typedef void (*pva_kmd_intr_handler_t)(void *data);
typedef void (*pva_kmd_intr_handler_t)(void *data,
				       enum pva_kmd_intr_line intr_line);

/**
 * @brief Bind an interrupt handler to an interrupt line.
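Any handler bound through this API must now accept the line it was bound to. A minimal sketch of a conforming handler and its registration (the handler name and cookie are illustrative, not from this diff):

/* Sketch: a handler matching the new pva_kmd_intr_handler_t prototype. */
static void my_handler(void *data, enum pva_kmd_intr_line intr_line)
{
	struct pva_kmd_device *pva = data;	/* cookie passed at bind time */

	(void)pva;
	(void)intr_line;	/* identifies which bound line fired */
}

/* Registration, inside some init path:
 *	err = pva_kmd_bind_intr_handler(pva, PVA_KMD_INTR_LINE_SEC_LIC,
 *					my_handler, pva);
 */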
@@ -112,14 +105,23 @@ void pva_kmd_free_intr(struct pva_kmd_device *pva,
enum pva_error pva_kmd_read_fw_bin(struct pva_kmd_device *pva);

/**
 * @brief Get base address of read only syncpoints.
 * @brief Assert FW reset so the firmware enters recovery and user
 * submission is halted. This is required for host1x watchdog or KMD
 * submission timeout failures.
 */
uint32_t pva_kmd_get_syncpt_ro_offset(struct pva_kmd_device *pva);
void pva_kmd_fw_reset_assert(struct pva_kmd_device *pva);

/**
 * @brief Get base address of read write syncpoints.
 * @brief Get starting IOVA of the memory shared by R5 and KMD.
 *
 * The starting IOVA is determined by the IOVA allocator on different platforms.
 * On Linux, the IOVA range is 0-2GB. On QNX, the IOVA range is 2GB-4GB
 * (configured in DTS).
 *
 * This memory region corresponds to the 2GB-4GB region of the R5 virtual
 * address space.
 */
uint32_t pva_kmd_get_syncpt_rw_offset(struct pva_kmd_device *pva);
uint64_t pva_kmd_get_r5_iova_start(void);

/**
 * @brief Configure EVP, Segment config registers and SCR registers.

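A hypothetical per-platform implementation consistent with the new doc comment; the real code may derive the base from DT or the IOVA allocator instead, and the constants here are assumptions:

#include <stdint.h>

/* Sketch only: platform-dependent start of the R5/KMD shared IOVA range. */
uint64_t pva_kmd_get_r5_iova_start(void)
{
#ifdef __QNX__
	return 0x80000000ULL;	/* 2 GB base, per the DTS-configured QNX range */
#else
	return 0x0ULL;		/* Linux IOVA range starts at 0 */
#endif
}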
Some files were not shown because too many files have changed in this diff